From 15b9077c2d41f59c716582d8f7ae6e334630c0ac Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 13 May 2014 20:43:31 +0000 Subject: [PATCH 001/354] Branching from trunk for HADOOP-10150 and HDFS-6134 at 1594374 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1594376 13f79535-47bb-0310-9956-ffa450edef68 From 8384d57290bd94981e9c0dea546c2d307f0ad9c8 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 16 May 2014 18:53:08 +0000 Subject: [PATCH 002/354] Add CHANGES-fs-encryption.txt to Common and HDFS git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1595304 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 13 +++++++++++++ .../hadoop-hdfs/CHANGES-fs-encryption.txt | 13 +++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt create mode 100644 hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt new file mode 100644 index 00000000000..fb293606add --- /dev/null +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -0,0 +1,13 @@ +Hadoop Common Change Log for HDFS-6134 and HADOOP-10150 + +fs-encryption (Unreleased) + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt new file mode 100644 index 00000000000..4d02b398a73 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -0,0 +1,13 @@ +Hadoop HDFS Change Log for HDFS-6134 and HADOOP-10150 + +fs-encryption (Unreleased) + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES From b20180ffa6c89396d9fcfec8b029b9c600503c3d Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Sat, 24 May 2014 01:19:06 +0000 Subject: [PATCH 003/354] HADOOP-10603. Crypto input and output streams implementing Hadoop stream interfaces. Contributed by Yi Liu and Charles Lamb. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1597230 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 + .../hadoop/crypto/AESCTRCryptoCodec.java | 57 ++ .../org/apache/hadoop/crypto/CryptoCodec.java | 82 ++ .../hadoop/crypto/CryptoInputStream.java | 613 +++++++++++++++ .../hadoop/crypto/CryptoOutputStream.java | 291 +++++++ .../org/apache/hadoop/crypto/Decryptor.java | 75 ++ .../org/apache/hadoop/crypto/Encryptor.java | 75 ++ .../hadoop/crypto/JCEAESCTRCryptoCodec.java | 55 ++ .../hadoop/crypto/JCEAESCTRDecryptor.java | 84 +++ .../hadoop/crypto/JCEAESCTREncryptor.java | 84 +++ .../fs/CommonConfigurationKeysPublic.java | 11 + .../fs/crypto/CryptoFSDataInputStream.java | 37 + .../fs/crypto/CryptoFSDataOutputStream.java | 47 ++ .../src/main/resources/core-default.xml | 26 + .../hadoop/crypto/CryptoStreamsTestBase.java | 712 ++++++++++++++++++ .../hadoop/crypto/TestCryptoStreams.java | 376 +++++++++ .../crypto/TestCryptoStreamsForLocalFS.java | 114 +++ 17 files changed, 2742 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index fb293606add..e7bc580ff32 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -8,6 +8,9 @@ fs-encryption (Unreleased) IMPROVEMENTS + HADOOP-10603. Crypto input and output streams implementing Hadoop stream + interfaces. 
(Yi Liu and Charles Lamb) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java new file mode 100644 index 00000000000..b76f1bf2f6a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import com.google.common.base.Preconditions; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class AESCTRCryptoCodec extends CryptoCodec { + /** + * For AES, the algorithm block is fixed size of 128 bits. + * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard + */ + private static final int AES_BLOCK_SIZE = 16; + + @Override + public int getAlgorithmBlockSize() { + return AES_BLOCK_SIZE; + } + + /** + * IV is produced by combining initial IV and the counter using addition. + * IV length should be the same as {@link #AES_BLOCK_SIZE} + */ + @Override + public void calculateIV(byte[] initIV, long counter, byte[] IV) { + Preconditions.checkArgument(initIV.length == AES_BLOCK_SIZE); + Preconditions.checkArgument(IV.length == AES_BLOCK_SIZE); + + ByteBuffer buf = ByteBuffer.wrap(IV); + buf.put(initIV); + buf.order(ByteOrder.BIG_ENDIAN); + counter += buf.getLong(AES_BLOCK_SIZE - 8); + buf.putLong(AES_BLOCK_SIZE - 8, counter); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java new file mode 100644 index 00000000000..80d824d0e19 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.security.GeneralSecurityException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ReflectionUtils; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY; + +/** + * Crypto codec class, encapsulates encryptor/decryptor pair. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class CryptoCodec implements Configurable { + + public static CryptoCodec getInstance(Configuration conf) { + final Class klass = conf.getClass( + HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY, JCEAESCTRCryptoCodec.class, + CryptoCodec.class); + return ReflectionUtils.newInstance(klass, conf); + } + + /** + * Get block size of a block cipher. + * For different algorithms, the block size may be different. + * @return int block size + */ + public abstract int getAlgorithmBlockSize(); + + /** + * Get a {@link #org.apache.hadoop.crypto.Encryptor}. + * @return Encryptor + */ + public abstract Encryptor getEncryptor() throws GeneralSecurityException; + + /** + * Get a {@link #org.apache.hadoop.crypto.Decryptor}. + * @return Decryptor + */ + public abstract Decryptor getDecryptor() throws GeneralSecurityException; + + /** + * This interface is only for Counter (CTR) mode. Typically calculating + * IV(Initialization Vector) is up to Encryptor or Decryptor, for + * example {@link #javax.crypto.Cipher} will maintain encryption context + * internally when do encryption/decryption continuously using its + * Cipher#update interface. + *

+ * In Hadoop, multiple nodes may read splits of a file, so decrypting of + * file is not continuous, even for encrypting may be not continuous. For + * each part, we need to calculate the counter through file position. + *
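/*
 * Editor's illustrative sketch (not part of the patch): one way to derive the
 * per-position IV for CTR mode from the counter described above. The class and
 * method names here are hypothetical; the 16-byte block size and the big-endian
 * addition into the last 8 bytes of the IV follow the AESCTRCryptoCodec
 * implementation added earlier in this patch.
 */
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

class CtrIvSketch {
  static final int AES_BLOCK_SIZE = 16;

  /** Compute the IV for a given stream position from the initial IV. */
  static byte[] ivForPosition(byte[] initIV, long position) {
    long counter = position / AES_BLOCK_SIZE;        // whole blocks before this position
    byte[] iv = new byte[AES_BLOCK_SIZE];
    ByteBuffer buf = ByteBuffer.wrap(iv);
    buf.put(initIV);                                  // copy the initial IV
    buf.order(ByteOrder.BIG_ENDIAN);
    long base = buf.getLong(AES_BLOCK_SIZE - 8);      // low 8 bytes of the initial IV
    buf.putLong(AES_BLOCK_SIZE - 8, base + counter);  // add the block counter
    return iv;
  }
}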

+ * Typically IV for a file position is produced by combining initial IV and + * the counter using any lossless operation (concatenation, addition, or XOR). + * @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_.28CTR.29 + * + * @param initIV initial IV + * @param counter counter for input stream position + * @param IV the IV for input stream position + */ + public abstract void calculateIV(byte[] initIV, long counter, byte[] IV); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java new file mode 100644 index 00000000000..ffcf1846cf2 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java @@ -0,0 +1,613 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.io.FileDescriptor; +import java.io.FileInputStream; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.security.GeneralSecurityException; +import java.util.EnumSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.fs.CanSetDropBehind; +import org.apache.hadoop.fs.CanSetReadahead; +import org.apache.hadoop.fs.HasEnhancedByteBufferAccess; +import org.apache.hadoop.fs.HasFileDescriptor; +import org.apache.hadoop.fs.PositionedReadable; +import org.apache.hadoop.fs.ReadOption; +import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.io.ByteBufferPool; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT; + +import com.google.common.base.Preconditions; + +/** + * CryptoInputStream decrypts data. It is not thread-safe. AES CTR mode is + * required in order to ensure that the plain text and cipher text have a 1:1 + * mapping. The decryption is buffer based. The key points of the decryption + * are (1) calculating the counter and (2) padding through stream position: + *

+ * counter = base + pos/(algorithm blocksize); + * padding = pos%(algorithm blocksize); + *
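/*
 * Editor's worked example (not part of the patch): with the 16-byte AES block size,
 * a stream position of 1000 gives counter = 1000 / 16 = 62 and padding = 1000 % 16 = 8,
 * so 62 whole blocks precede the position and decryption starts 8 bytes into block 62.
 * Those 8 padding bytes are placed at the front of inBuffer and skipped in outBuffer
 * after decryption, preserving the 1:1 plaintext/ciphertext mapping.
 */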

+ * The underlying stream offset is maintained as state. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class CryptoInputStream extends FilterInputStream implements + Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, + CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess { + private static final int MIN_BUFFER_SIZE = 512; + private static final byte[] oneByteBuf = new byte[1]; + private final CryptoCodec codec; + private final Decryptor decryptor; + /** + * Input data buffer. The data starts at inBuffer.position() and ends at + * to inBuffer.limit(). + */ + private ByteBuffer inBuffer; + /** + * The decrypted data buffer. The data starts at outBuffer.position() and + * ends at outBuffer.limit(); + */ + private ByteBuffer outBuffer; + private long streamOffset = 0; // Underlying stream offset. + /** + * Whether underlying stream supports + * {@link #org.apache.hadoop.fs.ByteBufferReadable} + */ + private Boolean usingByteBufferRead = null; + /** + * Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer} + * before any other data goes in. The purpose of padding is to put input data + * at proper position. + */ + private byte padding; + private boolean closed; + private final byte[] key; + private final byte[] initIV; + private byte[] iv; + + public CryptoInputStream(InputStream in, CryptoCodec codec, + int bufferSize, byte[] key, byte[] iv) throws IOException { + super(in); + Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE, + "Minimum value of buffer size is 512."); + this.key = key; + this.initIV = iv; + this.iv = iv.clone(); + inBuffer = ByteBuffer.allocateDirect(bufferSize); + outBuffer = ByteBuffer.allocateDirect(bufferSize); + outBuffer.limit(0); + this.codec = codec; + try { + decryptor = codec.getDecryptor(); + } catch (GeneralSecurityException e) { + throw new IOException(e); + } + if (in instanceof Seekable) { + streamOffset = ((Seekable) in).getPos(); + } + updateDecryptor(); + } + + public CryptoInputStream(InputStream in, CryptoCodec codec, + byte[] key, byte[] iv) throws IOException { + this(in, codec, getBufferSize(codec.getConf()), key, iv); + } + + public InputStream getWrappedStream() { + return in; + } + + /** + * Decryption is buffer based. + * If there is data in {@link #outBuffer}, then read it out of this buffer. + * If there is no data in {@link #outBuffer}, then read more from the + * underlying stream and do the decryption. + * @param b the buffer into which the decrypted data is read. + * @param off the buffer offset. + * @param len the maximum number of decrypted data bytes to read. + * @return int the total number of decrypted data bytes read into the buffer. + * @throws IOException + */ + @Override + public int read(byte[] b, int off, int len) throws IOException { + checkStream(); + if (b == null) { + throw new NullPointerException(); + } else if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return 0; + } + + int remaining = outBuffer.remaining(); + if (remaining > 0) { + int n = Math.min(len, remaining); + outBuffer.get(b, off, n); + return n; + } else { + int n = 0; + /** + * Check whether the underlying stream is {@link ByteBufferReadable}, + * it can avoid bytes copy. 
+ */ + if (usingByteBufferRead == null) { + if (in instanceof ByteBufferReadable) { + try { + n = ((ByteBufferReadable) in).read(inBuffer); + usingByteBufferRead = Boolean.TRUE; + } catch (UnsupportedOperationException e) { + usingByteBufferRead = Boolean.FALSE; + } + } + if (!usingByteBufferRead.booleanValue()) { + n = readFromUnderlyingStream(); + } + } else { + if (usingByteBufferRead.booleanValue()) { + n = ((ByteBufferReadable) in).read(inBuffer); + } else { + n = readFromUnderlyingStream(); + } + } + if (n <= 0) { + return n; + } + + streamOffset += n; // Read n bytes + decrypt(); + n = Math.min(len, outBuffer.remaining()); + outBuffer.get(b, off, n); + return n; + } + } + + // Read data from underlying stream. + private int readFromUnderlyingStream() throws IOException { + int toRead = inBuffer.remaining(); + byte[] tmp = getTmpBuf(); + int n = in.read(tmp, 0, toRead); + if (n > 0) { + inBuffer.put(tmp, 0, n); + } + return n; + } + + private byte[] tmpBuf; + private byte[] getTmpBuf() { + if (tmpBuf == null) { + tmpBuf = new byte[inBuffer.capacity()]; + } + return tmpBuf; + } + + /** + * Do the decryption using {@link #inBuffer} as input and {@link #outBuffer} + * as output. + */ + private void decrypt() throws IOException { + Preconditions.checkState(inBuffer.position() >= padding); + if(inBuffer.position() == padding) { + // There is no real data in inBuffer. + return; + } + inBuffer.flip(); + outBuffer.clear(); + decryptor.decrypt(inBuffer, outBuffer); + inBuffer.clear(); + outBuffer.flip(); + if (padding > 0) { + /** + * The plain text and cipher text have 1:1 mapping, they start at same + * position. + */ + outBuffer.position(padding); + padding = 0; + } + if (decryptor.isContextReset()) { + /** + * Typically we will not get here. To improve performance in CTR mode, + * we rely on the decryptor maintaining context, for example calculating + * the counter. Unfortunately, some bad implementations can't maintain + * context so we need to re-init after doing decryption. + */ + updateDecryptor(); + } + } + + /** + * Update the {@link #decryptor}. Calculate the counter and {@link #padding}. + */ + private void updateDecryptor() throws IOException { + long counter = streamOffset / codec.getAlgorithmBlockSize(); + padding = (byte)(streamOffset % codec.getAlgorithmBlockSize()); + inBuffer.position(padding); // Set proper position for input data. + codec.calculateIV(initIV, counter, iv); + decryptor.init(key, iv); + } + + /** + * Reset the underlying stream offset; and clear {@link #inBuffer} and + * {@link #outBuffer}. Typically this happens when doing {@link #seek(long)} + * or {@link #skip(long)}. + */ + private void resetStreamOffset(long offset) throws IOException { + streamOffset = offset; + inBuffer.clear(); + outBuffer.clear(); + outBuffer.limit(0); + updateDecryptor(); + } + + @Override + public void close() throws IOException { + if (closed) { + return; + } + + super.close(); + freeBuffers(); + closed = true; + } + + /** + * Free the direct buffer manually. + */ + private void freeBuffers() { + sun.misc.Cleaner inBufferCleaner = + ((sun.nio.ch.DirectBuffer) inBuffer).cleaner(); + inBufferCleaner.clean(); + sun.misc.Cleaner outBufferCleaner = + ((sun.nio.ch.DirectBuffer) outBuffer).cleaner(); + outBufferCleaner.clean(); + } + + // Positioned read. 
+ @Override + public int read(long position, byte[] buffer, int offset, int length) + throws IOException { + checkStream(); + try { + int n = ((PositionedReadable) in).read(position, buffer, offset, length); + if (n > 0) { + /** + * Since this operation does not change the current offset of a file, + * streamOffset should be not changed and we need to restore the + * decryptor and outBuffer after decryption. + */ + decrypt(position, buffer, offset, length); + } + + return n; + } catch (ClassCastException e) { + throw new UnsupportedOperationException("This stream does not support " + + "positioned read."); + } + } + + /** + * Decrypt given length of data in buffer: start from offset. + * Output is also buffer and start from same offset. Restore the + * {@link #decryptor} and {@link #outBuffer} after decryption. + */ + private void decrypt(long position, byte[] buffer, int offset, int length) + throws IOException { + + byte[] tmp = getTmpBuf(); + int unread = outBuffer.remaining(); + if (unread > 0) { // Cache outBuffer + outBuffer.get(tmp, 0, unread); + } + long curOffset = streamOffset; + resetStreamOffset(position); + + int n = 0; + while (n < length) { + int toDecrypt = Math.min(length - n, inBuffer.remaining()); + inBuffer.put(buffer, offset + n, toDecrypt); + // Do decryption + decrypt(); + outBuffer.get(buffer, offset + n, toDecrypt); + n += toDecrypt; + } + + // After decryption + resetStreamOffset(curOffset); + if (unread > 0) { // Restore outBuffer + outBuffer.clear(); + outBuffer.put(tmp, 0, unread); + outBuffer.flip(); + } + } + + // Positioned read fully. + @Override + public void readFully(long position, byte[] buffer, int offset, int length) + throws IOException { + checkStream(); + try { + ((PositionedReadable) in).readFully(position, buffer, offset, length); + if (length > 0) { + /** + * Since this operation does not change the current offset of a file, + * streamOffset should be not changed and we need to restore the decryptor + * and outBuffer after decryption. + */ + decrypt(position, buffer, offset, length); + } + } catch (ClassCastException e) { + throw new UnsupportedOperationException("This stream does not support " + + "positioned readFully."); + } + } + + @Override + public void readFully(long position, byte[] buffer) throws IOException { + readFully(position, buffer, 0, buffer.length); + } + + // Seek to a position. + @Override + public void seek(long pos) throws IOException { + Preconditions.checkArgument(pos >= 0, "Cannot seek to negative offset."); + checkStream(); + try { + // If target pos we have already read and decrypt. + if (pos <= streamOffset && pos >= (streamOffset - outBuffer.remaining())) { + int forward = (int) (pos - (streamOffset - outBuffer.remaining())); + if (forward > 0) { + outBuffer.position(outBuffer.position() + forward); + } + } else { + ((Seekable) in).seek(pos); + resetStreamOffset(pos); + } + } catch (ClassCastException e) { + throw new UnsupportedOperationException("This stream does not support " + + "seek."); + } + } + + // Skip n bytes + @Override + public long skip(long n) throws IOException { + Preconditions.checkArgument(n >= 0, "Negative skip length."); + checkStream(); + + if (n == 0) { + return 0; + } else if (n <= outBuffer.remaining()) { + int pos = outBuffer.position() + (int) n; + outBuffer.position(pos); + return n; + } else { + /** + * Subtract outBuffer.remaining() to see how many bytes we need to + * skip in underlying stream. 
We get real skipped bytes number of + * underlying stream then add outBuffer.remaining() to get skipped + * bytes number from user's view. + */ + n -= outBuffer.remaining(); + long skipped = in.skip(n); + if (skipped < 0) { + skipped = 0; + } + long pos = streamOffset + skipped; + skipped += outBuffer.remaining(); + resetStreamOffset(pos); + return skipped; + } + } + + // Get underlying stream position. + @Override + public long getPos() throws IOException { + checkStream(); + // Equals: ((Seekable) in).getPos() - outBuffer.remaining() + return streamOffset - outBuffer.remaining(); + } + + // ByteBuffer read. + @Override + public int read(ByteBuffer buf) throws IOException { + checkStream(); + if (in instanceof ByteBufferReadable) { + int unread = outBuffer.remaining(); + if (unread > 0) { // Have unread decrypted data in buffer. + int toRead = buf.remaining(); + if (toRead <= unread) { + int limit = outBuffer.limit(); + outBuffer.limit(outBuffer.position() + toRead); + buf.put(outBuffer); + outBuffer.limit(limit); + return toRead; + } else { + buf.put(outBuffer); + } + } + + int pos = buf.position(); + int n = ((ByteBufferReadable) in).read(buf); + if (n > 0) { + streamOffset += n; // Read n bytes + decrypt(buf, n, pos); + } + return n; + } + + throw new UnsupportedOperationException("ByteBuffer read unsupported " + + "by input stream."); + } + + /** + * Decrypt all data in buf: total n bytes from given start position. + * Output is also buf and same start position. + * buf.position() and buf.limit() should be unchanged after decryption. + */ + private void decrypt(ByteBuffer buf, int n, int start) + throws IOException { + int pos = buf.position(); + int limit = buf.limit(); + int len = 0; + while (len < n) { + buf.position(start + len); + buf.limit(start + len + Math.min(n - len, inBuffer.remaining())); + inBuffer.put(buf); + // Do decryption + decrypt(); + + buf.position(start + len); + buf.limit(limit); + len += outBuffer.remaining(); + buf.put(outBuffer); + } + buf.position(pos); + } + + @Override + public int available() throws IOException { + checkStream(); + + return in.available() + outBuffer.remaining(); + } + + @Override + public boolean markSupported() { + return false; + } + + @Override + public void mark(int readLimit) { + } + + @Override + public void reset() throws IOException { + throw new IOException("Mark/reset not supported"); + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + Preconditions.checkArgument(targetPos >= 0, + "Cannot seek to negative offset."); + checkStream(); + try { + boolean result = ((Seekable) in).seekToNewSource(targetPos); + resetStreamOffset(targetPos); + return result; + } catch (ClassCastException e) { + throw new UnsupportedOperationException("This stream does not support " + + "seekToNewSource."); + } + } + + @Override + public ByteBuffer read(ByteBufferPool bufferPool, int maxLength, + EnumSet opts) throws IOException, + UnsupportedOperationException { + checkStream(); + try { + if (outBuffer.remaining() > 0) { + // Have some decrypted data unread, need to reset. + ((Seekable) in).seek(getPos()); + resetStreamOffset(getPos()); + } + ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in). 
+ read(bufferPool, maxLength, opts); + if (buffer != null) { + int n = buffer.remaining(); + if (n > 0) { + streamOffset += buffer.remaining(); // Read n bytes + int pos = buffer.position(); + decrypt(buffer, n, pos); + } + } + return buffer; + } catch (ClassCastException e) { + throw new UnsupportedOperationException("This stream does not support " + + "enhanced byte buffer access."); + } + } + + @Override + public void releaseBuffer(ByteBuffer buffer) { + try { + ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer); + } catch (ClassCastException e) { + throw new UnsupportedOperationException("This stream does not support " + + "release buffer."); + } + } + + @Override + public void setReadahead(Long readahead) throws IOException, + UnsupportedOperationException { + try { + ((CanSetReadahead) in).setReadahead(readahead); + } catch (ClassCastException e) { + throw new UnsupportedOperationException("This stream does not support " + + "setting the readahead caching strategy."); + } + } + + @Override + public void setDropBehind(Boolean dropCache) throws IOException, + UnsupportedOperationException { + try { + ((CanSetDropBehind) in).setDropBehind(dropCache); + } catch (ClassCastException e) { + throw new UnsupportedOperationException("This stream does not " + + "support setting the drop-behind caching setting."); + } + } + + @Override + public FileDescriptor getFileDescriptor() throws IOException { + if (in instanceof HasFileDescriptor) { + return ((HasFileDescriptor) in).getFileDescriptor(); + } else if (in instanceof FileInputStream) { + return ((FileInputStream) in).getFD(); + } else { + return null; + } + } + + @Override + public int read() throws IOException { + return (read(oneByteBuf, 0, 1) == -1) ? -1 : (oneByteBuf[0] & 0xff); + } + + private void checkStream() throws IOException { + if (closed) { + throw new IOException("Stream closed"); + } + } + + private static int getBufferSize(Configuration conf) { + return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY, + HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java new file mode 100644 index 00000000000..934c4479352 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java @@ -0,0 +1,291 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.crypto; + +import java.io.FilterOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.security.GeneralSecurityException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CanSetDropBehind; +import org.apache.hadoop.fs.Syncable; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT; + +import com.google.common.base.Preconditions; + +/** + * CryptoOutputStream encrypts data. It is not thread-safe. AES CTR mode is + * required in order to ensure that the plain text and cipher text have a 1:1 + * mapping. The encryption is buffer based. The key points of the encryption are + * (1) calculating counter and (2) padding through stream position. + *

+ * counter = base + pos/(algorithm blocksize); + * padding = pos%(algorithm blocksize); + *
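/*
 * Editor's illustrative sketch (not part of the patch): when writing resumes at a
 * non-zero position (for example an append), the constructor that takes a
 * streamOffset lets the same counter/padding arithmetic start mid-block. The helper
 * name and its parameters are hypothetical; the constructor is the one declared
 * later in this class.
 */
static CryptoOutputStream wrapForAppend(OutputStream out, CryptoCodec codec,
    int bufferSize, byte[] key, byte[] iv, long existingLength) throws IOException {
  // existingLength becomes the initial streamOffset, so the first encrypted block
  // may be entered part-way through (padding = existingLength % blockSize).
  return new CryptoOutputStream(out, codec, bufferSize, key, iv, existingLength);
}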

+ * The underlying stream offset is maintained as state. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class CryptoOutputStream extends FilterOutputStream implements + Syncable, CanSetDropBehind { + private static final int MIN_BUFFER_SIZE = 512; + private static final byte[] oneByteBuf = new byte[1]; + private final CryptoCodec codec; + private final Encryptor encryptor; + /** + * Input data buffer. The data starts at inBuffer.position() and ends at + * inBuffer.limit(). + */ + private ByteBuffer inBuffer; + /** + * Encrypted data buffer. The data starts at outBuffer.position() and ends at + * outBuffer.limit(); + */ + private ByteBuffer outBuffer; + private long streamOffset = 0; // Underlying stream offset. + /** + * Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer} + * before any other data goes in. The purpose of padding is to put input data + * at proper position. + */ + private byte padding; + private boolean closed; + private final byte[] key; + private final byte[] initIV; + private byte[] iv; + + public CryptoOutputStream(OutputStream out, CryptoCodec codec, + int bufferSize, byte[] key, byte[] iv) throws IOException { + this(out, codec, bufferSize, key, iv, 0); + } + + public CryptoOutputStream(OutputStream out, CryptoCodec codec, + int bufferSize, byte[] key, byte[] iv, long streamOffset) + throws IOException { + super(out); + Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE, + "Minimum value of buffer size is 512."); + this.key = key; + this.initIV = iv; + this.iv = iv.clone(); + inBuffer = ByteBuffer.allocateDirect(bufferSize); + outBuffer = ByteBuffer.allocateDirect(bufferSize); + this.streamOffset = streamOffset; + this.codec = codec; + try { + encryptor = codec.getEncryptor(); + } catch (GeneralSecurityException e) { + throw new IOException(e); + } + updateEncryptor(); + } + + public CryptoOutputStream(OutputStream out, CryptoCodec codec, + byte[] key, byte[] iv) throws IOException { + this(out, codec, key, iv, 0); + } + + public CryptoOutputStream(OutputStream out, CryptoCodec codec, + byte[] key, byte[] iv, long streamOffset) throws IOException { + this(out, codec, getBufferSize(codec.getConf()), key, iv, streamOffset); + } + + public OutputStream getWrappedStream() { + return out; + } + + /** + * Encryption is buffer based. + * If there is enough room in {@link #inBuffer}, then write to this buffer. + * If {@link #inBuffer} is full, then do encryption and write data to the + * underlying stream. + * @param b the data. + * @param off the start offset in the data. + * @param len the number of bytes to write. + * @throws IOException + */ + @Override + public void write(byte[] b, int off, int len) throws IOException { + checkStream(); + if (b == null) { + throw new NullPointerException(); + } else if (off < 0 || len < 0 || off > b.length || + len > b.length - off) { + throw new IndexOutOfBoundsException(); + } + while (len > 0) { + int remaining = inBuffer.remaining(); + if (len < remaining) { + inBuffer.put(b, off, len); + len = 0; + } else { + inBuffer.put(b, off, remaining); + off += remaining; + len -= remaining; + encrypt(); + } + } + } + + /** + * Do the encryption, input is {@link #inBuffer} and output is + * {@link #outBuffer}. + */ + private void encrypt() throws IOException { + Preconditions.checkState(inBuffer.position() >= padding); + if (inBuffer.position() == padding) { + // There is no real data in the inBuffer. 
+ return; + } + inBuffer.flip(); + outBuffer.clear(); + encryptor.encrypt(inBuffer, outBuffer); + inBuffer.clear(); + outBuffer.flip(); + if (padding > 0) { + /** + * The plain text and cipher text have 1:1 mapping, they start at same + * position. + */ + outBuffer.position(padding); + padding = 0; + } + int len = outBuffer.remaining(); + /** + * If underlying stream supports {@link ByteBuffer} write in future, needs + * refine here. + */ + final byte[] tmp = getTmpBuf(); + outBuffer.get(tmp, 0, len); + out.write(tmp, 0, len); + + streamOffset += len; + if (encryptor.isContextReset()) { + /** + * We will generally not get here. For CTR mode, to improve + * performance, we rely on the encryptor maintaining context, for + * example to calculate the counter. But some bad implementations + * can't maintain context, and need us to re-init after doing + * encryption. + */ + updateEncryptor(); + } + } + + /** + * Update the {@link #encryptor}: calculate counter and {@link #padding}. + */ + private void updateEncryptor() throws IOException { + long counter = streamOffset / codec.getAlgorithmBlockSize(); + padding = (byte)(streamOffset % codec.getAlgorithmBlockSize()); + inBuffer.position(padding); // Set proper position for input data. + codec.calculateIV(initIV, counter, iv); + encryptor.init(key, iv); + } + + private byte[] tmpBuf; + private byte[] getTmpBuf() { + if (tmpBuf == null) { + tmpBuf = new byte[outBuffer.capacity()]; + } + return tmpBuf; + } + + @Override + public void close() throws IOException { + if (closed) { + return; + } + + super.close(); + freeBuffers(); + closed = true; + } + + /** + * Free the direct buffer manually. + */ + private void freeBuffers() { + sun.misc.Cleaner inBufferCleaner = + ((sun.nio.ch.DirectBuffer) inBuffer).cleaner(); + inBufferCleaner.clean(); + sun.misc.Cleaner outBufferCleaner = + ((sun.nio.ch.DirectBuffer) outBuffer).cleaner(); + outBufferCleaner.clean(); + } + + /** + * To flush, we need to encrypt the data in buffer and write to underlying + * stream, then do the flush. 
+ */ + @Override + public void flush() throws IOException { + checkStream(); + encrypt(); + super.flush(); + } + + @Override + public void write(int b) throws IOException { + oneByteBuf[0] = (byte)(b & 0xff); + write(oneByteBuf, 0, oneByteBuf.length); + } + + private void checkStream() throws IOException { + if (closed) { + throw new IOException("Stream closed"); + } + } + + @Override + public void setDropBehind(Boolean dropCache) throws IOException, + UnsupportedOperationException { + try { + ((CanSetDropBehind) out).setDropBehind(dropCache); + } catch (ClassCastException e) { + throw new UnsupportedOperationException("This stream does not " + + "support setting the drop-behind caching."); + } + } + + @Override + public void hflush() throws IOException { + flush(); + if (out instanceof Syncable) { + ((Syncable)out).hflush(); + } + } + + @Override + public void hsync() throws IOException { + flush(); + if (out instanceof Syncable) { + ((Syncable)out).hsync(); + } + } + + private static int getBufferSize(Configuration conf) { + return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY, + HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java new file mode 100644 index 00000000000..4afb2216580 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface Decryptor { + + /** + * Initialize the decryptor, the internal decryption context will be + * reset. + * @param key decryption key. + * @param iv decryption initialization vector + * @throws IOException if initialization fails + */ + public void init(byte[] key, byte[] iv) throws IOException; + + /** + * Indicate whether decryption context is reset. + *

+ * This is useful for a mode like CTR, which requires a different IV for + * different parts of the data. Usually the decryptor can maintain the context + * internally, for example by calculating the IV/counter, and then continue a + * multiple-part decryption operation without re-initializing with the key and a + * new IV. For a mode like CTR, if the context is reset after each decryption, + * the decryptor has to be re-initialized before each operation, which is not efficient. + * @return boolean whether context is reset. + */ + public boolean isContextReset(); + + /** + * This exposes a direct interface for record decryption with direct byte + * buffers. + *

+ * The decrypt() function need not always consume the buffers provided; + * it may need to be called multiple times to decrypt an entire buffer, + * and the object will hold the decryption context internally. + *

+ * Some implementations may need enough space in the destination buffer to + * decrypt an entire input. + *

+ * The end result will move inBuffer.position() by the bytes-read and + * outBuffer.position() by the bytes-written. It should not modify the + * inBuffer.limit() or outBuffer.limit() to maintain consistency of operation. + *
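/*
 * Editor's illustrative sketch (not part of the patch): a minimal use of this
 * contract with direct buffers. It assumes a codec from CryptoCodec.getInstance(conf)
 * whose decryptor, like the JCE AES-CTR one added in this patch, consumes all input
 * when the output buffer is large enough; other implementations may require several
 * decrypt() calls. Method and variable names are hypothetical.
 */
static byte[] decryptAll(CryptoCodec codec, byte[] key, byte[] iv, byte[] cipherText)
    throws IOException, GeneralSecurityException {
  Decryptor decryptor = codec.getDecryptor();
  decryptor.init(key, iv);                        // resets the decryption context
  ByteBuffer inBuf = ByteBuffer.allocateDirect(cipherText.length);
  ByteBuffer outBuf = ByteBuffer.allocateDirect(cipherText.length);
  inBuf.put(cipherText);
  inBuf.flip();                                   // data sits between position and limit
  decryptor.decrypt(inBuf, outBuf);               // advances both buffers' positions
  outBuf.flip();
  byte[] plainText = new byte[outBuf.remaining()];
  outBuf.get(plainText);
  return plainText;
}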

+ * @param inBuffer in direct {@link ByteBuffer} for reading from. Requires + * inBuffer != null and inBuffer.remaining() > 0 + * @param outBuffer out direct {@link ByteBuffer} for storing the results + * into. Requires outBuffer != null and outBuffer.remaining() > 0 + * @throws IOException if decryption fails + */ + public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java new file mode 100644 index 00000000000..398cc2e9aec --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface Encryptor { + + /** + * Initialize the encryptor, the internal encryption context will be + * reset. + * @param key encryption key. + * @param iv encryption initialization vector + * @throws IOException if initialization fails + */ + public void init(byte[] key, byte[] iv) throws IOException; + + /** + * Indicate whether encryption context is reset. + *

+ * This is useful for a mode like CTR, which requires a different IV for + * different parts of the data. Usually the encryptor can maintain the context + * internally, for example by calculating the IV/counter, and then continue a + * multiple-part encryption operation without re-initializing with the key and a + * new IV. For a mode like CTR, if the context is reset after each encryption, + * the encryptor has to be re-initialized before each operation, which is not efficient. + * @return boolean whether context is reset. + */ + public boolean isContextReset(); + + /** + * This exposes a direct interface for record encryption with direct byte + * buffers. + *

+ * The encrypt() function need not always consume the buffers provided; + * it may need to be called multiple times to encrypt an entire buffer, + * and the object will hold the encryption context internally. + *

+ * Some implementations may need enough space in the destination buffer to + * encrypt an entire input. + *

+ * The end result will move inBuffer.position() by the bytes-read and + * outBuffer.position() by the bytes-written. It should not modify the + * inBuffer.limit() or outBuffer.limit() to maintain consistency of operation. + *

+ * @param inBuffer in direct {@link ByteBuffer} for reading from. Requires + * inBuffer != null and inBuffer.remaining() > 0 + * @param outBuffer out direct {@link ByteBuffer} for storing the results + * into. Requires outBuffer != null and outBuffer.remaining() > 0 + * @throws IOException if encryption fails + */ + public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException; +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java new file mode 100644 index 00000000000..aea9e07ee6a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.security.GeneralSecurityException; + +import org.apache.hadoop.conf.Configuration; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY; + +/** + * Implement the AES-CTR crypto codec using JCE provider. + */ +public class JCEAESCTRCryptoCodec extends AESCTRCryptoCodec { + private Configuration conf; + private String provider; + + public JCEAESCTRCryptoCodec() { + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + provider = conf.get(HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY); + } + + @Override + public Encryptor getEncryptor() throws GeneralSecurityException { + return new JCEAESCTREncryptor(provider); + } + + @Override + public Decryptor getDecryptor() throws GeneralSecurityException { + return new JCEAESCTRDecryptor(provider); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java new file mode 100644 index 00000000000..a3fb13f6291 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.GeneralSecurityException; + +import javax.crypto.Cipher; +import javax.crypto.spec.IvParameterSpec; +import javax.crypto.spec.SecretKeySpec; + +import com.google.common.base.Preconditions; + +public class JCEAESCTRDecryptor implements Decryptor { + private final Cipher cipher; + private boolean contextReset = false; + + public JCEAESCTRDecryptor(String provider) throws GeneralSecurityException { + if (provider == null || provider.isEmpty()) { + cipher = Cipher.getInstance("AES/CTR/NoPadding"); + } else { + cipher = Cipher.getInstance("AES/CTR/NoPadding", provider); + } + } + + @Override + public void init(byte[] key, byte[] iv) throws IOException { + Preconditions.checkNotNull(key); + Preconditions.checkNotNull(iv); + contextReset = false; + try { + cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(key, "AES"), + new IvParameterSpec(iv)); + } catch (Exception e) { + throw new IOException(e); + } + } + + /** + * For AES-CTR, will consume all input data and needs enough space in the + * destination buffer to decrypt entire input data. + */ + @Override + public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + try { + int inputSize = inBuffer.remaining(); + // Cipher#update will maintain decryption context. + int n = cipher.update(inBuffer, outBuffer); + if (n < inputSize) { + /** + * Typically code will not get here. Cipher#update will decrypt all + * input data and put result in outBuffer. + * Cipher#doFinal will reset the decryption context. + */ + contextReset = true; + cipher.doFinal(inBuffer, outBuffer); + } + } catch (Exception e) { + throw new IOException(e); + } + } + + @Override + public boolean isContextReset() { + return contextReset; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java new file mode 100644 index 00000000000..9ee70dc4723 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.crypto; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.GeneralSecurityException; + +import javax.crypto.Cipher; +import javax.crypto.spec.IvParameterSpec; +import javax.crypto.spec.SecretKeySpec; + +import com.google.common.base.Preconditions; + +public class JCEAESCTREncryptor implements Encryptor { + private final Cipher cipher; + private boolean contextReset = false; + + public JCEAESCTREncryptor(String provider) throws GeneralSecurityException { + if (provider == null || provider.isEmpty()) { + cipher = Cipher.getInstance("AES/CTR/NoPadding"); + } else { + cipher = Cipher.getInstance("AES/CTR/NoPadding", provider); + } + } + + @Override + public void init(byte[] key, byte[] iv) throws IOException { + Preconditions.checkNotNull(key); + Preconditions.checkNotNull(iv); + contextReset = false; + try { + cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"), + new IvParameterSpec(iv)); + } catch (Exception e) { + throw new IOException(e); + } + } + + /** + * For AES-CTR, will consume all input data and needs enough space in the + * destination buffer to encrypt entire input data. + */ + @Override + public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + try { + int inputSize = inBuffer.remaining(); + // Cipher#update will maintain encryption context. + int n = cipher.update(inBuffer, outBuffer); + if (n < inputSize) { + /** + * Typically code will not get here. Cipher#update will encrypt all + * input data and put result in outBuffer. + * Cipher#doFinal will reset the encryption context. + */ + contextReset = true; + cipher.doFinal(inBuffer, outBuffer); + } + } catch (Exception e) { + throw new IOException(e); + } + } + + @Override + public boolean isContextReset() { + return contextReset; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index b6be29447af..c0853a9ded9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -282,5 +282,16 @@ public class CommonConfigurationKeysPublic { /** Class to override Sasl Properties for a connection */ public static final String HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS = "hadoop.security.saslproperties.resolver.class"; + /** See core-default.xml */ + public static final String HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY = + "hadoop.security.crypto.codec.class"; + /** See core-default.xml */ + public static final String HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY = + "hadoop.security.crypto.jce.provider"; + /** See core-default.xml */ + public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY = + "hadoop.security.crypto.buffer.size"; + /** Defalt value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */ + public static final int HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT = 8192; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java new file mode 100644 index 00000000000..8758d28f1de --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java @@ -0,0 +1,37 
@@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.crypto; + +import java.io.IOException; + +import org.apache.hadoop.crypto.CryptoCodec; +import org.apache.hadoop.crypto.CryptoInputStream; +import org.apache.hadoop.fs.FSDataInputStream; + +public class CryptoFSDataInputStream extends FSDataInputStream { + + public CryptoFSDataInputStream(FSDataInputStream in, CryptoCodec codec, + int bufferSize, byte[] key, byte[] iv) throws IOException { + super(new CryptoInputStream(in, codec, bufferSize, key, iv)); + } + + public CryptoFSDataInputStream(FSDataInputStream in, CryptoCodec codec, + byte[] key, byte[] iv) throws IOException { + super(new CryptoInputStream(in, codec, key, iv)); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java new file mode 100644 index 00000000000..040fbcb8799 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.crypto; + +import java.io.IOException; + +import org.apache.hadoop.crypto.CryptoCodec; +import org.apache.hadoop.crypto.CryptoOutputStream; +import org.apache.hadoop.fs.FSDataOutputStream; + +public class CryptoFSDataOutputStream extends FSDataOutputStream { + private final FSDataOutputStream fsOut; + + public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec, + int bufferSize, byte[] key, byte[] iv) throws IOException { + super(new CryptoOutputStream(out, codec, bufferSize, key, iv, + out.getPos()), null, out.getPos()); + this.fsOut = out; + } + + public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec, + byte[] key, byte[] iv) throws IOException { + super(new CryptoOutputStream(out, codec, key, iv, out.getPos()), + null, out.getPos()); + this.fsOut = out; + } + + @Override + public long getPos() { + return fsOut.getPos(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index ea0808eef75..6073c1a9155 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1348,4 +1348,30 @@ true. + + + hadoop.security.crypto.codec.class + + + The default implementation of CryptoCodec which is used for encryption + and decryption. + + + + + hadoop.security.crypto.jce.provider + + + The JCE provider name used in CryptoCodec. + + + + + hadoop.security.crypto.buffer.size + 8192 + + The buffer size used in Crypto InputStream and OutputStream, and default + value is 8192. + + diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java new file mode 100644 index 00000000000..7f36c2b2495 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java @@ -0,0 +1,712 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
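The three configuration keys described above can also be set programmatically. A sketch, assuming the JCE codec added by this patch (JCEAESCTRCryptoCodec) is the desired implementation; the "SunJCE" provider name and the 4 KB buffer are illustrative choices only.

    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY,
        "org.apache.hadoop.crypto.JCEAESCTRCryptoCodec");
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY,
        "SunJCE");   // leave unset to use the default JCE provider
    conf.setInt(CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY,
        4096);       // default is 8192
    CryptoCodec codec = CryptoCodec.getInstance(conf);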
+ */ +package org.apache.hadoop.crypto; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.EnumSet; +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.fs.HasEnhancedByteBufferAccess; +import org.apache.hadoop.fs.PositionedReadable; +import org.apache.hadoop.fs.ReadOption; +import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.io.ByteBufferPool; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.RandomDatum; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public abstract class CryptoStreamsTestBase { + protected static final Log LOG= LogFactory.getLog( + CryptoStreamsTestBase.class); + + protected static CryptoCodec codec; + private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16}; + private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}; + + protected static final int count = 10000; + protected static int defaultBufferSize = 8192; + protected static int smallBufferSize = 1024; + private byte[] data; + private int dataLen; + + @Before + public void setUp() throws IOException { + // Generate data + int seed = new Random().nextInt(); + DataOutputBuffer dataBuf = new DataOutputBuffer(); + RandomDatum.Generator generator = new RandomDatum.Generator(seed); + for(int i=0; i < count; ++i) { + generator.next(); + RandomDatum key = generator.getKey(); + RandomDatum value = generator.getValue(); + + key.write(dataBuf); + value.write(dataBuf); + } + LOG.info("Generated " + count + " records"); + data = dataBuf.getData(); + dataLen = dataBuf.getLength(); + } + + protected void writeData(OutputStream out) throws Exception { + out.write(data, 0, dataLen); + out.close(); + } + + protected int getDataLen() { + return dataLen; + } + + private int readAll(InputStream in, byte[] b, int off, int len) + throws IOException { + int n = 0; + int total = 0; + while (n != -1) { + total += n; + if (total >= len) { + break; + } + n = in.read(b, off + total, len - total); + } + + return total; + } + + protected OutputStream getOutputStream(int bufferSize) throws IOException { + return getOutputStream(bufferSize, key, iv); + } + + protected abstract OutputStream getOutputStream(int bufferSize, byte[] key, + byte[] iv) throws IOException; + + protected InputStream getInputStream(int bufferSize) throws IOException { + return getInputStream(bufferSize, key, iv); + } + + protected abstract InputStream getInputStream(int bufferSize, byte[] key, + byte[] iv) throws IOException; + + /** + * Test crypto reading with different buffer size. 
+ */ + @Test(timeout=120000) + public void testRead() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + // Default buffer size + InputStream in = getInputStream(defaultBufferSize); + readCheck(in); + in.close(); + + // Small buffer size + in = getInputStream(smallBufferSize); + readCheck(in); + in.close(); + } + + private void readCheck(InputStream in) throws Exception { + byte[] result = new byte[dataLen]; + int n = readAll(in, result, 0, dataLen); + + Assert.assertEquals(dataLen, n); + byte[] expectedData = new byte[n]; + System.arraycopy(data, 0, expectedData, 0, n); + Assert.assertArrayEquals(result, expectedData); + + // EOF + n = in.read(result, 0, dataLen); + Assert.assertEquals(n, -1); + in.close(); + } + + /** + * Test crypto with different IV. + */ + @Test(timeout=120000) + public void testCryptoIV() throws Exception { + byte[] iv1 = iv.clone(); + + // Counter base: Long.MAX_VALUE + setCounterBaseForIV(iv1, Long.MAX_VALUE); + cryptoCheck(iv1); + + // Counter base: Long.MAX_VALUE - 1 + setCounterBaseForIV(iv1, Long.MAX_VALUE - 1); + cryptoCheck(iv1); + + // Counter base: Integer.MAX_VALUE + setCounterBaseForIV(iv1, Integer.MAX_VALUE); + cryptoCheck(iv1); + + // Counter base: 0 + setCounterBaseForIV(iv1, 0); + cryptoCheck(iv1); + + // Counter base: -1 + setCounterBaseForIV(iv1, -1); + cryptoCheck(iv1); + } + + private void cryptoCheck(byte[] iv) throws Exception { + OutputStream out = getOutputStream(defaultBufferSize, key, iv); + writeData(out); + + InputStream in = getInputStream(defaultBufferSize, key, iv); + readCheck(in); + in.close(); + } + + private void setCounterBaseForIV(byte[] iv, long counterBase) { + ByteBuffer buf = ByteBuffer.wrap(iv); + buf.order(ByteOrder.BIG_ENDIAN); + buf.putLong(iv.length - 8, counterBase); + } + + /** + * Test hflush/hsync of crypto output stream, and with different buffer size. + */ + @Test(timeout=120000) + public void testSyncable() throws IOException { + syncableCheck(); + } + + private void syncableCheck() throws IOException { + OutputStream out = getOutputStream(smallBufferSize); + try { + int bytesWritten = dataLen/3; + out.write(data, 0, bytesWritten); + ((Syncable) out).hflush(); + + InputStream in = getInputStream(defaultBufferSize); + verify(in, bytesWritten, data); + in.close(); + + out.write(data, bytesWritten, dataLen - bytesWritten); + ((Syncable) out).hsync(); + + in = getInputStream(defaultBufferSize); + verify(in, dataLen, data); + in.close(); + } finally { + out.close(); + } + } + + private void verify(InputStream in, int bytesToVerify, + byte[] expectedBytes) throws IOException { + byte[] readBuf = new byte[bytesToVerify]; + readAll(in, readBuf, 0, bytesToVerify); + for (int i=0; i= len) { + break; + } + n = ((PositionedReadable) in).read(pos + total, b, off + total, + len - total); + } + + return total; + } + + /** + * Test positioned read. 
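The counter bases exercised by testCryptoIV above (Long.MAX_VALUE, -1, and so on) deliberately probe overflow of the 64-bit counter stored big-endian in the last 8 bytes of the 16-byte IV. A small arithmetic sketch of what setCounterBaseForIV writes and how the codec's counter addition wraps; the numbers are illustrative only.

    byte[] iv = new byte[16];
    ByteBuffer buf = ByteBuffer.wrap(iv).order(ByteOrder.BIG_ENDIAN);
    buf.putLong(iv.length - 8, -1L);         // counter base 0xFFFFFFFFFFFFFFFF
    long base = buf.getLong(iv.length - 8);  // reads back as -1 (signed)
    long blockIndex = 3;                     // block number derived from the stream offset
    long counter = base + blockIndex;        // two's-complement wrap-around: -1 + 3 == 2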
+ */ + @Test(timeout=120000) + public void testPositionedRead() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + InputStream in = getInputStream(defaultBufferSize); + // Pos: 1/3 dataLen + positionedReadCheck(in , dataLen/3); + + // Pos: 1/2 dataLen + positionedReadCheck(in, dataLen/2); + in.close(); + } + + private void positionedReadCheck(InputStream in, int pos) throws Exception { + byte[] result = new byte[dataLen]; + int n = readAll(in, pos, result, 0, dataLen); + + Assert.assertEquals(dataLen, n + pos); + byte[] readData = new byte[n]; + System.arraycopy(result, 0, readData, 0, n); + byte[] expectedData = new byte[n]; + System.arraycopy(data, pos, expectedData, 0, n); + Assert.assertArrayEquals(readData, expectedData); + } + + /** + * Test read fully + */ + @Test(timeout=120000) + public void testReadFully() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + InputStream in = getInputStream(defaultBufferSize); + final int len1 = dataLen/4; + // Read len1 bytes + byte [] readData = new byte[len1]; + readAll(in, readData, 0, len1); + byte[] expectedData = new byte[len1]; + System.arraycopy(data, 0, expectedData, 0, len1); + Assert.assertArrayEquals(readData, expectedData); + + // Pos: 1/3 dataLen + readFullyCheck(in, dataLen/3); + + // Read len1 bytes + readData = new byte[len1]; + readAll(in, readData, 0, len1); + expectedData = new byte[len1]; + System.arraycopy(data, len1, expectedData, 0, len1); + Assert.assertArrayEquals(readData, expectedData); + + // Pos: 1/2 dataLen + readFullyCheck(in, dataLen/2); + + // Read len1 bytes + readData = new byte[len1]; + readAll(in, readData, 0, len1); + expectedData = new byte[len1]; + System.arraycopy(data, 2 * len1, expectedData, 0, len1); + Assert.assertArrayEquals(readData, expectedData); + + in.close(); + } + + private void readFullyCheck(InputStream in, int pos) throws Exception { + byte[] result = new byte[dataLen - pos]; + ((PositionedReadable) in).readFully(pos, result); + + byte[] expectedData = new byte[dataLen - pos]; + System.arraycopy(data, pos, expectedData, 0, dataLen - pos); + Assert.assertArrayEquals(result, expectedData); + + result = new byte[dataLen]; // Exceeds maximum length + try { + ((PositionedReadable) in).readFully(pos, result); + Assert.fail("Read fully exceeds maximum length should fail."); + } catch (IOException e) { + } + } + + /** + * Test seek to different position. 
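Both checks above rely on the standard PositionedReadable semantics, which the crypto stream is expected to preserve: read(position, ...) is best-effort and leaves the stream offset untouched, while readFully(position, ...) either fills the whole range or fails (the test above expects an IOException when the request extends past EOF). A short sketch against any FSDataInputStream; the offsets and buffer size are arbitrary.

    static void preadSketch(FSDataInputStream in) throws IOException {
      byte[] page = new byte[4096];
      long before = in.getPos();
      int n = in.read(1024L, page, 0, page.length); // may return fewer than 4096 bytes
      assert in.getPos() == before;                 // positional read does not move the offset
      in.readFully(1024L, page, 0, page.length);    // fills the range or throws
    }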
+ */ + @Test(timeout=120000) + public void testSeek() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + InputStream in = getInputStream(defaultBufferSize); + // Pos: 1/3 dataLen + seekCheck(in, dataLen/3); + + // Pos: 0 + seekCheck(in, 0); + + // Pos: 1/2 dataLen + seekCheck(in, dataLen/2); + + // Pos: -3 + try { + seekCheck(in, -3); + Assert.fail("Seek to negative offset should fail."); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("Cannot seek to negative " + + "offset", e); + } + + // Pos: dataLen + 3 + try { + seekCheck(in, dataLen + 3); + Assert.fail("Seek after EOF should fail."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e); + } + + in.close(); + } + + private void seekCheck(InputStream in, int pos) throws Exception { + byte[] result = new byte[dataLen]; + ((Seekable) in).seek(pos); + int n = readAll(in, result, 0, dataLen); + + Assert.assertEquals(dataLen, n + pos); + byte[] readData = new byte[n]; + System.arraycopy(result, 0, readData, 0, n); + byte[] expectedData = new byte[n]; + System.arraycopy(data, pos, expectedData, 0, n); + Assert.assertArrayEquals(readData, expectedData); + } + + /** + * Test get position. + */ + @Test(timeout=120000) + public void testGetPos() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + // Default buffer size + InputStream in = getInputStream(defaultBufferSize); + byte[] result = new byte[dataLen]; + int n1 = readAll(in, result, 0, dataLen/3); + Assert.assertEquals(n1, ((Seekable) in).getPos()); + + int n2 = readAll(in, result, n1, dataLen - n1); + Assert.assertEquals(n1 + n2, ((Seekable) in).getPos()); + in.close(); + } + + @Test(timeout=120000) + public void testAvailable() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + // Default buffer size + InputStream in = getInputStream(defaultBufferSize); + byte[] result = new byte[dataLen]; + int n1 = readAll(in, result, 0, dataLen/3); + Assert.assertEquals(in.available(), dataLen - n1); + + int n2 = readAll(in, result, n1, dataLen - n1); + Assert.assertEquals(in.available(), dataLen - n1 - n2); + in.close(); + } + + /** + * Test skip. 
+ */ + @Test(timeout=120000) + public void testSkip() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + // Default buffer size + InputStream in = getInputStream(defaultBufferSize); + byte[] result = new byte[dataLen]; + int n1 = readAll(in, result, 0, dataLen/3); + Assert.assertEquals(n1, ((Seekable) in).getPos()); + + long skipped = in.skip(dataLen/3); + int n2 = readAll(in, result, 0, dataLen); + + Assert.assertEquals(dataLen, n1 + skipped + n2); + byte[] readData = new byte[n2]; + System.arraycopy(result, 0, readData, 0, n2); + byte[] expectedData = new byte[n2]; + System.arraycopy(data, dataLen - n2, expectedData, 0, n2); + Assert.assertArrayEquals(readData, expectedData); + + try { + skipped = in.skip(-3); + Assert.fail("Skip Negative length should fail."); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("Negative skip length", e); + } + + // Skip after EOF + skipped = in.skip(3); + Assert.assertEquals(skipped, 0); + + in.close(); + } + + private void byteBufferReadCheck(InputStream in, ByteBuffer buf, + int bufPos) throws Exception { + buf.position(bufPos); + int n = ((ByteBufferReadable) in).read(buf); + byte[] readData = new byte[n]; + buf.rewind(); + buf.position(bufPos); + buf.get(readData); + byte[] expectedData = new byte[n]; + System.arraycopy(data, 0, expectedData, 0, n); + Assert.assertArrayEquals(readData, expectedData); + } + + /** + * Test byte buffer read with different buffer size. + */ + @Test(timeout=120000) + public void testByteBufferRead() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + // Default buffer size, initial buffer position is 0 + InputStream in = getInputStream(defaultBufferSize); + ByteBuffer buf = ByteBuffer.allocate(dataLen + 100); + byteBufferReadCheck(in, buf, 0); + in.close(); + + // Default buffer size, initial buffer position is not 0 + in = getInputStream(defaultBufferSize); + buf.clear(); + byteBufferReadCheck(in, buf, 11); + in.close(); + + // Small buffer size, initial buffer position is 0 + in = getInputStream(smallBufferSize); + buf.clear(); + byteBufferReadCheck(in, buf, 0); + in.close(); + + // Small buffer size, initial buffer position is not 0 + in = getInputStream(smallBufferSize); + buf.clear(); + byteBufferReadCheck(in, buf, 11); + in.close(); + + // Direct buffer, default buffer size, initial buffer position is 0 + in = getInputStream(defaultBufferSize); + buf = ByteBuffer.allocateDirect(dataLen + 100); + byteBufferReadCheck(in, buf, 0); + in.close(); + + // Direct buffer, default buffer size, initial buffer position is not 0 + in = getInputStream(defaultBufferSize); + buf.clear(); + byteBufferReadCheck(in, buf, 11); + in.close(); + + // Direct buffer, small buffer size, initial buffer position is 0 + in = getInputStream(smallBufferSize); + buf.clear(); + byteBufferReadCheck(in, buf, 0); + in.close(); + + // Direct buffer, small buffer size, initial buffer position is not 0 + in = getInputStream(smallBufferSize); + buf.clear(); + byteBufferReadCheck(in, buf, 11); + in.close(); + } + + @Test(timeout=120000) + public void testCombinedOp() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + final int len1 = dataLen/8; + final int len2 = dataLen/10; + + InputStream in = getInputStream(defaultBufferSize); + // Read len1 data. 
+ byte[] readData = new byte[len1]; + readAll(in, readData, 0, len1); + byte[] expectedData = new byte[len1]; + System.arraycopy(data, 0, expectedData, 0, len1); + Assert.assertArrayEquals(readData, expectedData); + + long pos = ((Seekable) in).getPos(); + Assert.assertEquals(len1, pos); + + // Seek forward len2 + ((Seekable) in).seek(pos + len2); + // Skip forward len2 + long n = in.skip(len2); + Assert.assertEquals(len2, n); + + // Pos: 1/4 dataLen + positionedReadCheck(in , dataLen/4); + + // Pos should be len1 + len2 + len2 + pos = ((Seekable) in).getPos(); + Assert.assertEquals(len1 + len2 + len2, pos); + + // Read forward len1 + ByteBuffer buf = ByteBuffer.allocate(len1); + int nRead = ((ByteBufferReadable) in).read(buf); + readData = new byte[nRead]; + buf.rewind(); + buf.get(readData); + expectedData = new byte[nRead]; + System.arraycopy(data, (int)pos, expectedData, 0, nRead); + Assert.assertArrayEquals(readData, expectedData); + + // Pos should be len1 + 2 * len2 + nRead + pos = ((Seekable) in).getPos(); + Assert.assertEquals(len1 + 2 * len2 + nRead, pos); + + // Pos: 1/3 dataLen + positionedReadCheck(in , dataLen/3); + + // Read forward len1 + readData = new byte[len1]; + readAll(in, readData, 0, len1); + expectedData = new byte[len1]; + System.arraycopy(data, (int)pos, expectedData, 0, len1); + Assert.assertArrayEquals(readData, expectedData); + + // Pos should be 2 * len1 + 2 * len2 + nRead + pos = ((Seekable) in).getPos(); + Assert.assertEquals(2 * len1 + 2 * len2 + nRead, pos); + + // Read forward len1 + buf = ByteBuffer.allocate(len1); + nRead = ((ByteBufferReadable) in).read(buf); + readData = new byte[nRead]; + buf.rewind(); + buf.get(readData); + expectedData = new byte[nRead]; + System.arraycopy(data, (int)pos, expectedData, 0, nRead); + Assert.assertArrayEquals(readData, expectedData); + + // ByteBuffer read after EOF + ((Seekable) in).seek(dataLen); + buf.clear(); + n = ((ByteBufferReadable) in).read(buf); + Assert.assertEquals(n, -1); + + in.close(); + } + + @Test(timeout=120000) + public void testSeekToNewSource() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + InputStream in = getInputStream(defaultBufferSize); + + final int len1 = dataLen/8; + byte[] readData = new byte[len1]; + readAll(in, readData, 0, len1); + + // Pos: 1/3 dataLen + seekToNewSourceCheck(in, dataLen/3); + + // Pos: 0 + seekToNewSourceCheck(in, 0); + + // Pos: 1/2 dataLen + seekToNewSourceCheck(in, dataLen/2); + + // Pos: -3 + try { + seekToNewSourceCheck(in, -3); + Assert.fail("Seek to negative offset should fail."); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains("Cannot seek to negative " + + "offset", e); + } + + // Pos: dataLen + 3 + try { + seekToNewSourceCheck(in, dataLen + 3); + Assert.fail("Seek after EOF should fail."); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Attempted to read past end of file", e); + } + + in.close(); + } + + private void seekToNewSourceCheck(InputStream in, int targetPos) + throws Exception { + byte[] result = new byte[dataLen]; + ((Seekable) in).seekToNewSource(targetPos); + int n = readAll(in, result, 0, dataLen); + + Assert.assertEquals(dataLen, n + targetPos); + byte[] readData = new byte[n]; + System.arraycopy(result, 0, readData, 0, n); + byte[] expectedData = new byte[n]; + System.arraycopy(data, targetPos, expectedData, 0, n); + Assert.assertArrayEquals(readData, expectedData); + } + + private ByteBufferPool getBufferPool() { + return new 
ByteBufferPool() { + @Override + public ByteBuffer getBuffer(boolean direct, int length) { + return ByteBuffer.allocateDirect(length); + } + + @Override + public void putBuffer(ByteBuffer buffer) { + } + }; + } + + @Test(timeout=120000) + public void testHasEnhancedByteBufferAccess() throws Exception { + OutputStream out = getOutputStream(defaultBufferSize); + writeData(out); + + InputStream in = getInputStream(defaultBufferSize); + final int len1 = dataLen/8; + // ByteBuffer size is len1 + ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).read( + getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + int n1 = buffer.remaining(); + byte[] readData = new byte[n1]; + buffer.get(readData); + byte[] expectedData = new byte[n1]; + System.arraycopy(data, 0, expectedData, 0, n1); + Assert.assertArrayEquals(readData, expectedData); + ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer); + + // Read len1 bytes + readData = new byte[len1]; + readAll(in, readData, 0, len1); + expectedData = new byte[len1]; + System.arraycopy(data, n1, expectedData, 0, len1); + Assert.assertArrayEquals(readData, expectedData); + + // ByteBuffer size is len1 + buffer = ((HasEnhancedByteBufferAccess) in).read( + getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + int n2 = buffer.remaining(); + readData = new byte[n2]; + buffer.get(readData); + expectedData = new byte[n2]; + System.arraycopy(data, n1 + len1, expectedData, 0, n2); + Assert.assertArrayEquals(readData, expectedData); + ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer); + + in.close(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java new file mode 100644 index 00000000000..ebe025b0eac --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java @@ -0,0 +1,376 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
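The buffer-pool plumbing above follows the HasEnhancedByteBufferAccess pattern: the caller lends a ByteBufferPool, receives a (possibly direct) ByteBuffer of decrypted data, and must hand it back with releaseBuffer. A sketch, using Hadoop's ElasticByteBufferPool only as a convenient pool implementation; the read length is arbitrary.

    static void zeroCopyReadSketch(FSDataInputStream in) throws IOException {
      ByteBufferPool pool = new ElasticByteBufferPool();
      ByteBuffer buf = in.read(pool, 4096, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      if (buf != null) {                  // null signals EOF
        try {
          byte[] copy = new byte[buf.remaining()];
          buf.get(copy);                  // consume the decrypted bytes
        } finally {
          in.releaseBuffer(buf);          // always return the buffer to the pool
        }
      }
    }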
+ */ +package org.apache.hadoop.crypto; + +import java.io.EOFException; +import java.io.FileDescriptor; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.EnumSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.fs.CanSetDropBehind; +import org.apache.hadoop.fs.CanSetReadahead; +import org.apache.hadoop.fs.HasEnhancedByteBufferAccess; +import org.apache.hadoop.fs.HasFileDescriptor; +import org.apache.hadoop.fs.PositionedReadable; +import org.apache.hadoop.fs.ReadOption; +import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.io.ByteBufferPool; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.DataOutputBuffer; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +public class TestCryptoStreams extends CryptoStreamsTestBase { + /** + * Data storage. + * {@link #getOutputStream(int)} will write to this buf. + * {@link #getInputStream(int)} will read from this buf. + */ + private byte[] buf; + private int bufLen; + + @BeforeClass + public static void init() throws Exception { + Configuration conf = new Configuration(); + codec = CryptoCodec.getInstance(conf); + } + + @AfterClass + public static void shutdown() throws Exception { + } + + @Override + protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv) + throws IOException { + DataOutputBuffer out = new DataOutputBuffer() { + @Override + public void flush() throws IOException { + buf = getData(); + bufLen = getLength(); + } + @Override + public void close() throws IOException { + buf = getData(); + bufLen = getLength(); + } + }; + return new CryptoOutputStream(new FakeOutputStream(out), + codec, bufferSize, key, iv); + } + + @Override + protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv) + throws IOException { + DataInputBuffer in = new DataInputBuffer(); + in.reset(buf, 0, bufLen); + return new CryptoInputStream(new FakeInputStream(in), codec, bufferSize, + key, iv); + } + + private class FakeOutputStream extends OutputStream + implements Syncable, CanSetDropBehind{ + private final byte[] oneByteBuf = new byte[1]; + private final DataOutputBuffer out; + private boolean closed; + + public FakeOutputStream(DataOutputBuffer out) { + this.out = out; + } + + @Override + public void write(byte b[], int off, int len) throws IOException { + if (b == null) { + throw new NullPointerException(); + } else if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return; + } + + checkStream(); + + out.write(b, off, len); + } + + @Override + public void flush() throws IOException { + checkStream(); + out.flush(); + } + + @Override + public void close() throws IOException { + if (closed) { + return; + } + + out.close(); + closed = true; + } + + @Override + public void write(int b) throws IOException { + oneByteBuf[0] = (byte)(b & 0xff); + write(oneByteBuf, 0, oneByteBuf.length); + } + + @Override + public void setDropBehind(Boolean dropCache) throws IOException, + UnsupportedOperationException { + } + + @Override + public void hflush() throws IOException { + checkStream(); + flush(); + } + + @Override + public void hsync() throws IOException { + checkStream(); + flush(); + } + + private void checkStream() throws IOException { + if (closed) { + throw new IOException("Stream is closed!"); + } + } + } + + private 
class FakeInputStream extends InputStream implements + Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, + CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess { + private final byte[] oneByteBuf = new byte[1]; + private int pos = 0; + private final byte[] data; + private final int length; + private boolean closed = false; + + public FakeInputStream(DataInputBuffer in) { + data = in.getData(); + length = in.getLength(); + } + + @Override + public void seek(long pos) throws IOException { + if (pos > length) { + throw new IOException("Cannot seek after EOF."); + } + if (pos < 0) { + throw new IOException("Cannot seek to negative offset."); + } + checkStream(); + this.pos = (int)pos; + } + + @Override + public long getPos() throws IOException { + return pos; + } + + @Override + public int available() throws IOException { + return length - pos; + } + + @Override + public int read(byte b[], int off, int len) throws IOException { + if (b == null) { + throw new NullPointerException(); + } else if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return 0; + } + + checkStream(); + + if (pos < length) { + int n = (int) Math.min(len, length - pos); + System.arraycopy(data, pos, b, off, n); + pos += n; + return n; + } + + return -1; + } + + private void checkStream() throws IOException { + if (closed) { + throw new IOException("Stream is closed!"); + } + } + + @Override + public int read(ByteBuffer buf) throws IOException { + checkStream(); + if (pos < length) { + int n = (int) Math.min(buf.remaining(), length - pos); + if (n > 0) { + buf.put(data, pos, n); + } + pos += n; + return n; + } + return -1; + } + + @Override + public long skip(long n) throws IOException { + checkStream(); + if ( n > 0 ) { + if( n + pos > length ) { + n = length - pos; + } + pos += n; + return n; + } + return n < 0 ? 
-1 : 0; + } + + @Override + public void close() throws IOException { + closed = true; + } + + @Override + public int read(long position, byte[] b, int off, int len) + throws IOException { + if (b == null) { + throw new NullPointerException(); + } else if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return 0; + } + + if (position > length) { + throw new IOException("Cannot read after EOF."); + } + if (position < 0) { + throw new IOException("Cannot read to negative offset."); + } + + checkStream(); + + if (position < length) { + int n = (int) Math.min(len, length - position); + System.arraycopy(data, (int)position, b, off, n); + return n; + } + + return -1; + } + + @Override + public void readFully(long position, byte[] b, int off, int len) + throws IOException { + if (b == null) { + throw new NullPointerException(); + } else if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return; + } + + if (position > length) { + throw new IOException("Cannot read after EOF."); + } + if (position < 0) { + throw new IOException("Cannot read to negative offset."); + } + + checkStream(); + + if (position + len > length) { + throw new EOFException("Reach the end of stream."); + } + + System.arraycopy(data, (int)position, b, off, len); + } + + @Override + public void readFully(long position, byte[] buffer) throws IOException { + readFully(position, buffer, 0, buffer.length); + } + + @Override + public ByteBuffer read(ByteBufferPool bufferPool, int maxLength, + EnumSet opts) throws IOException, + UnsupportedOperationException { + if (bufferPool == null) { + throw new IOException("Please specify buffer pool."); + } + ByteBuffer buffer = bufferPool.getBuffer(true, maxLength); + int pos = buffer.position(); + int n = read(buffer); + if (n >= 0) { + buffer.position(pos); + return buffer; + } + + return null; + } + + @Override + public void releaseBuffer(ByteBuffer buffer) { + + } + + @Override + public void setReadahead(Long readahead) throws IOException, + UnsupportedOperationException { + } + + @Override + public void setDropBehind(Boolean dropCache) throws IOException, + UnsupportedOperationException { + } + + @Override + public FileDescriptor getFileDescriptor() throws IOException { + return null; + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + if (targetPos > length) { + throw new IOException("Attempted to read past end of file."); + } + if (targetPos < 0) { + throw new IOException("Cannot seek after EOF."); + } + checkStream(); + this.pos = (int)targetPos; + return false; + } + + @Override + public int read() throws IOException { + int ret = read( oneByteBuf, 0, 1 ); + return ( ret <= 0 ) ? -1 : (oneByteBuf[0] & 0xff); + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java new file mode 100644 index 00000000000..286fb6a3d29 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +public class TestCryptoStreamsForLocalFS extends CryptoStreamsTestBase { + private static final String TEST_ROOT_DIR + = System.getProperty("test.build.data","build/test/data") + "/work-dir/localfs"; + + private final File base = new File(TEST_ROOT_DIR); + private final Path file = new Path(TEST_ROOT_DIR, "test-file"); + private static LocalFileSystem fileSys; + + @BeforeClass + public static void init() throws Exception { + Configuration conf = new Configuration(); + conf = new Configuration(false); + conf.set("fs.file.impl", LocalFileSystem.class.getName()); + fileSys = FileSystem.getLocal(conf); + codec = CryptoCodec.getInstance(conf); + } + + @AfterClass + public static void shutdown() throws Exception { + } + + @Before + @Override + public void setUp() throws IOException { + fileSys.delete(new Path(TEST_ROOT_DIR), true); + super.setUp(); + } + + @After + public void cleanUp() throws IOException { + FileUtil.setWritable(base, true); + FileUtil.fullyDelete(base); + assertTrue(!base.exists()); + } + + @Override + protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv) + throws IOException { + return new CryptoOutputStream(fileSys.create(file), codec, bufferSize, + key, iv); + } + + @Override + protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv) + throws IOException { + return new CryptoInputStream(fileSys.open(file), codec, bufferSize, + key, iv); + } + + @Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read") + @Override + @Test(timeout=1000) + public void testByteBufferRead() throws Exception {} + + @Ignore("ChecksumFSOutputSummer doesn't support Syncable") + @Override + @Test(timeout=1000) + public void testSyncable() throws IOException {} + + @Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read") + @Override + @Test(timeout=1000) + public void testCombinedOp() throws Exception {} + + @Ignore("ChecksumFSInputChecker doesn't support enhanced ByteBuffer access") + @Override + @Test(timeout=1000) + public void testHasEnhancedByteBufferAccess() throws Exception { + } + + @Ignore("ChecksumFSInputChecker doesn't support seekToNewSource") + @Override + @Test(timeout=1000) + public void testSeekToNewSource() throws Exception { + } +} From 2e5ae1aad74575cd35f47bd5f0f789d7056ef77d Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Thu, 29 May 2014 22:09:51 +0000 Subject: [PATCH 004/354] 
HADOOP-10628. Javadoc and few code style improvement for Crypto input and output streams. (yliu via clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1598429 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 + .../hadoop/crypto/AESCTRCryptoCodec.java | 6 +- .../org/apache/hadoop/crypto/CryptoCodec.java | 28 ++-- .../hadoop/crypto/CryptoInputStream.java | 124 +++++++++--------- .../hadoop/crypto/CryptoOutputStream.java | 41 +++--- .../org/apache/hadoop/crypto/Decryptor.java | 41 +++--- .../org/apache/hadoop/crypto/Encryptor.java | 42 +++--- .../hadoop/crypto/JCEAESCTRDecryptor.java | 4 +- .../hadoop/crypto/JCEAESCTREncryptor.java | 4 +- .../src/main/resources/core-default.xml | 3 +- .../hadoop/crypto/CryptoStreamsTestBase.java | 101 +++++++------- 11 files changed, 194 insertions(+), 203 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index e7bc580ff32..5d06794ff88 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -11,6 +11,9 @@ fs-encryption (Unreleased) HADOOP-10603. Crypto input and output streams implementing Hadoop stream interfaces. (Yi Liu and Charles Lamb) + HADOOP-10628. Javadoc and few code style improvement for Crypto + input and output streams. (yliu via clamb) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java index b76f1bf2f6a..a39af7ef8b7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java @@ -40,15 +40,15 @@ public int getAlgorithmBlockSize() { } /** - * IV is produced by combining initial IV and the counter using addition. - * IV length should be the same as {@link #AES_BLOCK_SIZE} + * The IV is produced by adding the initial IV to the counter. IV length + * should be the same as {@link #AES_BLOCK_SIZE} */ @Override public void calculateIV(byte[] initIV, long counter, byte[] IV) { Preconditions.checkArgument(initIV.length == AES_BLOCK_SIZE); Preconditions.checkArgument(IV.length == AES_BLOCK_SIZE); - ByteBuffer buf = ByteBuffer.wrap(IV); + final ByteBuffer buf = ByteBuffer.wrap(IV); buf.put(initIV); buf.order(ByteOrder.BIG_ENDIAN); counter += buf.getLong(AES_BLOCK_SIZE - 8); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index 80d824d0e19..dd20d61faa8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -41,37 +41,37 @@ public static CryptoCodec getInstance(Configuration conf) { } /** - * Get block size of a block cipher. + * Get the block size of a block cipher. * For different algorithms, the block size may be different. - * @return int block size + * @return int the block size */ public abstract int getAlgorithmBlockSize(); /** - * Get a {@link #org.apache.hadoop.crypto.Encryptor}. 
- * @return Encryptor + * Get an {@link #org.apache.hadoop.crypto.Encryptor}. + * @return Encryptor the encryptor */ public abstract Encryptor getEncryptor() throws GeneralSecurityException; /** * Get a {@link #org.apache.hadoop.crypto.Decryptor}. - * @return Decryptor + * @return Decryptor the decryptor */ public abstract Decryptor getDecryptor() throws GeneralSecurityException; /** - * This interface is only for Counter (CTR) mode. Typically calculating - * IV(Initialization Vector) is up to Encryptor or Decryptor, for - * example {@link #javax.crypto.Cipher} will maintain encryption context - * internally when do encryption/decryption continuously using its + * This interface is only for Counter (CTR) mode. Generally the Encryptor + * or Decryptor calculates the IV and maintain encryption context internally. + * For example a {@link #javax.crypto.Cipher} will maintain its encryption + * context internally when we do encryption/decryption using the * Cipher#update interface. *
<p/>
- * In Hadoop, multiple nodes may read splits of a file, so decrypting of - * file is not continuous, even for encrypting may be not continuous. For - * each part, we need to calculate the counter through file position. + * Encryption/Decryption is not always on the entire file. For example, + * in Hadoop, a node may only decrypt a portion of a file (i.e. a split). + * In these situations, the counter is derived from the file position. *
<p/>
- * Typically IV for a file position is produced by combining initial IV and - * the counter using any lossless operation (concatenation, addition, or XOR). + * The IV can be calculated by combining the initial IV and the counter with + * a lossless operation (concatenation, addition, or XOR). * @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_.28CTR.29 * * @param initIV initial IV diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java index ffcf1846cf2..374ca2e9483 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java @@ -63,26 +63,30 @@ public class CryptoInputStream extends FilterInputStream implements private static final byte[] oneByteBuf = new byte[1]; private final CryptoCodec codec; private final Decryptor decryptor; + /** * Input data buffer. The data starts at inBuffer.position() and ends at * to inBuffer.limit(). */ private ByteBuffer inBuffer; + /** * The decrypted data buffer. The data starts at outBuffer.position() and * ends at outBuffer.limit(); */ private ByteBuffer outBuffer; private long streamOffset = 0; // Underlying stream offset. + /** - * Whether underlying stream supports + * Whether the underlying stream supports * {@link #org.apache.hadoop.fs.ByteBufferReadable} */ private Boolean usingByteBufferRead = null; + /** * Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer} - * before any other data goes in. The purpose of padding is to put input data - * at proper position. + * before any other data goes in. The purpose of padding is to put the input + * data at proper position. */ private byte padding; private boolean closed; @@ -144,14 +148,15 @@ public int read(byte[] b, int off, int len) throws IOException { return 0; } - int remaining = outBuffer.remaining(); + final int remaining = outBuffer.remaining(); if (remaining > 0) { int n = Math.min(len, remaining); outBuffer.get(b, off, n); return n; } else { int n = 0; - /** + + /* * Check whether the underlying stream is {@link ByteBufferReadable}, * it can avoid bytes copy. */ @@ -186,11 +191,11 @@ public int read(byte[] b, int off, int len) throws IOException { } } - // Read data from underlying stream. + /** Read data from underlying stream. */ private int readFromUnderlyingStream() throws IOException { - int toRead = inBuffer.remaining(); - byte[] tmp = getTmpBuf(); - int n = in.read(tmp, 0, toRead); + final int toRead = inBuffer.remaining(); + final byte[] tmp = getTmpBuf(); + final int n = in.read(tmp, 0, toRead); if (n > 0) { inBuffer.put(tmp, 0, n); } @@ -221,19 +226,19 @@ private void decrypt() throws IOException { inBuffer.clear(); outBuffer.flip(); if (padding > 0) { - /** - * The plain text and cipher text have 1:1 mapping, they start at same - * position. + /* + * The plain text and cipher text have a 1:1 mapping, they start at the + * same position. */ outBuffer.position(padding); padding = 0; } if (decryptor.isContextReset()) { - /** - * Typically we will not get here. To improve performance in CTR mode, - * we rely on the decryptor maintaining context, for example calculating - * the counter. Unfortunately, some bad implementations can't maintain - * context so we need to re-init after doing decryption. 
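The padding and counter bookkeeping described in the stream javadoc above (padding = pos % blocksize) comes down to block arithmetic on the stream offset. A worked example with the 16-byte AES block size; the offset is just an example value.

    int blockSize = 16;                                // AES block size in bytes
    long streamOffset = 8200;                          // arbitrary position in the ciphertext
    long counter = streamOffset / blockSize;           // 512 whole blocks precede this position
    byte padding = (byte) (streamOffset % blockSize);  // 8 bytes into block 512
    // calculateIV(initIV, counter, iv) re-derives the IV for block 512, and the first
    // 'padding' decrypted bytes of that block are discarded so that plaintext and
    // ciphertext positions stay aligned.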
+ /* + * This code is generally not executed since the decryptor usually + * maintains decryption context (e.g. the counter) internally. However, + * some implementations can't maintain context so a re-init is necessary + * after each decryption call. */ updateDecryptor(); } @@ -243,7 +248,7 @@ private void decrypt() throws IOException { * Update the {@link #decryptor}. Calculate the counter and {@link #padding}. */ private void updateDecryptor() throws IOException { - long counter = streamOffset / codec.getAlgorithmBlockSize(); + final long counter = streamOffset / codec.getAlgorithmBlockSize(); padding = (byte)(streamOffset % codec.getAlgorithmBlockSize()); inBuffer.position(padding); // Set proper position for input data. codec.calculateIV(initIV, counter, iv); @@ -251,8 +256,8 @@ private void updateDecryptor() throws IOException { } /** - * Reset the underlying stream offset; and clear {@link #inBuffer} and - * {@link #outBuffer}. Typically this happens when doing {@link #seek(long)} + * Reset the underlying stream offset, and clear {@link #inBuffer} and + * {@link #outBuffer}. This Typically happens during {@link #seek(long)} * or {@link #skip(long)}. */ private void resetStreamOffset(long offset) throws IOException { @@ -274,30 +279,29 @@ public void close() throws IOException { closed = true; } - /** - * Free the direct buffer manually. - */ + /** Forcibly free the direct buffer. */ private void freeBuffers() { - sun.misc.Cleaner inBufferCleaner = + final sun.misc.Cleaner inBufferCleaner = ((sun.nio.ch.DirectBuffer) inBuffer).cleaner(); inBufferCleaner.clean(); - sun.misc.Cleaner outBufferCleaner = + final sun.misc.Cleaner outBufferCleaner = ((sun.nio.ch.DirectBuffer) outBuffer).cleaner(); outBufferCleaner.clean(); } - // Positioned read. + /** Positioned read. */ @Override public int read(long position, byte[] buffer, int offset, int length) throws IOException { checkStream(); try { - int n = ((PositionedReadable) in).read(position, buffer, offset, length); + final int n = ((PositionedReadable) in).read(position, buffer, offset, + length); if (n > 0) { - /** + /* * Since this operation does not change the current offset of a file, - * streamOffset should be not changed and we need to restore the - * decryptor and outBuffer after decryption. + * streamOffset should not be changed. We need to restore the decryptor + * and outBuffer after decryption. */ decrypt(position, buffer, offset, length); } @@ -310,24 +314,23 @@ public int read(long position, byte[] buffer, int offset, int length) } /** - * Decrypt given length of data in buffer: start from offset. - * Output is also buffer and start from same offset. Restore the - * {@link #decryptor} and {@link #outBuffer} after decryption. + * Decrypt length bytes in buffer starting at offset. Output is also put + * into buffer starting at offset. Restore the {@link #decryptor} and + * {@link #outBuffer} after the decryption. 
*/ private void decrypt(long position, byte[] buffer, int offset, int length) throws IOException { - - byte[] tmp = getTmpBuf(); + final byte[] tmp = getTmpBuf(); int unread = outBuffer.remaining(); if (unread > 0) { // Cache outBuffer outBuffer.get(tmp, 0, unread); } - long curOffset = streamOffset; + final long curOffset = streamOffset; resetStreamOffset(position); int n = 0; while (n < length) { - int toDecrypt = Math.min(length - n, inBuffer.remaining()); + final int toDecrypt = Math.min(length - n, inBuffer.remaining()); inBuffer.put(buffer, offset + n, toDecrypt); // Do decryption decrypt(); @@ -344,7 +347,7 @@ private void decrypt(long position, byte[] buffer, int offset, int length) } } - // Positioned read fully. + /** Positioned read fully. */ @Override public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { @@ -352,9 +355,9 @@ public void readFully(long position, byte[] buffer, int offset, int length) try { ((PositionedReadable) in).readFully(position, buffer, offset, length); if (length > 0) { - /** - * Since this operation does not change the current offset of a file, - * streamOffset should be not changed and we need to restore the decryptor + /* + * Since this operation does not change the current offset of the file, + * streamOffset should not be changed. We need to restore the decryptor * and outBuffer after decryption. */ decrypt(position, buffer, offset, length); @@ -370,13 +373,16 @@ public void readFully(long position, byte[] buffer) throws IOException { readFully(position, buffer, 0, buffer.length); } - // Seek to a position. + /** Seek to a position. */ @Override public void seek(long pos) throws IOException { Preconditions.checkArgument(pos >= 0, "Cannot seek to negative offset."); checkStream(); try { - // If target pos we have already read and decrypt. + /* + * If data of target pos in the underlying stream has already been read + * and decrypted in outBuffer, we just need to re-position outBuffer. + */ if (pos <= streamOffset && pos >= (streamOffset - outBuffer.remaining())) { int forward = (int) (pos - (streamOffset - outBuffer.remaining())); if (forward > 0) { @@ -392,7 +398,7 @@ public void seek(long pos) throws IOException { } } - // Skip n bytes + /** Skip n bytes */ @Override public long skip(long n) throws IOException { Preconditions.checkArgument(n >= 0, "Negative skip length."); @@ -405,11 +411,11 @@ public long skip(long n) throws IOException { outBuffer.position(pos); return n; } else { - /** + /* * Subtract outBuffer.remaining() to see how many bytes we need to - * skip in underlying stream. We get real skipped bytes number of - * underlying stream then add outBuffer.remaining() to get skipped - * bytes number from user's view. + * skip in the underlying stream. Add outBuffer.remaining() to the + * actual number of skipped bytes in the underlying stream to get the + * number of skipped bytes from the user's point of view. */ n -= outBuffer.remaining(); long skipped = in.skip(n); @@ -423,7 +429,7 @@ public long skip(long n) throws IOException { } } - // Get underlying stream position. + /** Get underlying stream position. */ @Override public long getPos() throws IOException { checkStream(); @@ -431,16 +437,16 @@ public long getPos() throws IOException { return streamOffset - outBuffer.remaining(); } - // ByteBuffer read. + /** ByteBuffer read. 
*/ @Override public int read(ByteBuffer buf) throws IOException { checkStream(); if (in instanceof ByteBufferReadable) { - int unread = outBuffer.remaining(); + final int unread = outBuffer.remaining(); if (unread > 0) { // Have unread decrypted data in buffer. int toRead = buf.remaining(); if (toRead <= unread) { - int limit = outBuffer.limit(); + final int limit = outBuffer.limit(); outBuffer.limit(outBuffer.position() + toRead); buf.put(outBuffer); outBuffer.limit(limit); @@ -450,8 +456,8 @@ public int read(ByteBuffer buf) throws IOException { } } - int pos = buf.position(); - int n = ((ByteBufferReadable) in).read(buf); + final int pos = buf.position(); + final int n = ((ByteBufferReadable) in).read(buf); if (n > 0) { streamOffset += n; // Read n bytes decrypt(buf, n, pos); @@ -470,8 +476,8 @@ public int read(ByteBuffer buf) throws IOException { */ private void decrypt(ByteBuffer buf, int n, int start) throws IOException { - int pos = buf.position(); - int limit = buf.limit(); + final int pos = buf.position(); + final int limit = buf.limit(); int len = 0; while (len < n) { buf.position(start + len); @@ -535,13 +541,13 @@ public ByteBuffer read(ByteBufferPool bufferPool, int maxLength, ((Seekable) in).seek(getPos()); resetStreamOffset(getPos()); } - ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in). + final ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in). read(bufferPool, maxLength, opts); if (buffer != null) { - int n = buffer.remaining(); + final int n = buffer.remaining(); if (n > 0) { streamOffset += buffer.remaining(); // Read n bytes - int pos = buffer.position(); + final int pos = buffer.position(); decrypt(buffer, n, pos); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java index 934c4479352..c5bd1dd6b5d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java @@ -52,17 +52,20 @@ public class CryptoOutputStream extends FilterOutputStream implements private static final byte[] oneByteBuf = new byte[1]; private final CryptoCodec codec; private final Encryptor encryptor; + /** * Input data buffer. The data starts at inBuffer.position() and ends at * inBuffer.limit(). */ private ByteBuffer inBuffer; + /** * Encrypted data buffer. The data starts at outBuffer.position() and ends at * outBuffer.limit(); */ private ByteBuffer outBuffer; private long streamOffset = 0; // Underlying stream offset. + /** * Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer} * before any other data goes in. The purpose of padding is to put input data @@ -134,7 +137,7 @@ public void write(byte[] b, int off, int len) throws IOException { throw new IndexOutOfBoundsException(); } while (len > 0) { - int remaining = inBuffer.remaining(); + final int remaining = inBuffer.remaining(); if (len < remaining) { inBuffer.put(b, off, len); len = 0; @@ -163,15 +166,16 @@ private void encrypt() throws IOException { inBuffer.clear(); outBuffer.flip(); if (padding > 0) { - /** - * The plain text and cipher text have 1:1 mapping, they start at same - * position. + /* + * The plain text and cipher text have a 1:1 mapping, they start at the + * same position. 
*/ outBuffer.position(padding); padding = 0; } - int len = outBuffer.remaining(); - /** + final int len = outBuffer.remaining(); + + /* * If underlying stream supports {@link ByteBuffer} write in future, needs * refine here. */ @@ -181,12 +185,11 @@ private void encrypt() throws IOException { streamOffset += len; if (encryptor.isContextReset()) { - /** - * We will generally not get here. For CTR mode, to improve - * performance, we rely on the encryptor maintaining context, for - * example to calculate the counter. But some bad implementations - * can't maintain context, and need us to re-init after doing - * encryption. + /* + * This code is generally not executed since the encryptor usually + * maintains encryption context (e.g. the counter) internally. However, + * some implementations can't maintain context so a re-init is necessary + * after each encryption call. */ updateEncryptor(); } @@ -196,7 +199,7 @@ private void encrypt() throws IOException { * Update the {@link #encryptor}: calculate counter and {@link #padding}. */ private void updateEncryptor() throws IOException { - long counter = streamOffset / codec.getAlgorithmBlockSize(); + final long counter = streamOffset / codec.getAlgorithmBlockSize(); padding = (byte)(streamOffset % codec.getAlgorithmBlockSize()); inBuffer.position(padding); // Set proper position for input data. codec.calculateIV(initIV, counter, iv); @@ -222,21 +225,19 @@ public void close() throws IOException { closed = true; } - /** - * Free the direct buffer manually. - */ + /** Forcibly free the direct buffer. */ private void freeBuffers() { - sun.misc.Cleaner inBufferCleaner = + final sun.misc.Cleaner inBufferCleaner = ((sun.nio.ch.DirectBuffer) inBuffer).cleaner(); inBufferCleaner.clean(); - sun.misc.Cleaner outBufferCleaner = + final sun.misc.Cleaner outBufferCleaner = ((sun.nio.ch.DirectBuffer) outBuffer).cleaner(); outBufferCleaner.clean(); } /** - * To flush, we need to encrypt the data in buffer and write to underlying - * stream, then do the flush. + * To flush, we need to encrypt the data in the buffer and write to the + * underlying stream, then do the flush. */ @Override public void flush() throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java index 4afb2216580..ded016a4acc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java @@ -28,7 +28,7 @@ public interface Decryptor { /** - * Initialize the decryptor, the internal decryption context will be + * Initialize the decryptor and the internal decryption context. * reset. * @param key decryption key. * @param iv decryption initialization vector @@ -37,37 +37,34 @@ public interface Decryptor { public void init(byte[] key, byte[] iv) throws IOException; /** - * Indicate whether decryption context is reset. + * Indicate whether the decryption context is reset. *
- * It's useful for some mode like CTR which requires different IV for - * different parts of data. Usually decryptor can maintain the context - * internally such as calculating IV/counter, then continue a multiple-part - * decryption operation without reinit the decryptor using key and the new - * IV. For mode like CTR, if context is reset after each decryption, the - * decryptor should be reinit before each operation, that's not efficient. + * Certain modes, like CTR, require a different IV depending on the + * position in the stream. Generally, the decryptor maintains any necessary + * context for calculating the IV and counter so that no reinit is necessary + * during the decryption. Reinit before each operation is inefficient. * @return boolean whether context is reset. */ public boolean isContextReset(); /** - * This exposes a direct interface for record decryption with direct byte - * buffers. + * This presents a direct interface decrypting with direct ByteBuffers. *
- * The decrypt() function need not always consume the buffers provided, - * it will need to be called multiple times to decrypt an entire buffer - * and the object will hold the decryption context internally. + * This function does not always decrypt the entire buffer and may potentially + * need to be called multiple times to process an entire buffer. The object + * may hold the decryption context internally. *
- * Some implementation may need enough space in the destination buffer to - * decrypt an entire input. + * Some implementations may require sufficient space in the destination + * buffer to decrypt the entire input buffer. *
- * The end result will move inBuffer.position() by the bytes-read and - * outBuffer.position() by the bytes-written. It should not modify the - * inBuffer.limit() or outBuffer.limit() to maintain consistency of operation. + * Upon return, inBuffer.position() will be advanced by the number of bytes + * read and outBuffer.position() by bytes written. Implementations should + * not modify inBuffer.limit() and outBuffer.limit(). *
- * @param inBuffer in direct {@link ByteBuffer} for reading from. Requires - * inBuffer != null and inBuffer.remaining() > 0 - * @param outBuffer out direct {@link ByteBuffer} for storing the results - * into. Requires outBuffer != null and outBuffer.remaining() > 0 + * @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may + * not be null and inBuffer.remaining() must be > 0 + * @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may + * not be null and outBuffer.remaining() must be > 0 * @throws IOException if decryption fails */ public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java index 398cc2e9aec..00213b8cbbf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java @@ -28,8 +28,7 @@ public interface Encryptor { /** - * Initialize the encryptor, the internal encryption context will be - * reset. + * Initialize the encryptor and the internal encryption context. * @param key encryption key. * @param iv encryption initialization vector * @throws IOException if initialization fails @@ -37,37 +36,34 @@ public interface Encryptor { public void init(byte[] key, byte[] iv) throws IOException; /** - * Indicate whether encryption context is reset. + * Indicate whether the encryption context is reset. *
- * It's useful for some mode like CTR which requires different IV for - * different parts of data. Usually encryptor can maintain the context - * internally such as calculating IV/counter, then continue a multiple-part - * encryption operation without reinit the encryptor using key and the new - * IV. For mode like CTR, if context is reset after each encryption, the - * encryptor should be reinit before each operation, that's not efficient. + * Certain modes, like CTR, require a different IV depending on the + * position in the stream. Generally, the encryptor maintains any necessary + * context for calculating the IV and counter so that no reinit is necessary + * during the encryption. Reinit before each operation is inefficient. * @return boolean whether context is reset. */ public boolean isContextReset(); /** - * This exposes a direct interface for record encryption with direct byte - * buffers. + * This presents a direct interface encrypting with direct ByteBuffers. *
- * The encrypt() function need not always consume the buffers provided, - * it will need to be called multiple times to encrypt an entire buffer - * and the object will hold the encryption context internally. + * This function does not always encrypt the entire buffer and may potentially + * need to be called multiple times to process an entire buffer. The object + * may hold the encryption context internally. *
- * Some implementation may need enough space in the destination buffer to - * encrypt an entire input. + * Some implementations may require sufficient space in the destination + * buffer to encrypt the entire input buffer. *
- * The end result will move inBuffer.position() by the bytes-read and - * outBuffer.position() by the bytes-written. It should not modify the - * inBuffer.limit() or outBuffer.limit() to maintain consistency of operation. + * Upon return, inBuffer.position() will be advanced by the number of bytes + * read and outBuffer.position() by bytes written. Implementations should + * not modify inBuffer.limit() and outBuffer.limit(). *
- * @param inBuffer in direct {@link ByteBuffer} for reading from. Requires - * inBuffer != null and inBuffer.remaining() > 0 - * @param outBuffer out direct {@link ByteBuffer} for storing the results - * into. Requires outBuffer != null and outBuffer.remaining() > 0 + * @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may + * not be null and inBuffer.remaining() must be > 0 + * @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may + * not be null and outBuffer.remaining() must be > 0 * @throws IOException if encryption fails */ public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java index a3fb13f6291..569d3de9704 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java @@ -53,8 +53,8 @@ public void init(byte[] key, byte[] iv) throws IOException { } /** - * For AES-CTR, will consume all input data and needs enough space in the - * destination buffer to decrypt entire input data. + * AES-CTR will consume all of the input data. It requires enough space in + * the destination buffer to decrypt entire input buffer. */ @Override public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java index 9ee70dc4723..28e45735110 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java @@ -53,8 +53,8 @@ public void init(byte[] key, byte[] iv) throws IOException { } /** - * For AES-CTR, will consume all input data and needs enough space in the - * destination buffer to encrypt entire input data. + * AES-CTR will consume all of the input data. It requires enough space in + * the destination buffer to encrypt entire input buffer. */ @Override public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 6073c1a9155..718310f0626 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1370,8 +1370,7 @@ hadoop.security.crypto.buffer.size 8192 - The buffer size used in Crypto InputStream and OutputStream, and default - value is 8192. + The buffer size used by CryptoInputStream and CryptoOutputStream. 
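For reference, the direct-ByteBuffer contract spelled out in the reworked Decryptor/Encryptor javadocs above can be exercised directly against the JCE AES/CTR cipher that JCEAESCTRDecryptor and JCEAESCTREncryptor wrap. The following is only an illustrative sketch (the class name and the all-zero demo key/IV are assumptions, not part of the patch): with CTR the update typically consumes the whole input, advances both buffer positions, and leaves the limits untouched, which is what the contract asks implementations to preserve.

// Illustrative sketch only; class name and demo key/IV are assumptions.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

public class DirectBufferCtrContractSketch {
  public static void main(String[] args) throws Exception {
    byte[] key = new byte[16];        // 128-bit all-zero demo key
    byte[] iv = new byte[16];         // all-zero demo counter block

    Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
    cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"),
        new IvParameterSpec(iv));

    ByteBuffer in = ByteBuffer.allocateDirect(64);
    ByteBuffer out = ByteBuffer.allocateDirect(64);
    in.put("hello crypto stream".getBytes(StandardCharsets.UTF_8));
    in.flip();
    final int inLimit = in.limit();
    final int outLimit = out.limit();

    // With CTR, update() typically consumes all of 'in' (see javadoc above).
    int written = cipher.update(in, out);

    // Positions are advanced by bytes read/written; limits stay unchanged.
    System.out.println("input fully consumed: " + (in.remaining() == 0));
    System.out.println("bytes written: " + written);
    System.out.println("limits unchanged: "
        + (in.limit() == inLimit && out.limit() == outLimit));
  }
}

This mirrors the check in the JCE cipher classes above, where an update() that produces less than the input size falls back to doFinal() and reports a context reset.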
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java index 7f36c2b2495..f5a8ad49ec5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java @@ -42,7 +42,7 @@ import org.junit.Test; public abstract class CryptoStreamsTestBase { - protected static final Log LOG= LogFactory.getLog( + protected static final Log LOG = LogFactory.getLog( CryptoStreamsTestBase.class); protected static CryptoCodec codec; @@ -60,13 +60,13 @@ public abstract class CryptoStreamsTestBase { @Before public void setUp() throws IOException { // Generate data - int seed = new Random().nextInt(); - DataOutputBuffer dataBuf = new DataOutputBuffer(); - RandomDatum.Generator generator = new RandomDatum.Generator(seed); - for(int i=0; i < count; ++i) { + final int seed = new Random().nextInt(); + final DataOutputBuffer dataBuf = new DataOutputBuffer(); + final RandomDatum.Generator generator = new RandomDatum.Generator(seed); + for(int i = 0; i < count; ++i) { generator.next(); - RandomDatum key = generator.getKey(); - RandomDatum value = generator.getValue(); + final RandomDatum key = generator.getKey(); + final RandomDatum value = generator.getValue(); key.write(dataBuf); value.write(dataBuf); @@ -114,9 +114,7 @@ protected InputStream getInputStream(int bufferSize) throws IOException { protected abstract InputStream getInputStream(int bufferSize, byte[] key, byte[] iv) throws IOException; - /** - * Test crypto reading with different buffer size. - */ + /** Test crypto reading with different buffer size. */ @Test(timeout=120000) public void testRead() throws Exception { OutputStream out = getOutputStream(defaultBufferSize); @@ -148,9 +146,7 @@ private void readCheck(InputStream in) throws Exception { in.close(); } - /** - * Test crypto with different IV. - */ + /** Test crypto with different IV. */ @Test(timeout=120000) public void testCryptoIV() throws Exception { byte[] iv1 = iv.clone(); @@ -202,7 +198,7 @@ public void testSyncable() throws IOException { private void syncableCheck() throws IOException { OutputStream out = getOutputStream(smallBufferSize); try { - int bytesWritten = dataLen/3; + int bytesWritten = dataLen / 3; out.write(data, 0, bytesWritten); ((Syncable) out).hflush(); @@ -223,9 +219,9 @@ private void syncableCheck() throws IOException { private void verify(InputStream in, int bytesToVerify, byte[] expectedBytes) throws IOException { - byte[] readBuf = new byte[bytesToVerify]; + final byte[] readBuf = new byte[bytesToVerify]; readAll(in, readBuf, 0, bytesToVerify); - for (int i=0; i Date: Fri, 30 May 2014 08:08:36 +0000 Subject: [PATCH 005/354] HADOOP-10632. Minor improvements to Crypto input and output streams. 
Contributed by Yi Liu git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1598485 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 5 +- .../hadoop/crypto/AESCTRCryptoCodec.java | 29 +- .../org/apache/hadoop/crypto/CryptoCodec.java | 12 +- .../hadoop/crypto/CryptoInputStream.java | 255 +++++++++++------- .../hadoop/crypto/CryptoOutputStream.java | 50 ++-- .../hadoop/crypto/CryptoStreamUtils.java | 55 ++++ .../org/apache/hadoop/crypto/Decryptor.java | 2 +- .../org/apache/hadoop/crypto/Encryptor.java | 2 +- .../hadoop/crypto/JCEAESCTRCryptoCodec.java | 93 ++++++- .../hadoop/crypto/JCEAESCTRDecryptor.java | 84 ------ .../hadoop/crypto/JCEAESCTREncryptor.java | 84 ------ 11 files changed, 349 insertions(+), 322 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index 5d06794ff88..1e7c6f62f17 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -12,7 +12,10 @@ fs-encryption (Unreleased) interfaces. (Yi Liu and Charles Lamb) HADOOP-10628. Javadoc and few code style improvement for Crypto - input and output streams. (yliu via clamb) + input and output streams. (Yi Liu via clamb) + + HADOOP-10632. Minor improvements to Crypto input and output streams. + (Yi Liu) OPTIMIZATIONS diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java index a39af7ef8b7..252e001e66f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java @@ -17,15 +17,12 @@ */ package org.apache.hadoop.crypto; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import com.google.common.base.Preconditions; -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Evolving public abstract class AESCTRCryptoCodec extends CryptoCodec { /** @@ -33,6 +30,7 @@ public abstract class AESCTRCryptoCodec extends CryptoCodec { * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard */ private static final int AES_BLOCK_SIZE = 16; + private static final int CTR_OFFSET = 8; @Override public int getAlgorithmBlockSize() { @@ -48,10 +46,23 @@ public void calculateIV(byte[] initIV, long counter, byte[] IV) { Preconditions.checkArgument(initIV.length == AES_BLOCK_SIZE); Preconditions.checkArgument(IV.length == AES_BLOCK_SIZE); - final ByteBuffer buf = ByteBuffer.wrap(IV); - buf.put(initIV); - buf.order(ByteOrder.BIG_ENDIAN); - counter += buf.getLong(AES_BLOCK_SIZE - 8); - buf.putLong(AES_BLOCK_SIZE - 8, counter); + System.arraycopy(initIV, 0, IV, 0, CTR_OFFSET); + long l = (initIV[CTR_OFFSET + 0] << 56) + + ((initIV[CTR_OFFSET + 1] & 0xFF) << 48) + + ((initIV[CTR_OFFSET + 2] & 0xFF) << 40) + + ((initIV[CTR_OFFSET + 3] & 0xFF) << 32) + + ((initIV[CTR_OFFSET + 4] & 0xFF) << 24) + + ((initIV[CTR_OFFSET + 5] & 0xFF) << 16) + + ((initIV[CTR_OFFSET + 6] & 0xFF) << 8) + + (initIV[CTR_OFFSET + 7] & 0xFF); + l += counter; + 
IV[CTR_OFFSET + 0] = (byte) (l >>> 56); + IV[CTR_OFFSET + 1] = (byte) (l >>> 48); + IV[CTR_OFFSET + 2] = (byte) (l >>> 40); + IV[CTR_OFFSET + 3] = (byte) (l >>> 32); + IV[CTR_OFFSET + 4] = (byte) (l >>> 24); + IV[CTR_OFFSET + 5] = (byte) (l >>> 16); + IV[CTR_OFFSET + 6] = (byte) (l >>> 8); + IV[CTR_OFFSET + 7] = (byte) (l); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index dd20d61faa8..da695e97571 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -29,7 +29,7 @@ /** * Crypto codec class, encapsulates encryptor/decryptor pair. */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Evolving public abstract class CryptoCodec implements Configurable { @@ -48,21 +48,21 @@ public static CryptoCodec getInstance(Configuration conf) { public abstract int getAlgorithmBlockSize(); /** - * Get an {@link #org.apache.hadoop.crypto.Encryptor}. + * Create a {@link org.apache.hadoop.crypto.Encryptor}. * @return Encryptor the encryptor */ - public abstract Encryptor getEncryptor() throws GeneralSecurityException; + public abstract Encryptor createEncryptor() throws GeneralSecurityException; /** - * Get a {@link #org.apache.hadoop.crypto.Decryptor}. + * Create a {@link org.apache.hadoop.crypto.Decryptor}. * @return Decryptor the decryptor */ - public abstract Decryptor getDecryptor() throws GeneralSecurityException; + public abstract Decryptor createDecryptor() throws GeneralSecurityException; /** * This interface is only for Counter (CTR) mode. Generally the Encryptor * or Decryptor calculates the IV and maintain encryption context internally. - * For example a {@link #javax.crypto.Cipher} will maintain its encryption + * For example a {@link javax.crypto.Cipher} will maintain its encryption * context internally when we do encryption/decryption using the * Cipher#update interface. *
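To make the CTR counter arithmetic concrete, the sketch below (an editor's illustration under assumed names; the patch itself unrolls the addition into byte shifts for speed) shows the relationship the streams rely on: counter = position / blockSize, padding = position % blockSize, and the IV for a position is the initial IV with the counter added big-endian into its last 8 bytes.

// Illustrative sketch only; not part of the patch.
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class CtrIvSketch {
  private static final int AES_BLOCK_SIZE = 16;

  static byte[] ivForPosition(byte[] initIV, long pos) {
    long counter = pos / AES_BLOCK_SIZE;          // which AES block we are in
    byte padding = (byte) (pos % AES_BLOCK_SIZE); // offset inside that block
    ByteBuffer buf = ByteBuffer.allocate(AES_BLOCK_SIZE)
        .order(ByteOrder.BIG_ENDIAN);
    buf.put(initIV);
    buf.putLong(AES_BLOCK_SIZE - 8,
        buf.getLong(AES_BLOCK_SIZE - 8) + counter);
    System.out.println("pos=" + pos + " counter=" + counter
        + " padding=" + padding);
    return buf.array();
  }

  public static void main(String[] args) {
    byte[] initIV = new byte[AES_BLOCK_SIZE];  // all-zero demo IV
    byte[] iv = ivForPosition(initIV, 80);     // counter = 5, padding = 0
    System.out.println("low IV byte: " + iv[AES_BLOCK_SIZE - 1]); // prints 5
  }
}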
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java index 374ca2e9483..e3eea41b1d4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java @@ -25,10 +25,11 @@ import java.nio.ByteBuffer; import java.security.GeneralSecurityException; import java.util.EnumSet; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ByteBufferReadable; import org.apache.hadoop.fs.CanSetDropBehind; import org.apache.hadoop.fs.CanSetReadahead; @@ -38,8 +39,6 @@ import org.apache.hadoop.fs.ReadOption; import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.io.ByteBufferPool; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT; import com.google.common.base.Preconditions; @@ -54,15 +53,15 @@ *
* The underlying stream offset is maintained as state. */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Evolving public class CryptoInputStream extends FilterInputStream implements Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess { - private static final int MIN_BUFFER_SIZE = 512; private static final byte[] oneByteBuf = new byte[1]; private final CryptoCodec codec; private final Decryptor decryptor; + private final int bufferSize; /** * Input data buffer. The data starts at inBuffer.position() and ends at @@ -79,7 +78,7 @@ public class CryptoInputStream extends FilterInputStream implements /** * Whether the underlying stream supports - * {@link #org.apache.hadoop.fs.ByteBufferReadable} + * {@link org.apache.hadoop.fs.ByteBufferReadable} */ private Boolean usingByteBufferRead = null; @@ -94,32 +93,33 @@ public class CryptoInputStream extends FilterInputStream implements private final byte[] initIV; private byte[] iv; + /** DirectBuffer pool */ + private final Queue bufferPool = + new ConcurrentLinkedQueue(); + /** Decryptor pool */ + private final Queue decryptorPool = + new ConcurrentLinkedQueue(); + public CryptoInputStream(InputStream in, CryptoCodec codec, int bufferSize, byte[] key, byte[] iv) throws IOException { super(in); - Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE, - "Minimum value of buffer size is 512."); - this.key = key; - this.initIV = iv; - this.iv = iv.clone(); - inBuffer = ByteBuffer.allocateDirect(bufferSize); - outBuffer = ByteBuffer.allocateDirect(bufferSize); - outBuffer.limit(0); + this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize); this.codec = codec; - try { - decryptor = codec.getDecryptor(); - } catch (GeneralSecurityException e) { - throw new IOException(e); - } + this.key = key.clone(); + this.initIV = iv.clone(); + this.iv = iv.clone(); + inBuffer = ByteBuffer.allocateDirect(this.bufferSize); + outBuffer = ByteBuffer.allocateDirect(this.bufferSize); + decryptor = getDecryptor(); if (in instanceof Seekable) { streamOffset = ((Seekable) in).getPos(); } - updateDecryptor(); + resetStreamOffset(streamOffset); } public CryptoInputStream(InputStream in, CryptoCodec codec, byte[] key, byte[] iv) throws IOException { - this(in, codec, getBufferSize(codec.getConf()), key, iv); + this(in, codec, CryptoStreamUtils.getBufferSize(codec.getConf()), key, iv); } public InputStream getWrappedStream() { @@ -169,14 +169,14 @@ public int read(byte[] b, int off, int len) throws IOException { usingByteBufferRead = Boolean.FALSE; } } - if (!usingByteBufferRead.booleanValue()) { - n = readFromUnderlyingStream(); + if (!usingByteBufferRead) { + n = readFromUnderlyingStream(inBuffer); } } else { - if (usingByteBufferRead.booleanValue()) { + if (usingByteBufferRead) { n = ((ByteBufferReadable) in).read(inBuffer); } else { - n = readFromUnderlyingStream(); + n = readFromUnderlyingStream(inBuffer); } } if (n <= 0) { @@ -184,7 +184,8 @@ public int read(byte[] b, int off, int len) throws IOException { } streamOffset += n; // Read n bytes - decrypt(); + decrypt(decryptor, inBuffer, outBuffer, padding); + padding = afterDecryption(decryptor, inBuffer, streamOffset, iv); n = Math.min(len, outBuffer.remaining()); outBuffer.get(b, off, n); return n; @@ -192,7 +193,7 @@ public int read(byte[] b, int off, int len) throws IOException { } /** Read data from underlying stream. 
*/ - private int readFromUnderlyingStream() throws IOException { + private int readFromUnderlyingStream(ByteBuffer inBuffer) throws IOException { final int toRead = inBuffer.remaining(); final byte[] tmp = getTmpBuf(); final int n = in.read(tmp, 0, toRead); @@ -205,16 +206,18 @@ private int readFromUnderlyingStream() throws IOException { private byte[] tmpBuf; private byte[] getTmpBuf() { if (tmpBuf == null) { - tmpBuf = new byte[inBuffer.capacity()]; + tmpBuf = new byte[bufferSize]; } return tmpBuf; } /** - * Do the decryption using {@link #inBuffer} as input and {@link #outBuffer} - * as output. + * Do the decryption using inBuffer as input and outBuffer as output. + * Upon return, inBuffer is cleared; the decrypted data starts at + * outBuffer.position() and ends at outBuffer.limit(); */ - private void decrypt() throws IOException { + private void decrypt(Decryptor decryptor, ByteBuffer inBuffer, + ByteBuffer outBuffer, byte padding) throws IOException { Preconditions.checkState(inBuffer.position() >= padding); if(inBuffer.position() == padding) { // There is no real data in inBuffer. @@ -231,8 +234,16 @@ private void decrypt() throws IOException { * same position. */ outBuffer.position(padding); - padding = 0; } + } + + /** + * This method is executed immediately after decryption. Check whether + * decryptor should be updated and recalculate padding if needed. + */ + private byte afterDecryption(Decryptor decryptor, ByteBuffer inBuffer, + long position, byte[] iv) throws IOException { + byte padding = 0; if (decryptor.isContextReset()) { /* * This code is generally not executed since the decryptor usually @@ -240,23 +251,31 @@ private void decrypt() throws IOException { * some implementations can't maintain context so a re-init is necessary * after each decryption call. */ - updateDecryptor(); + updateDecryptor(decryptor, position, iv); + padding = getPadding(position); + inBuffer.position(padding); } + return padding; } - /** - * Update the {@link #decryptor}. Calculate the counter and {@link #padding}. - */ - private void updateDecryptor() throws IOException { - final long counter = streamOffset / codec.getAlgorithmBlockSize(); - padding = (byte)(streamOffset % codec.getAlgorithmBlockSize()); - inBuffer.position(padding); // Set proper position for input data. + private long getCounter(long position) { + return position / codec.getAlgorithmBlockSize(); + } + + private byte getPadding(long position) { + return (byte)(position % codec.getAlgorithmBlockSize()); + } + + /** Calculate the counter and iv, update the decryptor. */ + private void updateDecryptor(Decryptor decryptor, long position, byte[] iv) + throws IOException { + final long counter = getCounter(position); codec.calculateIV(initIV, counter, iv); decryptor.init(key, iv); } /** - * Reset the underlying stream offset, and clear {@link #inBuffer} and + * Reset the underlying stream offset; clear {@link #inBuffer} and * {@link #outBuffer}. This Typically happens during {@link #seek(long)} * or {@link #skip(long)}. */ @@ -265,7 +284,9 @@ private void resetStreamOffset(long offset) throws IOException { inBuffer.clear(); outBuffer.clear(); outBuffer.limit(0); - updateDecryptor(); + updateDecryptor(decryptor, offset, iv); + padding = getPadding(offset); + inBuffer.position(padding); // Set proper position for input data. } @Override @@ -279,17 +300,7 @@ public void close() throws IOException { closed = true; } - /** Forcibly free the direct buffer. 
*/ - private void freeBuffers() { - final sun.misc.Cleaner inBufferCleaner = - ((sun.nio.ch.DirectBuffer) inBuffer).cleaner(); - inBufferCleaner.clean(); - final sun.misc.Cleaner outBufferCleaner = - ((sun.nio.ch.DirectBuffer) outBuffer).cleaner(); - outBufferCleaner.clean(); - } - - /** Positioned read. */ + /** Positioned read. It is thread-safe */ @Override public int read(long position, byte[] buffer, int offset, int length) throws IOException { @@ -298,12 +309,8 @@ public int read(long position, byte[] buffer, int offset, int length) final int n = ((PositionedReadable) in).read(position, buffer, offset, length); if (n > 0) { - /* - * Since this operation does not change the current offset of a file, - * streamOffset should not be changed. We need to restore the decryptor - * and outBuffer after decryption. - */ - decrypt(position, buffer, offset, length); + // This operation does not change the current offset of the file + decrypt(position, buffer, offset, n); } return n; @@ -315,39 +322,39 @@ public int read(long position, byte[] buffer, int offset, int length) /** * Decrypt length bytes in buffer starting at offset. Output is also put - * into buffer starting at offset. Restore the {@link #decryptor} and - * {@link #outBuffer} after the decryption. + * into buffer starting at offset. It is thread-safe. */ private void decrypt(long position, byte[] buffer, int offset, int length) throws IOException { - final byte[] tmp = getTmpBuf(); - int unread = outBuffer.remaining(); - if (unread > 0) { // Cache outBuffer - outBuffer.get(tmp, 0, unread); - } - final long curOffset = streamOffset; - resetStreamOffset(position); - - int n = 0; - while (n < length) { - final int toDecrypt = Math.min(length - n, inBuffer.remaining()); - inBuffer.put(buffer, offset + n, toDecrypt); - // Do decryption - decrypt(); - outBuffer.get(buffer, offset + n, toDecrypt); - n += toDecrypt; - } - - // After decryption - resetStreamOffset(curOffset); - if (unread > 0) { // Restore outBuffer - outBuffer.clear(); - outBuffer.put(tmp, 0, unread); - outBuffer.flip(); + ByteBuffer inBuffer = getBuffer(); + ByteBuffer outBuffer = getBuffer(); + Decryptor decryptor = null; + try { + decryptor = getDecryptor(); + byte[] iv = initIV.clone(); + updateDecryptor(decryptor, position, iv); + byte padding = getPadding(position); + inBuffer.position(padding); // Set proper position for input data. + + int n = 0; + while (n < length) { + int toDecrypt = Math.min(length - n, inBuffer.remaining()); + inBuffer.put(buffer, offset + n, toDecrypt); + // Do decryption + decrypt(decryptor, inBuffer, outBuffer, padding); + + outBuffer.get(buffer, offset + n, toDecrypt); + n += toDecrypt; + padding = afterDecryption(decryptor, inBuffer, position + n, iv); + } + } finally { + returnBuffer(inBuffer); + returnBuffer(outBuffer); + returnDecryptor(decryptor); } } - /** Positioned read fully. */ + /** Positioned read fully. It is thread-safe */ @Override public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { @@ -355,11 +362,7 @@ public void readFully(long position, byte[] buffer, int offset, int length) try { ((PositionedReadable) in).readFully(position, buffer, offset, length); if (length > 0) { - /* - * Since this operation does not change the current offset of the file, - * streamOffset should not be changed. We need to restore the decryptor - * and outBuffer after decryption. 
- */ + // This operation does not change the current offset of the file decrypt(position, buffer, offset, length); } } catch (ClassCastException e) { @@ -484,12 +487,15 @@ private void decrypt(ByteBuffer buf, int n, int start) buf.limit(start + len + Math.min(n - len, inBuffer.remaining())); inBuffer.put(buf); // Do decryption - decrypt(); - - buf.position(start + len); - buf.limit(limit); - len += outBuffer.remaining(); - buf.put(outBuffer); + try { + decrypt(decryptor, inBuffer, outBuffer, padding); + buf.position(start + len); + buf.limit(limit); + len += outBuffer.remaining(); + buf.put(outBuffer); + } finally { + padding = afterDecryption(decryptor, inBuffer, streamOffset - (n - len), iv); + } } buf.position(pos); } @@ -612,8 +618,57 @@ private void checkStream() throws IOException { } } - private static int getBufferSize(Configuration conf) { - return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY, - HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT); + /** Get direct buffer from pool */ + private ByteBuffer getBuffer() { + ByteBuffer buffer = bufferPool.poll(); + if (buffer == null) { + buffer = ByteBuffer.allocateDirect(bufferSize); + } + + return buffer; + } + + /** Return direct buffer to pool */ + private void returnBuffer(ByteBuffer buf) { + if (buf != null) { + buf.clear(); + bufferPool.add(buf); + } + } + + /** Forcibly free the direct buffers. */ + private void freeBuffers() { + CryptoStreamUtils.freeDB(inBuffer); + CryptoStreamUtils.freeDB(outBuffer); + cleanBufferPool(); + } + + /** Clean direct buffer pool */ + private void cleanBufferPool() { + ByteBuffer buf; + while ((buf = bufferPool.poll()) != null) { + CryptoStreamUtils.freeDB(buf); + } + } + + /** Get decryptor from pool */ + private Decryptor getDecryptor() throws IOException { + Decryptor decryptor = decryptorPool.poll(); + if (decryptor == null) { + try { + decryptor = codec.createDecryptor(); + } catch (GeneralSecurityException e) { + throw new IOException(e); + } + } + + return decryptor; + } + + /** Return decryptor to pool */ + private void returnDecryptor(Decryptor decryptor) { + if (decryptor != null) { + decryptorPool.add(decryptor); + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java index c5bd1dd6b5d..61eca0a4616 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java @@ -25,11 +25,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CanSetDropBehind; import org.apache.hadoop.fs.Syncable; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT; import com.google.common.base.Preconditions; @@ -44,14 +41,14 @@ *
* The underlying stream offset is maintained as state. */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Evolving public class CryptoOutputStream extends FilterOutputStream implements Syncable, CanSetDropBehind { - private static final int MIN_BUFFER_SIZE = 512; private static final byte[] oneByteBuf = new byte[1]; private final CryptoCodec codec; private final Encryptor encryptor; + private final int bufferSize; /** * Input data buffer. The data starts at inBuffer.position() and ends at @@ -86,17 +83,16 @@ public CryptoOutputStream(OutputStream out, CryptoCodec codec, int bufferSize, byte[] key, byte[] iv, long streamOffset) throws IOException { super(out); - Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE, - "Minimum value of buffer size is 512."); - this.key = key; - this.initIV = iv; - this.iv = iv.clone(); - inBuffer = ByteBuffer.allocateDirect(bufferSize); - outBuffer = ByteBuffer.allocateDirect(bufferSize); - this.streamOffset = streamOffset; + this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize); this.codec = codec; + this.key = key.clone(); + this.initIV = iv.clone(); + this.iv = iv.clone(); + inBuffer = ByteBuffer.allocateDirect(this.bufferSize); + outBuffer = ByteBuffer.allocateDirect(this.bufferSize); + this.streamOffset = streamOffset; try { - encryptor = codec.getEncryptor(); + encryptor = codec.createEncryptor(); } catch (GeneralSecurityException e) { throw new IOException(e); } @@ -110,7 +106,8 @@ public CryptoOutputStream(OutputStream out, CryptoCodec codec, public CryptoOutputStream(OutputStream out, CryptoCodec codec, byte[] key, byte[] iv, long streamOffset) throws IOException { - this(out, codec, getBufferSize(codec.getConf()), key, iv, streamOffset); + this(out, codec, CryptoStreamUtils.getBufferSize(codec.getConf()), + key, iv, streamOffset); } public OutputStream getWrappedStream() { @@ -195,9 +192,7 @@ private void encrypt() throws IOException { } } - /** - * Update the {@link #encryptor}: calculate counter and {@link #padding}. - */ + /** Update the {@link #encryptor}: calculate counter and {@link #padding}. */ private void updateEncryptor() throws IOException { final long counter = streamOffset / codec.getAlgorithmBlockSize(); padding = (byte)(streamOffset % codec.getAlgorithmBlockSize()); @@ -209,7 +204,7 @@ private void updateEncryptor() throws IOException { private byte[] tmpBuf; private byte[] getTmpBuf() { if (tmpBuf == null) { - tmpBuf = new byte[outBuffer.capacity()]; + tmpBuf = new byte[bufferSize]; } return tmpBuf; } @@ -225,16 +220,6 @@ public void close() throws IOException { closed = true; } - /** Forcibly free the direct buffer. */ - private void freeBuffers() { - final sun.misc.Cleaner inBufferCleaner = - ((sun.nio.ch.DirectBuffer) inBuffer).cleaner(); - inBufferCleaner.clean(); - final sun.misc.Cleaner outBufferCleaner = - ((sun.nio.ch.DirectBuffer) outBuffer).cleaner(); - outBufferCleaner.clean(); - } - /** * To flush, we need to encrypt the data in the buffer and write to the * underlying stream, then do the flush. @@ -285,8 +270,9 @@ public void hsync() throws IOException { } } - private static int getBufferSize(Configuration conf) { - return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY, - HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT); + /** Forcibly free the direct buffers. 
*/ + private void freeBuffers() { + CryptoStreamUtils.freeDB(inBuffer); + CryptoStreamUtils.freeDB(outBuffer); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java new file mode 100644 index 00000000000..c9aad816b54 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; + +import com.google.common.base.Preconditions; + +@InterfaceAudience.Private +public class CryptoStreamUtils { + private static final int MIN_BUFFER_SIZE = 512; + + /** Forcibly free the direct buffer. 
*/ + public static void freeDB(ByteBuffer buffer) { + if (buffer instanceof sun.nio.ch.DirectBuffer) { + final sun.misc.Cleaner bufferCleaner = + ((sun.nio.ch.DirectBuffer) buffer).cleaner(); + bufferCleaner.clean(); + } + } + + /** Read crypto buffer size */ + public static int getBufferSize(Configuration conf) { + return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY, + HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT); + } + + /** Check and floor buffer size */ + public static int checkBufferSize(CryptoCodec codec, int bufferSize) { + Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE, + "Minimum value of buffer size is " + MIN_BUFFER_SIZE + "."); + return bufferSize - bufferSize % codec.getAlgorithmBlockSize(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java index ded016a4acc..9958415ebd2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Evolving public interface Decryptor { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java index 00213b8cbbf..6dc3cfbe38f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Evolving public interface Encryptor { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java index aea9e07ee6a..bd0e502eeed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java @@ -17,14 +17,25 @@ */ package org.apache.hadoop.crypto; +import java.io.IOException; +import java.nio.ByteBuffer; import java.security.GeneralSecurityException; +import javax.crypto.Cipher; +import javax.crypto.spec.IvParameterSpec; +import javax.crypto.spec.SecretKeySpec; + +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; + +import com.google.common.base.Preconditions; + import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY; /** * Implement the AES-CTR crypto codec using JCE provider. 
*/ +@InterfaceAudience.Private public class JCEAESCTRCryptoCodec extends AESCTRCryptoCodec { private Configuration conf; private String provider; @@ -44,12 +55,86 @@ public void setConf(Configuration conf) { } @Override - public Encryptor getEncryptor() throws GeneralSecurityException { - return new JCEAESCTREncryptor(provider); + public Encryptor createEncryptor() throws GeneralSecurityException { + return new JCEAESCTRCipher(Cipher.ENCRYPT_MODE, provider); } @Override - public Decryptor getDecryptor() throws GeneralSecurityException { - return new JCEAESCTRDecryptor(provider); + public Decryptor createDecryptor() throws GeneralSecurityException { + return new JCEAESCTRCipher(Cipher.DECRYPT_MODE, provider); + } + + private static class JCEAESCTRCipher implements Encryptor, Decryptor { + private final Cipher cipher; + private final int mode; + private boolean contextReset = false; + + public JCEAESCTRCipher(int mode, String provider) + throws GeneralSecurityException { + this.mode = mode; + if (provider == null || provider.isEmpty()) { + cipher = Cipher.getInstance("AES/CTR/NoPadding"); + } else { + cipher = Cipher.getInstance("AES/CTR/NoPadding", provider); + } + } + + @Override + public void init(byte[] key, byte[] iv) throws IOException { + Preconditions.checkNotNull(key); + Preconditions.checkNotNull(iv); + contextReset = false; + try { + cipher.init(mode, new SecretKeySpec(key, "AES"), + new IvParameterSpec(iv)); + } catch (Exception e) { + throw new IOException(e); + } + } + + /** + * AES-CTR will consume all of the input data. It requires enough space in + * the destination buffer to encrypt entire input buffer. + */ + @Override + public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + process(inBuffer, outBuffer); + } + + /** + * AES-CTR will consume all of the input data. It requires enough space in + * the destination buffer to decrypt entire input buffer. + */ + @Override + public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + process(inBuffer, outBuffer); + } + + private void process(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + try { + int inputSize = inBuffer.remaining(); + // Cipher#update will maintain crypto context. + int n = cipher.update(inBuffer, outBuffer); + if (n < inputSize) { + /** + * Typically code will not get here. Cipher#update will consume all + * input data and put result in outBuffer. + * Cipher#doFinal will reset the crypto context. + */ + contextReset = true; + cipher.doFinal(inBuffer, outBuffer); + } + } catch (Exception e) { + throw new IOException(e); + } + } + + @Override + public boolean isContextReset() { + return contextReset; + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java index 569d3de9704..e69de29bb2d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.crypto; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.security.GeneralSecurityException; - -import javax.crypto.Cipher; -import javax.crypto.spec.IvParameterSpec; -import javax.crypto.spec.SecretKeySpec; - -import com.google.common.base.Preconditions; - -public class JCEAESCTRDecryptor implements Decryptor { - private final Cipher cipher; - private boolean contextReset = false; - - public JCEAESCTRDecryptor(String provider) throws GeneralSecurityException { - if (provider == null || provider.isEmpty()) { - cipher = Cipher.getInstance("AES/CTR/NoPadding"); - } else { - cipher = Cipher.getInstance("AES/CTR/NoPadding", provider); - } - } - - @Override - public void init(byte[] key, byte[] iv) throws IOException { - Preconditions.checkNotNull(key); - Preconditions.checkNotNull(iv); - contextReset = false; - try { - cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(key, "AES"), - new IvParameterSpec(iv)); - } catch (Exception e) { - throw new IOException(e); - } - } - - /** - * AES-CTR will consume all of the input data. It requires enough space in - * the destination buffer to decrypt entire input buffer. - */ - @Override - public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) - throws IOException { - try { - int inputSize = inBuffer.remaining(); - // Cipher#update will maintain decryption context. - int n = cipher.update(inBuffer, outBuffer); - if (n < inputSize) { - /** - * Typically code will not get here. Cipher#update will decrypt all - * input data and put result in outBuffer. - * Cipher#doFinal will reset the decryption context. - */ - contextReset = true; - cipher.doFinal(inBuffer, outBuffer); - } - } catch (Exception e) { - throw new IOException(e); - } - } - - @Override - public boolean isContextReset() { - return contextReset; - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java index 28e45735110..e69de29bb2d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.crypto; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.security.GeneralSecurityException; - -import javax.crypto.Cipher; -import javax.crypto.spec.IvParameterSpec; -import javax.crypto.spec.SecretKeySpec; - -import com.google.common.base.Preconditions; - -public class JCEAESCTREncryptor implements Encryptor { - private final Cipher cipher; - private boolean contextReset = false; - - public JCEAESCTREncryptor(String provider) throws GeneralSecurityException { - if (provider == null || provider.isEmpty()) { - cipher = Cipher.getInstance("AES/CTR/NoPadding"); - } else { - cipher = Cipher.getInstance("AES/CTR/NoPadding", provider); - } - } - - @Override - public void init(byte[] key, byte[] iv) throws IOException { - Preconditions.checkNotNull(key); - Preconditions.checkNotNull(iv); - contextReset = false; - try { - cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"), - new IvParameterSpec(iv)); - } catch (Exception e) { - throw new IOException(e); - } - } - - /** - * AES-CTR will consume all of the input data. It requires enough space in - * the destination buffer to encrypt entire input buffer. - */ - @Override - public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) - throws IOException { - try { - int inputSize = inBuffer.remaining(); - // Cipher#update will maintain encryption context. - int n = cipher.update(inBuffer, outBuffer); - if (n < inputSize) { - /** - * Typically code will not get here. Cipher#update will encrypt all - * input data and put result in outBuffer. - * Cipher#doFinal will reset the encryption context. - */ - contextReset = true; - cipher.doFinal(inBuffer, outBuffer); - } - } catch (Exception e) { - throw new IOException(e); - } - } - - @Override - public boolean isContextReset() { - return contextReset; - } -} From f7921030cd9e3ef1c23f25c1b8defedd8ddcadb4 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Fri, 30 May 2014 08:22:41 +0000 Subject: [PATCH 006/354] HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. Contributed by Yi Liu git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1598493 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 2 + .../org/apache/hadoop/crypto/CryptoCodec.java | 7 ++ .../hadoop/crypto/JCEAESCTRCryptoCodec.java | 21 +++++ .../fs/CommonConfigurationKeysPublic.java | 6 ++ .../src/main/resources/core-default.xml | 8 ++ .../apache/hadoop/crypto/TestCryptoCodec.java | 81 +++++++++++++++++++ 6 files changed, 125 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index 1e7c6f62f17..c8dc9d46709 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -17,6 +17,8 @@ fs-encryption (Unreleased) HADOOP-10632. Minor improvements to Crypto input and output streams. (Yi Liu) + HADOOP-10635. 
Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index da695e97571..277246c087b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -79,4 +79,11 @@ public static CryptoCodec getInstance(Configuration conf) { * @param IV the IV for input stream position */ public abstract void calculateIV(byte[] initIV, long counter, byte[] IV); + + /** + * Generate secure random. + * @param bytes length of the secure random + * @return byte[] the secure random + */ + public abstract byte[] generateSecureRandom(int bytes); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java index bd0e502eeed..22a036a5602 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.security.GeneralSecurityException; +import java.security.SecureRandom; import javax.crypto.Cipher; import javax.crypto.spec.IvParameterSpec; @@ -31,6 +32,8 @@ import com.google.common.base.Preconditions; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT; /** * Implement the AES-CTR crypto codec using JCE provider. @@ -39,6 +42,7 @@ public class JCEAESCTRCryptoCodec extends AESCTRCryptoCodec { private Configuration conf; private String provider; + private SecureRandom random; public JCEAESCTRCryptoCodec() { } @@ -52,6 +56,16 @@ public Configuration getConf() { public void setConf(Configuration conf) { this.conf = conf; provider = conf.get(HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY); + final String secureRandomAlg = conf.get( + HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY, + HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT); + try { + random = (provider != null) ? 
+ SecureRandom.getInstance(secureRandomAlg, provider) : + SecureRandom.getInstance(secureRandomAlg); + } catch (GeneralSecurityException e) { + throw new IllegalArgumentException(e); + } } @Override @@ -64,6 +78,13 @@ public Decryptor createDecryptor() throws GeneralSecurityException { return new JCEAESCTRCipher(Cipher.DECRYPT_MODE, provider); } + @Override + public byte[] generateSecureRandom(int bytes) { + final byte[] data = new byte[bytes]; + random.nextBytes(data); + return data; + } + private static class JCEAESCTRCipher implements Encryptor, Decryptor { private final Cipher cipher; private final int mode; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 6c595c4f057..279bbc7e2b3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -296,5 +296,11 @@ public class CommonConfigurationKeysPublic { /** Class to override Impersonation provider */ public static final String HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS = "hadoop.security.impersonation.provider.class"; + /** See core-default.xml */ + public static final String HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY = + "hadoop.security.secure.random.algorithm"; + /** Defalt value for HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY */ + public static final String HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT = + "SHA1PRNG"; } diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 21c290c43cd..a174d396d5e 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1384,4 +1384,12 @@ The buffer size used by CryptoInputStream and CryptoOutputStream. + + + hadoop.security.secure.random.algorithm + + + The secure random algorithm. + + diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java new file mode 100644 index 00000000000..8213ad8a77b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.crypto; + +import org.apache.hadoop.conf.Configuration; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestCryptoCodec { + private static CryptoCodec codec; + + @BeforeClass + public static void init() throws Exception { + Configuration conf = new Configuration(); + codec = CryptoCodec.getInstance(conf); + } + + @AfterClass + public static void shutdown() throws Exception { + } + + @Test(timeout=120000) + public void testSecureRandom() throws Exception { + // len = 16 + checkSecureRandom(16); + + // len = 32 + checkSecureRandom(32); + + // len = 128 + checkSecureRandom(128); + } + + private void checkSecureRandom(int len) { + byte[] rand = codec.generateSecureRandom(len); + byte[] rand1 = codec.generateSecureRandom(len); + + Assert.assertEquals(len, rand.length); + Assert.assertEquals(len, rand1.length); + Assert.assertFalse(bytesArrayEquals(rand, rand1)); + } + + private boolean bytesArrayEquals(byte[] expected, byte[] actual) { + if ((expected == null && actual != null) || + (expected != null && actual == null)) { + return false; + } + if (expected == null && actual == null) { + return true; + } + + if (expected.length != actual.length) { + return false; + } + + for (int i = 0; i < expected.length; i++) { + if (expected[i] != actual[i]) { + return false; + } + } + + return true; + } +} From 4054a408915d26b54a3958320cc0d7dfbe65b2e0 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Fri, 30 May 2014 23:53:45 +0000 Subject: [PATCH 007/354] HDFS-6388. HDFS integration with KeyProvider. (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1598783 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 2 ++ .../hadoop/hdfs/server/namenode/NameNode.java | 36 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index c8dc9d46709..82f68d75160 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -6,6 +6,8 @@ fs-encryption (Unreleased) NEW FEATURES + HDFS-6388. HDFS integration with KeyProvider. (clamb) + IMPROVEMENTS HADOOP-10603. Crypto input and output streams implementing Hadoop stream diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 9cdad26eb24..504e3ed2013 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -27,6 +27,8 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Trash; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -270,6 +272,9 @@ public long getProtocolVersion(String protocol, private NameNodeRpcServer rpcServer; + /* The KeyProvider, if any. 
*/ + private KeyProvider provider = null; + private JvmPauseMonitor pauseMonitor; private ObjectName nameNodeStatusBeanName; /** @@ -581,6 +586,7 @@ protected void initialize(Configuration conf) throws IOException { startHttpServer(conf); } loadNamesystem(conf); + initializeKeyProvider(conf); rpcServer = createRpcServer(conf); if (clientNamenodeAddress == null) { @@ -699,6 +705,36 @@ private void stopHttpServer() { } } + private void initializeKeyProvider(final Configuration conf) { + try { + final List providers = KeyProviderFactory.getProviders(conf); + if (providers == null) { + return; + } + + if (providers.size() == 0) { + LOG.info("No KeyProviders found."); + return; + } + + if (providers.size() > 1) { + final String err = + "Multiple KeyProviders found. Only one is permitted."; + LOG.error(err); + throw new RuntimeException(err); + } + provider = providers.get(0); + if (provider.isTransient()) { + final String err = + "A KeyProvider was found but it is a transient provider."; + LOG.error(err); + throw new RuntimeException(err); + } + } catch (IOException e) { + LOG.error("Exception while initializing KeyProvider", e); + } + } + /** * Start NameNode. *
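The initializeKeyProvider() hunk above reduces to a simple policy: at most one KeyProvider may be configured, it must not be transient, and when none is configured the NameNode simply runs without encryption support. A standalone sketch of the same resolution logic follows; the class name, the jceks key-store location, and the main() driver are illustrative assumptions rather than part of this patch, and only APIs already used on this branch (KeyProviderFactory, JavaKeyStoreProvider, KeyProvider) appear in it.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class KeyProviderLookupSketch {

      /** Resolve the single persistent KeyProvider configured for the cluster, or null. */
      static KeyProvider lookup(Configuration conf) throws IOException {
        final List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
        if (providers == null || providers.isEmpty()) {
          return null;              // no provider configured: no encryption support
        }
        if (providers.size() > 1) {
          throw new RuntimeException(
              "Multiple KeyProviders found. Only one is permitted.");
        }
        final KeyProvider provider = providers.get(0);
        if (provider.isTransient()) {
          throw new RuntimeException(
              "A KeyProvider was found but it is a transient provider.");
        }
        return provider;
      }

      public static void main(String[] args) throws IOException {
        final Configuration conf = new Configuration();
        // The jceks file location below is an assumption for the example.
        conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
            JavaKeyStoreProvider.SCHEME_NAME + "://file/tmp/test.jks");
        System.out.println("Resolved KeyProvider: " + lookup(conf));
      }
    }

Note that, as written in the patch, only IOException is caught and logged in initializeKeyProvider(); the multiple-provider and transient-provider cases throw RuntimeException and are not swallowed.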

From 75ec5792dfc5472b2aa9da7030d4e2ae7421d87c Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Mon, 2 Jun 2014 14:35:32 +0000 Subject: [PATCH 008/354] HADOOP-10653. Add a new constructor for CryptoInputStream that receives current position of wrapped stream. Contributed by Yi Liu git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1599228 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 +++ .../apache/hadoop/crypto/CryptoInputStream.java | 10 +++++++--- .../apache/hadoop/crypto/CryptoStreamUtils.java | 14 ++++++++++++++ 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index 82f68d75160..c99436606e9 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -21,6 +21,9 @@ fs-encryption (Unreleased) HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu) + HADOOP-10653. Add a new constructor for CryptoInputStream that + receives current position of wrapped stream. (Yi Liu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java index e3eea41b1d4..fe663f2cc9f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java @@ -102,18 +102,22 @@ public class CryptoInputStream extends FilterInputStream implements public CryptoInputStream(InputStream in, CryptoCodec codec, int bufferSize, byte[] key, byte[] iv) throws IOException { + this(in, codec, bufferSize, key, iv, + CryptoStreamUtils.getInputStreamOffset(in)); + } + + public CryptoInputStream(InputStream in, CryptoCodec codec, + int bufferSize, byte[] key, byte[] iv, long streamOffset) throws IOException { super(in); this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize); this.codec = codec; this.key = key.clone(); this.initIV = iv.clone(); this.iv = iv.clone(); + this.streamOffset = streamOffset; inBuffer = ByteBuffer.allocateDirect(this.bufferSize); outBuffer = ByteBuffer.allocateDirect(this.bufferSize); decryptor = getDecryptor(); - if (in instanceof Seekable) { - streamOffset = ((Seekable) in).getPos(); - } resetStreamOffset(streamOffset); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java index c9aad816b54..dfa27df172d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java @@ -20,10 +20,13 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY; +import java.io.IOException; +import java.io.InputStream; import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Seekable; import com.google.common.base.Preconditions; 
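One place the new constructor helps is when the wrapped stream is not Seekable but has already been advanced past some ciphertext: the getInputStreamOffset() helper added below can only report 0 for such a stream, so the caller has to pass the true offset itself to keep the AES-CTR counter aligned with the bytes about to be read. A minimal sketch follows; the local file path, the 4096-byte skip, and the 8192-byte buffer size are illustrative assumptions, and the 16-byte key and IV are arbitrary sample values sized for AES-128.

    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CryptoCodec;
    import org.apache.hadoop.crypto.CryptoInputStream;

    public class OffsetAwareDecryptSketch {
      public static void main(String[] args) throws IOException {
        final Configuration conf = new Configuration();
        final CryptoCodec codec = CryptoCodec.getInstance(conf);

        // Arbitrary 16-byte sample key and IV; a real client would obtain
        // them from the file's metadata or a KeyProvider instead.
        final byte[] key = "0123456789012345".getBytes();
        final byte[] iv = "ABCDEFGJIJKLMNOP".getBytes();

        final InputStream raw = new FileInputStream("/tmp/encrypted.bin");
        final long alreadyConsumed = 4096;   // ciphertext bytes consumed elsewhere
        long remaining = alreadyConsumed;
        while (remaining > 0) {
          final long skipped = raw.skip(remaining);
          if (skipped <= 0) {
            throw new IOException("Unexpected end of stream while skipping");
          }
          remaining -= skipped;
        }

        // FileInputStream is not Seekable, so without the extra argument the
        // stream offset would default to 0 and decryption would be misaligned.
        // Passing the offset explicitly initializes the CTR counter correctly.
        final CryptoInputStream in =
            new CryptoInputStream(raw, codec, 8192, key, iv, alreadyConsumed);
        final int firstByte = in.read();
        System.out.println("First plaintext byte at offset "
            + alreadyConsumed + ": " + firstByte);
        in.close();
      }
    }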
@@ -52,4 +55,15 @@ public static int checkBufferSize(CryptoCodec codec, int bufferSize) { "Minimum value of buffer size is " + MIN_BUFFER_SIZE + "."); return bufferSize - bufferSize % codec.getAlgorithmBlockSize(); } + + /** + * If input stream is {@link org.apache.hadoop.fs.Seekable}, return it's + * current position, otherwise return 0; + */ + public static long getInputStreamOffset(InputStream in) throws IOException { + if (in instanceof Seekable) { + return ((Seekable) in).getPos(); + } + return 0; + } } From d86db3f76f03a63c56e6dd9f5531d3b8a78980f6 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Thu, 5 Jun 2014 01:30:40 +0000 Subject: [PATCH 009/354] HADOOP-10662. NullPointerException in CryptoInputStream while wrapped stream is not ByteBufferReadable. Add tests using normal stream. Contributed by Yi Liu git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1600553 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 + .../hadoop/crypto/CryptoInputStream.java | 2 + .../crypto/TestCryptoStreamsNormal.java | 123 ++++++++++++++++++ 3 files changed, 128 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index c99436606e9..66b55b0f508 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -24,6 +24,9 @@ fs-encryption (Unreleased) HADOOP-10653. Add a new constructor for CryptoInputStream that receives current position of wrapped stream. (Yi Liu) + HADOOP-10662. NullPointerException in CryptoInputStream while wrapped + stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java index fe663f2cc9f..55c891a0ace 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java @@ -172,6 +172,8 @@ public int read(byte[] b, int off, int len) throws IOException { } catch (UnsupportedOperationException e) { usingByteBufferRead = Boolean.FALSE; } + } else { + usingByteBufferRead = Boolean.FALSE; } if (!usingByteBufferRead) { n = readFromUnderlyingStream(inBuffer); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java new file mode 100644 index 00000000000..e9c313fde36 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.hadoop.conf.Configuration; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +/** + * Test crypto streams using normal stream which does not support the + * additional interfaces that the Hadoop FileSystem streams implement + * (Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, + * CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, Syncable, + * CanSetDropBehind) + */ +public class TestCryptoStreamsNormal extends CryptoStreamsTestBase { + /** + * Data storage. + * {@link #getOutputStream(int, byte[], byte[])} will write to this buffer. + * {@link #getInputStream(int, byte[], byte[])} will read from this buffer. + */ + private byte[] buffer; + private int bufferLen; + + @BeforeClass + public static void init() throws Exception { + Configuration conf = new Configuration(); + codec = CryptoCodec.getInstance(conf); + } + + @AfterClass + public static void shutdown() throws Exception { + } + + @Override + protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv) + throws IOException { + OutputStream out = new ByteArrayOutputStream() { + @Override + public void flush() throws IOException { + buffer = buf; + bufferLen = count; + } + @Override + public void close() throws IOException { + buffer = buf; + bufferLen = count; + } + }; + return new CryptoOutputStream(out, codec, bufferSize, key, iv); + } + + @Override + protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv) + throws IOException { + ByteArrayInputStream in = new ByteArrayInputStream(buffer, 0, bufferLen); + return new CryptoInputStream(in, codec, bufferSize, + key, iv); + } + + @Ignore("Wrapped stream doesn't support Syncable") + @Override + @Test(timeout=1000) + public void testSyncable() throws IOException {} + + @Ignore("Wrapped stream doesn't support PositionedRead") + @Override + @Test(timeout=1000) + public void testPositionedRead() throws IOException {} + + @Ignore("Wrapped stream doesn't support ReadFully") + @Override + @Test(timeout=1000) + public void testReadFully() throws IOException {} + + @Ignore("Wrapped stream doesn't support Seek") + @Override + @Test(timeout=1000) + public void testSeek() throws IOException {} + + @Ignore("Wrapped stream doesn't support ByteBufferRead") + @Override + @Test(timeout=1000) + public void testByteBufferRead() throws IOException {} + + @Ignore("Wrapped stream doesn't support ByteBufferRead, Seek") + @Override + @Test(timeout=1000) + public void testCombinedOp() throws IOException {} + + @Ignore("Wrapped stream doesn't support SeekToNewSource") + @Override + @Test(timeout=1000) + public void testSeekToNewSource() throws IOException {} + + @Ignore("Wrapped stream doesn't support HasEnhancedByteBufferAccess") + @Override + @Test(timeout=1000) + public void testHasEnhancedByteBufferAccess() throws IOException {} +} From 
bdee397e95e98ece071345822e2e4d3f690f09c3 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Thu, 5 Jun 2014 10:10:48 +0000 Subject: [PATCH 010/354] HADOOP-6392. Wire crypto streams for encrypted files in DFSClient. (clamb and yliu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1600582 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 + .../apache/hadoop/fs/FSDataOutputStream.java | 2 +- .../main/java/org/apache/hadoop/fs/Hdfs.java | 55 ++- .../apache/hadoop/hdfs/DFSInputStream.java | 22 ++ .../apache/hadoop/hdfs/DFSOutputStream.java | 22 ++ .../hdfs/client/HdfsDataInputStream.java | 38 +- .../hdfs/client/HdfsDataOutputStream.java | 36 +- .../hadoop/hdfs/protocol/HdfsConstants.java | 4 + .../hadoop/hdfs/protocol/HdfsFileStatus.java | 28 +- .../hdfs/protocol/HdfsLocatedFileStatus.java | 6 +- .../hadoop/hdfs/protocol/LocatedBlocks.java | 25 +- .../SnapshottableDirectoryStatus.java | 2 +- .../hadoop/hdfs/protocolPB/PBHelper.java | 20 +- .../server/blockmanagement/BlockManager.java | 8 +- .../hdfs/server/namenode/FSDirectory.java | 8 +- .../org/apache/hadoop/hdfs/web/JsonUtil.java | 5 +- .../hadoop-hdfs/src/main/proto/hdfs.proto | 6 + .../apache/hadoop/fs/TestHDFSEncryption.java | 352 ++++++++++++++++++ .../hadoop/hdfs/TestDFSClientRetries.java | 7 +- .../org/apache/hadoop/hdfs/TestDFSUtil.java | 2 +- .../org/apache/hadoop/hdfs/TestLease.java | 4 +- .../hadoop/hdfs/server/namenode/TestFsck.java | 2 +- .../apache/hadoop/hdfs/web/TestJsonUtil.java | 2 +- 23 files changed, 612 insertions(+), 47 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSEncryption.java diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index 66b55b0f508..e19a4abf7a7 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -10,6 +10,9 @@ fs-encryption (Unreleased) IMPROVEMENTS + HADOOP-6392. Wire crypto streams for encrypted files in + DFSClient. (clamb and yliu) + HADOOP-10603. Crypto input and output streams implementing Hadoop stream interfaces. (Yi Liu and Charles Lamb) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java index 689e9717d22..d8257729af8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java @@ -99,7 +99,7 @@ public void close() throws IOException { } /** - * Get a reference to the wrapped output stream. Used by unit tests. + * Get a reference to the wrapped output stream. 
* * @return the underlying output stream */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index e308a966f55..6b2a01bcf39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.fs; - import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; @@ -31,12 +30,17 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoCodec; +import org.apache.hadoop.crypto.CryptoOutputStream; +import org.apache.hadoop.crypto.CryptoInputStream; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.hdfs.CorruptFileBlockIterator; import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.DFSInputStream; +import org.apache.hadoop.hdfs.DFSOutputStream; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; @@ -53,11 +57,14 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.util.Progressable; +import com.google.common.base.Preconditions; + @InterfaceAudience.Private @InterfaceStability.Evolving public class Hdfs extends AbstractFileSystem { DFSClient dfs; + final CryptoCodec factory; private boolean verifyChecksum = true; static { @@ -84,6 +91,7 @@ public class Hdfs extends AbstractFileSystem { } this.dfs = new DFSClient(theUri, conf, getStatistics()); + this.factory = CryptoCodec.getInstance(conf); } @Override @@ -96,9 +104,27 @@ public HdfsDataOutputStream createInternal(Path f, EnumSet createFlag, FsPermission absolutePermission, int bufferSize, short replication, long blockSize, Progressable progress, ChecksumOpt checksumOpt, boolean createParent) throws IOException { - return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f), - absolutePermission, createFlag, createParent, replication, blockSize, - progress, bufferSize, checksumOpt), getStatistics()); + + final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f), + absolutePermission, createFlag, createParent, replication, blockSize, + progress, bufferSize, checksumOpt); + final byte[] key = dfsos.getKey(); + final byte[] iv = dfsos.getIv(); + Preconditions.checkState(!(key == null ^ iv == null), + "Only one of the Key and IV were found."); + if (false && key != null) { + + /* + * The Key and IV were found. Wrap up the output stream with an encryption + * wrapper. + */ + final CryptoOutputStream cbos = + new CryptoOutputStream(dfsos, factory, key, iv); + return new HdfsDataOutputStream(cbos, getStatistics()); + } else { + /* No key/IV present so no encryption. 
*/ + return new HdfsDataOutputStream(dfsos, getStatistics()); + } } @Override @@ -307,8 +333,25 @@ public void mkdir(Path dir, FsPermission permission, boolean createParent) @Override public HdfsDataInputStream open(Path f, int bufferSize) throws IOException, UnresolvedLinkException { - return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f), - bufferSize, verifyChecksum)); + final DFSInputStream dfsis = dfs.open(getUriPath(f), + bufferSize, verifyChecksum); + final byte[] key = dfsis.getKey(); + final byte[] iv = dfsis.getIv(); + Preconditions.checkState(!(key == null ^ iv == null), + "Only one of the Key and IV were found."); + if (false && key != null) { + + /* + * The Key and IV were found. Wrap up the input stream with an encryption + * wrapper. + */ + final CryptoInputStream cbis = + new CryptoInputStream(dfsis, factory, key, iv); + return new HdfsDataInputStream(cbis); + } else { + /* No key/IV pair so no encryption. */ + return new HdfsDataInputStream(dfsis); + } } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 1750aa7a30b..f9763eced1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -88,6 +88,8 @@ public class DFSInputStream extends FSInputStream private final boolean verifyChecksum; private LocatedBlocks locatedBlocks = null; private long lastBlockBeingWrittenLength = 0; + private byte[] key = null; + private byte[] iv = null; private DatanodeInfo currentNode = null; private LocatedBlock currentLocatedBlock = null; private long pos = 0; @@ -297,6 +299,8 @@ private long fetchLocatedBlocksAndGetLastBlockLength() throws IOException { } } + key = locatedBlocks.getKey(); + iv = locatedBlocks.getIv(); currentNode = null; return lastBlockBeingWrittenLength; } @@ -1517,6 +1521,24 @@ public synchronized ReadStatistics getReadStatistics() { return new ReadStatistics(readStatistics); } + /** + * Get the encryption key for this stream. + * + * @return byte[] the key + */ + public synchronized byte[] getKey() { + return key; + } + + /** + * Get the encryption initialization vector (IV) for this stream. + * + * @return byte[] the initialization vector (IV). 
+ */ + public synchronized byte[] getIv() { + return iv; + } + private synchronized void closeCurrentBlockReader() { if (blockReader == null) return; // Close the current block reader so that the new caching settings can diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 8ae750d8e89..dde4bad7ef2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -154,6 +154,8 @@ public class DFSOutputStream extends FSOutputSummer private boolean shouldSyncBlock = false; // force blocks to disk upon close private final AtomicReference cachingStrategy; private boolean failPacket = false; + private byte[] key = null; + private byte[] iv = null; private static class Packet { private static final long HEART_BEAT_SEQNO = -1L; @@ -1562,6 +1564,8 @@ private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress, this.fileId = stat.getFileId(); this.blockSize = stat.getBlockSize(); this.blockReplication = stat.getReplication(); + this.key = stat.getKey(); + this.iv = stat.getIv(); this.progress = progress; this.cachingStrategy = new AtomicReference( dfsClient.getDefaultWriteCachingStrategy()); @@ -2178,6 +2182,24 @@ long getInitialLen() { return initialFileSize; } + /** + * Get the encryption key for this stream. + * + * @return byte[] the key. + */ + public byte[] getKey() { + return key; + } + + /** + * Get the encryption initialization vector (IV) for this stream. + * + * @return byte[] the initialization vector (IV). + */ + public byte[] getIv() { + return iv; + } + /** * Returns the access token currently used by streamer, for testing only */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java index 9ed895ed7e4..e1269c49a3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java @@ -17,17 +17,21 @@ */ package org.apache.hadoop.hdfs.client; +import java.io.InputStream; import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.crypto.CryptoInputStream; import org.apache.hadoop.hdfs.DFSInputStream; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import com.google.common.base.Preconditions; + /** * The Hdfs implementation of {@link FSDataInputStream}. 
*/ @@ -38,25 +42,49 @@ public HdfsDataInputStream(DFSInputStream in) throws IOException { super(in); } + public HdfsDataInputStream(CryptoInputStream in) throws IOException { + super(in); + Preconditions.checkArgument(in.getWrappedStream() instanceof DFSInputStream, + "CryptoInputStream should wrap a DFSInputStream"); + } + + private DFSInputStream getDFSInputStream() { + if (in instanceof CryptoInputStream) { + return (DFSInputStream) ((CryptoInputStream) in).getWrappedStream(); + } + return (DFSInputStream) in; + } + + /** + * Get a reference to the wrapped output stream. We always want to return the + * actual underlying InputStream, even when we're using a CryptoStream. e.g. + * in the delegated methods below. + * + * @return the underlying output stream + */ + public InputStream getWrappedStream() { + return in; + } + /** * Get the datanode from which the stream is currently reading. */ public DatanodeInfo getCurrentDatanode() { - return ((DFSInputStream) in).getCurrentDatanode(); + return getDFSInputStream().getCurrentDatanode(); } /** * Get the block containing the target position. */ public ExtendedBlock getCurrentBlock() { - return ((DFSInputStream) in).getCurrentBlock(); + return getDFSInputStream().getCurrentBlock(); } /** * Get the collection of blocks that has already been located. */ public synchronized List getAllBlocks() throws IOException { - return ((DFSInputStream) in).getAllBlocks(); + return getDFSInputStream().getAllBlocks(); } /** @@ -66,7 +94,7 @@ public synchronized List getAllBlocks() throws IOException { * @return The visible length of the file. */ public long getVisibleLength() throws IOException { - return ((DFSInputStream) in).getFileLength(); + return getDFSInputStream().getFileLength(); } /** @@ -76,6 +104,6 @@ public long getVisibleLength() throws IOException { * bytes read through HdfsDataInputStream. */ public synchronized DFSInputStream.ReadStatistics getReadStatistics() { - return ((DFSInputStream) in).getReadStatistics(); + return getDFSInputStream().getReadStatistics(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java index adc8764b66f..214967863e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java @@ -18,14 +18,18 @@ package org.apache.hadoop.hdfs.client; import java.io.IOException; +import java.io.OutputStream; import java.util.EnumSet; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.crypto.CryptoOutputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSOutputStream; +import com.google.common.base.Preconditions; + /** * The Hdfs implementation of {@link FSDataOutputStream}. 
*/ @@ -42,6 +46,18 @@ public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats this(out, stats, 0L); } + public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats, + long startPosition) throws IOException { + super(out, stats, startPosition); + Preconditions.checkArgument(out.getWrappedStream() instanceof DFSOutputStream, + "CryptoOutputStream should wrap a DFSOutputStream"); + } + + public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats) + throws IOException { + this(out, stats, 0L); + } + /** * Get the actual number of replicas of the current block. * @@ -55,7 +71,11 @@ public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats * @return the number of valid replicas of the current block */ public synchronized int getCurrentBlockReplication() throws IOException { - return ((DFSOutputStream)getWrappedStream()).getCurrentBlockReplication(); + OutputStream wrappedStream = getWrappedStream(); + if (wrappedStream instanceof CryptoOutputStream) { + wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream(); + } + return ((DFSOutputStream) wrappedStream).getCurrentBlockReplication(); } /** @@ -67,14 +87,20 @@ public synchronized int getCurrentBlockReplication() throws IOException { * @see FSDataOutputStream#hsync() */ public void hsync(EnumSet syncFlags) throws IOException { - ((DFSOutputStream) getWrappedStream()).hsync(syncFlags); + OutputStream wrappedStream = getWrappedStream(); + if (wrappedStream instanceof CryptoOutputStream) { + ((CryptoOutputStream) wrappedStream).flush(); + wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream(); + } + ((DFSOutputStream) wrappedStream).hsync(syncFlags); } public static enum SyncFlag { + /** - * When doing sync to DataNodes, also update the metadata (block - * length) in the NameNode + * When doing sync to DataNodes, also update the metadata (block length) in + * the NameNode. */ UPDATE_LENGTH; } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index 7cc8c318803..42abbe855dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -161,4 +161,8 @@ public static enum DatanodeReportType { public static final String SEPARATOR_DOT_SNAPSHOT_DIR = Path.SEPARATOR + DOT_SNAPSHOT_DIR; + + /* Temporary until we stop hard-coding these values. */ + public static final byte[] KEY = "0123456789012345".getBytes(); + public static final byte[] IV = "ABCDEFGJIJKLMNOP".getBytes(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java index 66c56faa14b..b7310c5c0c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java @@ -44,6 +44,9 @@ public class HdfsFileStatus { private final String owner; private final String group; private final long fileId; + + private final byte[] key; + private final byte[] iv; // Used by dir, not including dot and dotdot. Always zero for a regular file. 
private final int childrenNum; @@ -65,9 +68,18 @@ public class HdfsFileStatus { * @param fileId the file id */ public HdfsFileStatus(long length, boolean isdir, int block_replication, - long blocksize, long modification_time, long access_time, - FsPermission permission, String owner, String group, - byte[] symlink, byte[] path, long fileId, int childrenNum) { + long blocksize, long modification_time, long access_time, + FsPermission permission, String owner, String group, byte[] symlink, + byte[] path, long fileId, int childrenNum) { + this(length, isdir, block_replication, blocksize, modification_time, + access_time, permission, owner, group, symlink, path, fileId, + childrenNum, HdfsConstants.KEY, HdfsConstants.IV); + } + + public HdfsFileStatus(long length, boolean isdir, int block_replication, + long blocksize, long modification_time, long access_time, + FsPermission permission, String owner, String group, byte[] symlink, + byte[] path, long fileId, int childrenNum, byte[] key, byte[] iv) { this.length = length; this.isdir = isdir; this.block_replication = (short)block_replication; @@ -85,6 +97,8 @@ public HdfsFileStatus(long length, boolean isdir, int block_replication, this.path = path; this.fileId = fileId; this.childrenNum = childrenNum; + this.key = key; + this.iv = iv; } /** @@ -238,6 +252,14 @@ final public long getFileId() { return fileId; } + final public byte[] getKey() { + return key; + } + + final public byte[] getIv() { + return iv; + } + final public int getChildrenNum() { return childrenNum; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java index 0f90e435a43..dfe566077be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java @@ -56,10 +56,10 @@ public HdfsLocatedFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, long access_time, FsPermission permission, String owner, String group, byte[] symlink, byte[] path, long fileId, LocatedBlocks locations, - int childrenNum) { + int childrenNum, byte[] key, byte[] iv) { super(length, isdir, block_replication, blocksize, modification_time, - access_time, permission, owner, group, symlink, path, fileId, - childrenNum); + access_time, permission, owner, group, symlink, path, fileId, + childrenNum, key, iv); this.locations = locations; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java index bac0e6a35b6..4fc2bc064c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java @@ -35,22 +35,27 @@ public class LocatedBlocks { private final boolean underConstruction; private LocatedBlock lastLocatedBlock = null; private boolean isLastBlockComplete = false; + private final byte[] key; + private final byte[] iv; public LocatedBlocks() { fileLength = 0; blocks = null; underConstruction = false; + key = null; + iv = null; } - - /** public Constructor */ + public LocatedBlocks(long flength, boolean isUnderConstuction, - List blks, - 
LocatedBlock lastBlock, boolean isLastBlockCompleted) { + List blks, LocatedBlock lastBlock, + boolean isLastBlockCompleted, byte[] key, byte[] iv) { fileLength = flength; blocks = blks; underConstruction = isUnderConstuction; this.lastLocatedBlock = lastBlock; this.isLastBlockComplete = isLastBlockCompleted; + this.key = key; + this.iv = iv; } /** @@ -92,13 +97,21 @@ public long getFileLength() { } /** - * Return ture if file was under construction when - * this LocatedBlocks was constructed, false otherwise. + * Return true if file was under construction when this LocatedBlocks was + * constructed, false otherwise. */ public boolean isUnderConstruction() { return underConstruction; } + public byte[] getKey() { + return key; + } + + public byte[] getIv() { + return iv; + } + /** * Find block containing specified offset. * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java index 959439b6342..b18e3f3458f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java @@ -61,7 +61,7 @@ public SnapshottableDirectoryStatus(long modification_time, long access_time, int snapshotNumber, int snapshotQuota, byte[] parentFullPath) { this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time, access_time, permission, owner, group, null, localName, inodeId, - childrenNum); + childrenNum, null /* key */, null /* IV */); this.snapshotNumber = snapshotNumber; this.snapshotQuota = snapshotQuota; this.parentFullPath = parentFullPath; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 01a173f2d95..ffa305d55ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -1127,7 +1127,9 @@ public static LocatedBlocks convert(LocatedBlocksProto lb) { lb.getFileLength(), lb.getUnderConstruction(), PBHelper.convertLocatedBlock(lb.getBlocksList()), lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null, - lb.getIsLastBlockComplete()); + lb.getIsLastBlockComplete(), + lb.hasKey() ? lb.getKey().toByteArray() : null, + lb.hasIv() ? lb.getIv().toByteArray() : null); } public static LocatedBlocksProto convert(LocatedBlocks lb) { @@ -1139,6 +1141,12 @@ public static LocatedBlocksProto convert(LocatedBlocks lb) { if (lb.getLastLocatedBlock() != null) { builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())); } + if (lb.getKey() != null) { + builder.setKey(ByteString.copyFrom(lb.getKey())); + } + if (lb.getIv() != null) { + builder.setIv(ByteString.copyFrom(lb.getIv())); + } return builder.setFileLength(lb.getFileLength()) .setUnderConstruction(lb.isUnderConstruction()) .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())) @@ -1264,7 +1272,9 @@ public static HdfsFileStatus convert(HdfsFileStatusProto fs) { fs.getPath().toByteArray(), fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID, fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null, - fs.hasChildrenNum() ? 
fs.getChildrenNum() : -1); + fs.hasChildrenNum() ? fs.getChildrenNum() : -1, + fs.hasKey() ? fs.getKey().toByteArray() : null, + fs.hasIv() ? fs.getIv().toByteArray() : null); } public static SnapshottableDirectoryStatus convert( @@ -1314,6 +1324,12 @@ public static HdfsFileStatusProto convert(HdfsFileStatus fs) { if (fs.isSymlink()) { builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())); } + if (fs.getKey() != null) { + builder.setKey(ByteString.copyFrom(fs.getKey())); + } + if (fs.getIv() != null) { + builder.setIv(ByteString.copyFrom(fs.getIv())); + } if (fs instanceof HdfsLocatedFileStatus) { LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations(); if (locations != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index c58036fc8aa..d841100823f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -52,6 +52,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; + import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; @@ -847,7 +849,8 @@ public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks, return null; } else if (blocks.length == 0) { return new LocatedBlocks(0, isFileUnderConstruction, - Collections.emptyList(), null, false); + Collections.emptyList(), null, false, + null /* key */, null /* IV */); } else { if (LOG.isDebugEnabled()) { LOG.debug("blocks = " + java.util.Arrays.asList(blocks)); @@ -872,7 +875,8 @@ public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks, } return new LocatedBlocks( fileSizeExcludeBlocksUnderConstruction, isFileUnderConstruction, - locatedblocks, lastlb, isComplete); + locatedblocks, lastlb, isComplete, + HdfsConstants.KEY, HdfsConstants.IV); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index a21b7dba339..939cb52d473 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1640,7 +1640,7 @@ private HdfsFileStatus getFileInfo4DotSnapshot(String src) throws UnresolvedLinkException { if (getINode4DotSnapshot(src) != null) { return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null, - HdfsFileStatus.EMPTY_NAME, -1L, 0); + HdfsFileStatus.EMPTY_NAME, -1L, 0, null /* key */, null /* IV */); } return null; } @@ -2611,7 +2611,9 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, node.isSymlink() ? 
node.asSymlink().getSymlink() : null, path, node.getId(), - childrenNum); + childrenNum, + HdfsConstants.KEY, // key + HdfsConstants.IV); // IV } /** @@ -2651,7 +2653,7 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, getPermissionForFileStatus(node, snapshot), node.getUserName(snapshot), node.getGroupName(snapshot), node.isSymlink() ? node.asSymlink().getSymlink() : null, path, - node.getId(), loc, childrenNum); + node.getId(), loc, childrenNum, null /* key */, null /* IV */); // Set caching information for the located blocks. if (loc != null) { CacheManager cacheManager = namesystem.getCacheManager(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index 41a79559f9e..3980b559b70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -251,7 +251,8 @@ public static HdfsFileStatus toFileStatus(final Map json, boolean includes : childrenNumLong.intValue(); return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication, blockSize, mTime, aTime, permission, owner, group, - symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum); + symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, + null /* key */, null /* IV */); } /** Convert an ExtendedBlock to a Json map. */ @@ -531,7 +532,7 @@ public static LocatedBlocks toLocatedBlocks(final Map json (Map)m.get("lastLocatedBlock")); final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete"); return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks, - lastLocatedBlock, isLastBlockComplete); + lastLocatedBlock, isLastBlockComplete, null /* key */, null /* IV */); } /** Convert a ContentSummary to a Json string. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 2d7ca245a73..6ca22a136e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -179,6 +179,8 @@ message LocatedBlocksProto { required bool underConstruction = 3; optional LocatedBlockProto lastBlock = 4; required bool isLastBlockComplete = 5; + optional bytes key = 6; + optional bytes iv = 7; } @@ -212,6 +214,10 @@ message HdfsFileStatusProto { // Optional field for fileId optional uint64 fileId = 13 [default = 0]; // default as an invalid id optional int32 childrenNum = 14 [default = -1]; + + // Optional fields for key/iv for encryption + optional bytes key = 15; + optional bytes iv = 16; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSEncryption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSEncryption.java new file mode 100644 index 00000000000..4b32fe42135 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSEncryption.java @@ -0,0 +1,352 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import static org.apache.hadoop.fs.CreateFlag.CREATE; +import static org.apache.hadoop.fs.FileContextTestHelper.getDefaultBlockSize; +import static org.apache.hadoop.fs.FileContextTestHelper.getFileData; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.List; +import java.util.UUID; + +import javax.security.auth.login.LoginException; + +import org.apache.commons.lang.RandomStringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; +import org.apache.hadoop.crypto.key.KeyProviderFactory; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestHDFSEncryption { + private static MiniDFSCluster cluster; + private static Path defaultWorkingDirectory; + private static final HdfsConfiguration CONF = new HdfsConfiguration(); + private static FileContext fc; + private Path localFsRootPath; + private Path src1; + /* The KeyProvider, if any. */ + private static KeyProvider provider = null; + + private static File tmpDir; + + @BeforeClass + public static void clusterSetupAtBegining() throws IOException, + LoginException, URISyntaxException { + tmpDir = new File(System.getProperty("test.build.data", "target"), + UUID.randomUUID().toString()).getAbsoluteFile(); + tmpDir.mkdirs(); + + CONF.set(KeyProviderFactory.KEY_PROVIDER_PATH, + JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks"); + initializeKeyProvider(CONF); + try { + createOneKey(); + KeyVersion blort = provider.getCurrentKey("blort"); + } catch (java.security.NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + + cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1).build(); + cluster.waitClusterUp(); + + URI uri0 = cluster.getURI(0); + fc = FileContext.getFileContext(uri0, CONF); + defaultWorkingDirectory = fc.makeQualified(new Path("/user/" + + UserGroupInformation.getCurrentUser().getShortUserName())); + fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true); + } + + private static void initializeKeyProvider(final Configuration conf) + throws IOException { + final List providers = KeyProviderFactory.getProviders(conf); + if (providers == null) { + return; + } + + if (providers.size() == 0) { + return; + } + + if (providers.size() > 1) { + final String err = + "Multiple KeyProviders found. 
Only one is permitted."; + throw new RuntimeException(err); + } + provider = providers.get(0); + if (provider.isTransient()) { + final String err = + "A KeyProvider was found but it is a transient provider."; + throw new RuntimeException(err); + } + } + + private static void createOneKey() + throws java.security.NoSuchAlgorithmException, IOException { + final org.apache.hadoop.crypto.key.KeyProvider.Options options = + KeyProvider.options(CONF); + provider.createKey("blort", options); + provider.flush(); + } + + @AfterClass + public static void ClusterShutdownAtEnd() throws Exception { + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + @Before + public void setUp() throws Exception { + File testBuildData = new File(System.getProperty("test.build.data", + "build/test/data"), RandomStringUtils.randomAlphanumeric(10)); + Path rootPath = new Path(testBuildData.getAbsolutePath(), + "root-uri"); + localFsRootPath = rootPath.makeQualified(LocalFileSystem.NAME, null); + fc.mkdir(getTestRootPath(fc, "test"), FileContext.DEFAULT_PERM, true); + src1 = getTestRootPath(fc, "testfile"); + } + + @After + public void tearDown() throws Exception { + final boolean del = + fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test")), true); + assertTrue(del); + fc.delete(localFsRootPath, true); + } + + protected final FileContextTestHelper fileContextTestHelper = + createFileContextHelper(); + + protected FileContextTestHelper createFileContextHelper() { + return new FileContextTestHelper(); + } + + protected Path getDefaultWorkingDirectory() { + return defaultWorkingDirectory; + } + + private Path getTestRootPath(FileContext fc, String path) { + return fileContextTestHelper.getTestRootPath(fc, path); + } + + protected IOException unwrapException(IOException e) { + if (e instanceof RemoteException) { + return ((RemoteException) e).unwrapRemoteException(); + } + return e; + } + + private static final int NUM_BLOCKS = 3; + + private static final byte[] data = getFileData(NUM_BLOCKS, + getDefaultBlockSize()); + + private void writeSomeData() throws Exception { + writeSomeData(false, false); + } + + private void writeSomeData(boolean doHFlush, boolean doHSync) throws Exception { + final FSDataOutputStream out = + fc.create(src1, EnumSet.of(CREATE), Options.CreateOpts.createParent()); + out.write(data, 0, data.length); + if (doHFlush) { + out.hflush(); + } + + if (doHSync) { + out.hsync(); + } + + out.close(); + } + + private void writeAndVerify(boolean doHFlush, boolean doHSync) throws Exception { + writeSomeData(doHFlush, doHSync); + + final FSDataInputStream in = fc.open(src1); + try { + final byte[] readBuf = new byte[getDefaultBlockSize() * NUM_BLOCKS]; + + in.readFully(readBuf); + assertTrue("Expected read-back data to be equal (hflush=" + doHFlush + + " hfsync=" + doHSync + ")", Arrays.equals(data, readBuf)); + } finally { + in.close(); + } + } + + @Test + public void testBasicEncryptionStreamNoFlushNoSync() throws Exception { + writeAndVerify(false, false); + } + + @Test + public void testBasicEncryptionStreamFlushSync() throws Exception { + writeAndVerify(true, true); + } + + @Test + public void testBasicEncryptionStreamNoFlushSync() throws Exception { + writeAndVerify(false, true); + } + + @Test + public void testBasicEncryptionStreamFlushNoSync() throws Exception { + writeAndVerify(true, false); + } + + @Test + public void testGetPos() throws Exception { + writeSomeData(); + + final FSDataInputStream in = fc.open(src1); + + int expectedGetPos = 0; + 
while (in.read() != -1) { + assertTrue(++expectedGetPos == in.getPos()); + } + } + + @Test + public void testDoubleClose() throws Exception { + writeSomeData(); + + final FSDataInputStream in = fc.open(src1); + in.close(); + try { + in.close(); + } catch (Exception e) { + fail("Caught unexpected exception on double-close: " + e); + } + } + + @Test + public void testHFlush() throws Exception { + final DistributedFileSystem fs = cluster.getFileSystem(); + final FSDataOutputStream out = + fc.create(src1, EnumSet.of(CREATE), Options.CreateOpts.createParent()); + out.write(data, 0, data.length); + out.hflush(); + out.close(); + } + + @Test + public void testSeekBogusArgs() throws Exception { + writeSomeData(); + + final FSDataInputStream in = fc.open(src1); + try { + in.seek(-1); + fail("Expected IOException"); + } catch (Exception e) { + GenericTestUtils.assertExceptionContains("Cannot seek to negative offset", e); + } + + try { + in.seek(1 << 20); + fail("Expected IOException"); + } catch (Exception e) { + GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e); + } + in.close(); + } + + @Test + public void testSeekForward() throws Exception { + writeSomeData(); + + final FSDataInputStream in = fc.open(src1); + + for (int seekInc = 1; seekInc < 1024; seekInc += 32) { + long seekTo = 0; + while (seekTo < data.length) { + in.seek(seekTo); + int b = in.read(); + byte expected = data[(int) seekTo]; + assertTrue("seek(" + seekTo + ") Expected: " + expected + ", but got: " + b, + b == expected); + seekTo += seekInc; + } + } + in.close(); + } + + @Test + public void testSeekBackwards() throws Exception { + writeSomeData(); + + final FSDataInputStream in = fc.open(src1); + + for (int seekInc = 1; seekInc < 1024; seekInc += 32) { + long seekTo = data.length - 1; + while (seekTo >= 0) { + in.seek(seekTo); + int b = in.read(); + byte expected = data[(int) seekTo]; + assertTrue("seek(" + seekTo + ") Expected: " + expected + ", but got: " + b, + b == expected); + seekTo -= seekInc; + } + } + in.close(); + } + + @Test + public void testPostionedReadable() throws Exception { + writeSomeData(); + + final FSDataInputStream in = fc.open(src1); + + try { + final byte[] oneByteToRead = new byte[1]; + for (int i = 0; i < data.length; i++) { + int nread = in.read(i, oneByteToRead, 0, 1); + final byte b = oneByteToRead[0]; + byte expected = data[(int) i]; + assertTrue("read() expected only one byte to be read, but got " + nread, nread == 1); + assertTrue("read() expected: " + expected + ", but got: " + b, + b == expected); + } + } finally { + in.close(); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index c11cdc34ead..f42496920b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -253,12 +253,12 @@ public Object answer(InvocationOnMock invocation) Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], - 1010, 0)).when(mockNN).getFileInfo(anyString()); + 1010, 0, null, null)).when(mockNN).getFileInfo(anyString()); Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], - 1010, 0)) + 1010, 0, null, null)) 
.when(mockNN) .create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable) anyObject(), anyBoolean(), @@ -494,7 +494,8 @@ private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) { List badBlocks = new ArrayList(); badBlocks.add(badLocatedBlock); return new LocatedBlocks(goodBlockList.getFileLength(), false, - badBlocks, null, true); + badBlocks, null, true, + null /* key */, null /* IV */); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index cb32de0fccb..4c34681e4fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -95,7 +95,7 @@ public void testLocatedBlocks2Locations() { LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true); List ls = Arrays.asList(l1, l2); - LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true); + LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null); BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index b8cab893696..6487843d008 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -339,12 +339,12 @@ public void testFactory() throws Exception { Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], - 1010, 0)).when(mcp).getFileInfo(anyString()); + 1010, 0, null, null)).when(mcp).getFileInfo(anyString()); Mockito .doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], - 1010, 0)) + 1010, 0, null, null)) .when(mcp) .create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable) anyObject(), anyBoolean(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 0f68e791a6d..17df7f6ef22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -1015,7 +1015,7 @@ public void testFsckFileNotFound() throws Exception { HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, perms, owner, group, symlink, path, - fileId, numChildren); + fileId, numChildren, null, null); Result res = new Result(conf); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java index ffaf193d863..e82c7066f9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java @@ -64,7 +64,7 @@ public void testHdfsFileStatus() { final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26, now, now + 10, new 
FsPermission((short) 0644), "user", "group", DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"), - INodeId.GRANDFATHER_INODE_ID, 0); + INodeId.GRANDFATHER_INODE_ID, 0, null, null); final FileStatus fstatus = toFileStatus(status, parent); System.out.println("status = " + status); System.out.println("fstatus = " + fstatus); From 1cbff8ff824a330fe34a49400bdec963e74f7015 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 6 Jun 2014 00:55:14 +0000 Subject: [PATCH 011/354] HDFS-6476. Print out the KeyProvider after finding KP successfully on startup. Contributed by Juan Yu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1600799 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt | 3 +++ .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java | 1 + 2 files changed, 4 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 4d02b398a73..10e5abbd9f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -8,6 +8,9 @@ fs-encryption (Unreleased) IMPROVEMENTS + HDFS-6476. Print out the KeyProvider after finding KP successfully on + startup. (Juan Yu via wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 504e3ed2013..8f916ce253b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -730,6 +730,7 @@ private void initializeKeyProvider(final Configuration conf) { LOG.error(err); throw new RuntimeException(err); } + LOG.info("Found KeyProvider: " + provider.toString()); } catch (IOException e) { LOG.error("Exception while initializing KeyProvider", e); } From 3f892e9d534e05936ab849eeabef77ded4c8651b Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Fri, 6 Jun 2014 01:42:18 +0000 Subject: [PATCH 012/354] HDFS-6473. 
Protocol and API for Encryption Zones (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1600803 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 + .../org/apache/hadoop/hdfs/DFSClient.java | 29 +++++++ .../hadoop/hdfs/DistributedFileSystem.java | 17 ++++ .../apache/hadoop/hdfs/client/HdfsAdmin.java | 70 ++++++++++++++++ .../hadoop/hdfs/protocol/ClientProtocol.java | 26 ++++++ .../hadoop/hdfs/protocol/EncryptionZone.java | 79 +++++++++++++++++++ ...amenodeProtocolServerSideTranslatorPB.java | 41 ++++++++++ .../ClientNamenodeProtocolTranslatorPB.java | 41 +++++++++- .../hadoop/hdfs/protocolPB/PBHelper.java | 42 ++++++++++ .../hdfs/server/namenode/FSNamesystem.java | 12 +++ .../server/namenode/NameNodeRpcServer.java | 17 ++++ .../main/proto/ClientNamenodeProtocol.proto | 7 ++ .../src/main/proto/encryption.proto | 61 ++++++++++++++ 14 files changed, 444 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 10e5abbd9f1..b7abd062f4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -8,6 +8,8 @@ fs-encryption (Unreleased) IMPROVEMENTS + HDFS-6473. Protocol and API for Encryption Zones (clamb) + HDFS-6476. Print out the KeyProvider after finding KP successfully on startup. (Juan Yu via wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 1e1972961d8..03635aa3041 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -294,6 +294,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> datatransfer.proto fsimage.proto hdfs.proto + encryption.proto ${project.build.directory}/generated-sources/java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 4d2da799e15..2a39bac224e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -136,6 +136,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -2775,6 +2776,34 @@ public AclStatus getAclStatus(String src) throws IOException { } } + public void createEncryptionZone(String src, String keyId) + throws IOException { + checkOpen(); + try { + namenode.createEncryptionZone(src, keyId); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + SafeModeException.class, + UnresolvedPathException.class); + } + } + + public void deleteEncryptionZone(String src) throws IOException { + checkOpen(); + try { + namenode.deleteEncryptionZone(src); + } catch (RemoteException re) { + throw 
re.unwrapRemoteException(AccessControlException.class, + SafeModeException.class, + UnresolvedPathException.class); + } + } + + public List listEncryptionZones() throws IOException { + checkOpen(); + return namenode.listEncryptionZones(); + } + public void setXAttr(String src, String name, byte[] value, EnumSet flag) throws IOException { checkOpen(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 1f577271b44..2fe049f68ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -68,6 +68,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; @@ -1795,6 +1796,22 @@ public AclStatus next(final FileSystem fs, final Path p) }.resolve(this, absF); } + /* HDFS only */ + public void createEncryptionZone(Path path, String keyId) + throws IOException { + dfs.createEncryptionZone(getPathName(path), keyId); + } + + /* HDFS only */ + public void deleteEncryptionZone(Path path) throws IOException { + dfs.deleteEncryptionZone(getPathName(path)); + } + + /* HDFS only */ + public List listEncryptionZones() throws IOException { + return dfs.listEncryptionZones(); + } + @Override public void setXAttr(Path path, final String name, final byte[] value, final EnumSet flag) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index 0f0769e302c..a13edfe5c5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hdfs.client; +import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.util.EnumSet; +import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -33,7 +35,9 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.hdfs.tools.DFSAdmin; /** @@ -225,4 +229,70 @@ public void removeCachePool(String poolName) throws IOException { public RemoteIterator listCachePools() throws IOException { return dfs.listCachePools(); } + + /** + * Create an encryption zone rooted at path using the optional encryption key + * id. An encryption zone is a portion of the HDFS file system hierarchy in + * which all files are encrypted with the same key, but possibly different + * key versions per file. + *

+ * Path must refer to an empty, existing directory. Otherwise an IOException + * will be thrown. keyId specifies the id of an encryption key in the + * KeyProvider that the Namenode has been configured to use. If keyId is + * null, then a key is generated in the KeyProvider using {@link + * java.util.UUID} to generate a key id. + * + * @param path The path of the root of the encryption zone. + * + * @param keyId An optional keyId in the KeyProvider. If null, then + * a key is generated. + * + * @throws IOException if there was a general IO exception + * + * @throws AccessControlException if the caller does not have access to path + * + * @throws FileNotFoundException if the path does not exist + */ + public void createEncryptionZone(Path path, String keyId) + throws IOException, AccessControlException, FileNotFoundException { + dfs.createEncryptionZone(path, keyId); + } + + /** + * Delete the encryption zone rooted at path. Path must refer to an existing, + * empty directory. Otherwise, an IOException is thrown. This method removes + * those extended attributes on the directory which indicate that it is part + * of an encryption zone. Following successful completion of this call, any + * new files created in the directory (or it's children) will not be + * encrypted. The directory is not removed by this method. + * + * @param path The path of the root of the encryption zone. + * + * @throws IOException if there was a general IO exception + * + * @throws AccessControlException if the caller does not have access to path + * + * @throws FileNotFoundException if the path does not exist + */ + public void deleteEncryptionZone(Path path) + throws IOException, AccessControlException, FileNotFoundException { + dfs.deleteEncryptionZone(path); + } + + /** + * Return a list of all {@EncryptionZone}s in the HDFS hierarchy which are + * visible to the caller. If the caller is the HDFS admin, then the returned + * EncryptionZone instances will have the key id field filled in. If the + * caller is not the HDFS admin, then the EncryptionZone instances will only + * have the path field filled in and only those zones that are visible to the + * user are returned. + * + * @throws IOException if there was a general IO exception + * + * @return List the list of Encryption Zones that the caller has + * access to. + */ + public List listEncryptionZones() throws IOException { + return dfs.listEncryptionZones(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index c97a8c800a8..fbbd44a9d0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1257,6 +1257,32 @@ public void removeAclEntries(String src, List aclSpec) @Idempotent public AclStatus getAclStatus(String src) throws IOException; + /** + * Create an encryption zone + */ + @AtMostOnce + public void createEncryptionZone(String src, String keyId) + throws IOException; + + /** + * Delete an encryption zone + */ + @AtMostOnce + public void deleteEncryptionZone(String src) + throws IOException; + + /** + * Return a list of all {@EncryptionZone}s in the HDFS hierarchy which are + * visible to the caller. If the caller is the HDFS admin, then the returned + * EncryptionZone instances will have the key id field filled in. 
If the + * caller is not the HDFS admin, then the EncryptionZone instances will only + * have the path field filled in and only those zones that are visible to the + * user are returned. + */ + @Idempotent + public List listEncryptionZones() + throws IOException; + /** * Set xattr of a file or directory. * A regular user only can set xattr of "user" namespace. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java new file mode 100644 index 00000000000..f4fcc609e41 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import org.apache.commons.lang.builder.EqualsBuilder; +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A simple class for representing an encryption zone. Presently an encryption + * zone only has a path (the root of the encryption zone) and a key id. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class EncryptionZone { + + private final String path; + private final String keyId; + + public EncryptionZone(String path, String keyId) { + this.path = path; + this.keyId = keyId; + } + + public String getPath() { + return path; + } + + public String getKeyId() { + return keyId; + } + + @Override + public int hashCode() { + return new HashCodeBuilder(13, 31). + append(path).append(keyId). + toHashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj == this) { + return true; + } + if (obj.getClass() != getClass()) { + return false; + } + + EncryptionZone rhs = (EncryptionZone) obj; + return new EqualsBuilder(). + append(path, rhs.path). + append(keyId, rhs.keyId). 
+ isEquals(); + } + + @Override + public String toString() { + return "EncryptionZone [path=" + path + ", keyId=" + keyId + "]"; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 2b19669d6aa..7f4c03d544f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -171,6 +171,12 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.DeleteEncryptionZoneResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.DeleteEncryptionZoneRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; @@ -1275,6 +1281,41 @@ public GetAclStatusResponseProto getAclStatus(RpcController controller, } } + @Override + public CreateEncryptionZoneResponseProto createEncryptionZone( + RpcController controller, CreateEncryptionZoneRequestProto req) + throws ServiceException { + try { + server.createEncryptionZone(req.getSrc(), req.getKeyId()); + return CreateEncryptionZoneResponseProto.newBuilder().build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public DeleteEncryptionZoneResponseProto deleteEncryptionZone( + RpcController controller, DeleteEncryptionZoneRequestProto req) + throws ServiceException { + try { + server.deleteEncryptionZone(req.getSrc()); + return DeleteEncryptionZoneResponseProto.newBuilder().build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public ListEncryptionZonesResponseProto listEncryptionZones( + RpcController controller, ListEncryptionZonesRequestProto req) + throws ServiceException { + try { + return PBHelper.convertListEZResponse(server.listEncryptionZones()); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public SetXAttrResponseProto setXAttr(RpcController controller, SetXAttrRequestProto req) throws ServiceException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 57a46515356..bf43544bb62 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; @@ -143,6 +144,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.DeleteEncryptionZoneRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto; @@ -1273,7 +1277,42 @@ public AclStatus getAclStatus(String src) throws IOException { throw ProtobufHelper.getRemoteException(e); } } - + + @Override + public void createEncryptionZone(String src, String keyId) + throws IOException { + final CreateEncryptionZoneRequestProto req = + CreateEncryptionZoneRequestProto.newBuilder(). 
+ setSrc(src).setKeyId(keyId).build(); + try { + rpcProxy.createEncryptionZone(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void deleteEncryptionZone(String src) throws IOException { + final DeleteEncryptionZoneRequestProto req = + DeleteEncryptionZoneRequestProto.newBuilder().setSrc(src).build(); + try { + rpcProxy.deleteEncryptionZone(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public List listEncryptionZones() throws IOException { + final ListEncryptionZonesRequestProto req = + ListEncryptionZonesRequestProto.newBuilder().build(); + try { + return PBHelper.convert(rpcProxy.listEncryptionZones(null, req)); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + @Override public void setXAttr(String src, XAttr xAttr, EnumSet flag) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index ffa305d55ef..6a4cc0ce1b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.FsAclPermission; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; @@ -110,6 +111,8 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; @@ -2180,6 +2183,45 @@ public static List convertXAttrs(List xAttrSpec) { return xAttrs; } + public static List convert(ListEncryptionZonesResponseProto a) { + final List ezs = a.getPathsAndKeysList(); + return convertEZ(ezs); + } + + public static ListEncryptionZonesResponseProto convertListEZResponse( + List ezs) { + final ListEncryptionZonesResponseProto.Builder builder = + ListEncryptionZonesResponseProto.newBuilder(); + builder.addAllPathsAndKeys(convertEZProto(ezs)); + return builder.build(); + } + + public static List convertEZProto( + List ezs) { + final ArrayList ret = + Lists.newArrayListWithCapacity(ezs.size()); + for (EncryptionZone a : ezs) { + final EncryptionZoneProto.Builder builder = + EncryptionZoneProto.newBuilder(); + builder.setPath(a.getPath()); + builder.setKeyId(a.getKeyId()); + ret.add(builder.build()); + } + return ret; + } + + public static List convertEZ( + List ezs) { + final ArrayList ret = + Lists.newArrayListWithCapacity(ezs.size()); + for (EncryptionZoneProto a : ezs) { + final EncryptionZone ez = + 
new EncryptionZone(a.getPath(), a.getKeyId()); + ret.add(ez); + } + return ret; + } + public static List convert(GetXAttrsResponseProto a) { List xAttrs = a.getXAttrsList(); return convertXAttrs(xAttrs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index ce8cbeedfc6..cbf11877533 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -155,6 +155,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; @@ -7824,6 +7825,17 @@ AclStatus getAclStatus(String src) throws IOException { } } + void createEncryptionZone(final String src, final String keyId) + throws IOException { + } + + void deleteEncryptionZone(final String src) throws IOException { + } + + List listEncryptionZones() throws IOException { + return null; + } + /** * Set xattr for a file or directory. * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 34e7979ced3..52cf5548ca0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.FSLimitException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -1384,6 +1385,22 @@ public AclStatus getAclStatus(String src) throws IOException { return namesystem.getAclStatus(src); } + @Override + public void createEncryptionZone(String src, String keyId) + throws IOException { + namesystem.createEncryptionZone(src, keyId); + } + + @Override + public void deleteEncryptionZone(String src) throws IOException { + namesystem.deleteEncryptionZone(src); + } + + @Override + public List listEncryptionZones() throws IOException { + return namesystem.listEncryptionZones(); + } + @Override public void setXAttr(String src, XAttr xAttr, EnumSet flag) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 80b96f43173..73e4aea2ce3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -32,6 +32,7 @@ import "Security.proto"; import "hdfs.proto"; import "acl.proto"; import "xattr.proto"; +import "encryption.proto"; /** * The ClientNamenodeProtocol Service defines the interface between a client 
@@ -766,4 +767,10 @@ service ClientNamenodeProtocol { returns(GetXAttrsResponseProto); rpc removeXAttr(RemoveXAttrRequestProto) returns(RemoveXAttrResponseProto); + rpc createEncryptionZone(CreateEncryptionZoneRequestProto) + returns(CreateEncryptionZoneResponseProto); + rpc deleteEncryptionZone(DeleteEncryptionZoneRequestProto) + returns(DeleteEncryptionZoneResponseProto); + rpc listEncryptionZones(ListEncryptionZonesRequestProto) + returns(ListEncryptionZonesResponseProto); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto new file mode 100644 index 00000000000..748f2cb40c2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * These .proto interfaces are private and stable. + * Please see http://wiki.apache.org/hadoop/Compatibility + * for what changes are allowed for a *stable* .proto interface. + */ + +// This file contains protocol buffers that are used throughout HDFS -- i.e. +// by the client, server, and data transfer protocols. + + +option java_package = "org.apache.hadoop.hdfs.protocol.proto"; +option java_outer_classname = "EncryptionZonesProtos"; +option java_generate_equals_and_hash = true; +package hadoop.hdfs; + +import "hdfs.proto"; + +message CreateEncryptionZoneRequestProto { + required string src = 1; + optional string keyId = 2; +} + +message CreateEncryptionZoneResponseProto { +} + +message DeleteEncryptionZoneRequestProto { + required string src = 1; +} + +message DeleteEncryptionZoneResponseProto { +} + +message ListEncryptionZonesRequestProto { +} + +message EncryptionZoneProto { + required string path = 1; + required string keyId = 2; +} + +message ListEncryptionZonesResponseProto { + repeated EncryptionZoneProto pathsAndKeys = 1; +} From a93a37a8cd99368ea9f52778492eb4d1ad593c27 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Fri, 6 Jun 2014 01:49:53 +0000 Subject: [PATCH 013/354] Various fixes to CHANGES-fs-encryption.txt. Various Jiras ended up in the hadoop-common CHANGES-fs-encryption.txt by accident. 
Moved them to HDFS CHANGES-fs-encryption.txt git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1600804 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 5 ----- hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index e19a4abf7a7..4b8e982cfb3 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -6,13 +6,8 @@ fs-encryption (Unreleased) NEW FEATURES - HDFS-6388. HDFS integration with KeyProvider. (clamb) - IMPROVEMENTS - HADOOP-6392. Wire crypto streams for encrypted files in - DFSClient. (clamb and yliu) - HADOOP-10603. Crypto input and output streams implementing Hadoop stream interfaces. (Yi Liu and Charles Lamb) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index b7abd062f4d..ad971b42673 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -6,10 +6,15 @@ fs-encryption (Unreleased) NEW FEATURES + HDFS-6388. HDFS integration with KeyProvider. (clamb) + IMPROVEMENTS HDFS-6473. Protocol and API for Encryption Zones (clamb) + HDFS-6392. Wire crypto streams for encrypted files in + DFSClient. (clamb and yliu) + HDFS-6476. Print out the KeyProvider after finding KP successfully on startup. (Juan Yu via wang) From 6ef3a9e746c52d91f1e5b4ac9f41627bd42674d7 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Wed, 18 Jun 2014 22:33:50 +0000 Subject: [PATCH 014/354] HDFS-6386. HDFS Encryption Zones (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1603658 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../org/apache/hadoop/hdfs/DFSClient.java | 6 +- .../hadoop/hdfs/protocol/HdfsConstants.java | 5 + .../ClientNamenodeProtocolTranslatorPB.java | 10 +- .../hdfs/server/namenode/FSDirectory.java | 46 ++ .../hdfs/server/namenode/FSNamesystem.java | 254 ++++++++++- .../hadoop/hdfs/server/namenode/NameNode.java | 37 -- .../hadoop/hdfs/TestEncryptionZonesAPI.java | 404 ++++++++++++++++++ 8 files changed, 719 insertions(+), 45 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index ad971b42673..c02b8c674dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -10,6 +10,8 @@ fs-encryption (Unreleased) IMPROVEMENTS + HDFS-6386. HDFS Encryption Zones (clamb) + HDFS-6473. Protocol and API for Encryption Zones (clamb) HDFS-6392. 
Wire crypto streams for encrypted files in diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 924e42442b8..bf8cf2fb20c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2802,7 +2802,11 @@ public void deleteEncryptionZone(String src) throws IOException { public List listEncryptionZones() throws IOException { checkOpen(); - return namenode.listEncryptionZones(); + try { + return namenode.listEncryptionZones(); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class); + } } public void setXAttr(String src, String name, byte[] value, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index 42abbe855dc..06c658cb2ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -162,6 +162,11 @@ public static enum DatanodeReportType { public static final String SEPARATOR_DOT_SNAPSHOT_DIR = Path.SEPARATOR + DOT_SNAPSHOT_DIR; + public static final String CRYPTO_XATTR_KEY_ID = "system.hdfs.crypto.key-id"; + public static final String CRYPTO_XATTR_KEY_VERSION_ID = + "system.hdfs.crypto.key-version-id"; + public static final String CRYPTO_XATTR_IV = "system.hdfs.crypto.iv"; + public static final int CRYPTO_KEY_SIZE = 128; /* Temporary until we stop hard-coding these values. */ public static final byte[] KEY = "0123456789012345".getBytes(); public static final byte[] IV = "ABCDEFGJIJKLMNOP".getBytes(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index f366ddb6d8a..2a7ddd5305d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -1282,9 +1282,13 @@ public AclStatus getAclStatus(String src) throws IOException { @Override public void createEncryptionZone(String src, String keyId) throws IOException { - final CreateEncryptionZoneRequestProto req = - CreateEncryptionZoneRequestProto.newBuilder(). 
- setSrc(src).setKeyId(keyId).build(); + final CreateEncryptionZoneRequestProto.Builder builder = + CreateEncryptionZoneRequestProto.newBuilder(); + builder.setSrc(src); + if (keyId != null && !keyId.isEmpty()) { + builder.setKeyId(keyId); + } + CreateEncryptionZoneRequestProto req = builder.build(); try { rpcProxy.createEncryptionZone(null, req); } catch (ServiceException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 4abd8889fe2..8bfa6125b59 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -32,6 +32,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.Options; @@ -50,6 +51,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; @@ -84,6 +86,10 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_KEY_ID; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_IV; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_KEY_VERSION_ID; + /************************************************* * FSDirectory stores the filesystem directory state. * It handles writing/loading values to disk, and logging @@ -130,6 +136,7 @@ private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) { private final INodeMap inodeMap; // Synchronized by dirLock private long yieldCount = 0; // keep track of lock yield count. 
private final int inodeXAttrsLimit; //inode xattrs max limit + private final CryptoCodec codec; // lock to protect the directory and BlockMap private final ReentrantReadWriteLock dirLock; @@ -198,6 +205,7 @@ public int getWriteHoldCount() { this.inodeXAttrsLimit = conf.getInt( DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT); + this.codec = CryptoCodec.getInstance(conf); Preconditions.checkArgument(this.inodeXAttrsLimit >= 0, "Cannot set a negative limit on the number of xattrs per inode (%s).", DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY); @@ -2662,6 +2670,44 @@ List filterINodeXAttr(List existingXAttrs, return xAttrs; } + XAttr createEncryptionZone(String src, String keyId) + throws IOException { + writeLock(); + try { + if (isNonEmptyDirectory(src)) { + throw new IOException( + "Attempt to create an encryption zone for a non-empty directory."); + } + final XAttr keyIdXAttr = + XAttrHelper.buildXAttr(CRYPTO_XATTR_KEY_ID, keyId.getBytes()); + unprotectedSetXAttr(src, keyIdXAttr, EnumSet.of(XAttrSetFlag.CREATE)); + return keyIdXAttr; + } finally { + writeUnlock(); + } + } + + XAttr deleteEncryptionZone(String src) + throws IOException { + writeLock(); + try { + if (isNonEmptyDirectory(src)) { + throw new IOException( + "Attempt to delete an encryption zone for a non-empty directory."); + } + final XAttr keyIdXAttr = + XAttrHelper.buildXAttr(CRYPTO_XATTR_KEY_ID, null); + final XAttr removedXAttr = unprotectedRemoveXAttr(src, keyIdXAttr); + if (removedXAttr == null) { + throw new IOException( + src + " does not appear to be the root of an encryption zone"); + } + return removedXAttr; + } finally { + writeUnlock(); + } + } + void setXAttr(String src, XAttr xAttr, EnumSet flag) throws IOException { writeLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 74a61338949..360f16d96ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -83,12 +83,16 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_KEY_SIZE; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_IV; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_KEY_VERSION_ID; import static org.apache.hadoop.util.Time.now; import java.io.*; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.URI; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -102,6 +106,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -116,6 +121,9 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import 
org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; +import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.ContentSummary; @@ -145,6 +153,7 @@ import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; @@ -261,6 +270,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Charsets; +import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; @@ -515,6 +525,11 @@ private void logAuditEvent(boolean succeeded, private final NNConf nnConf; + private KeyProvider provider = null; + private KeyProvider.Options providerOptions = null; + + private final Map encryptionZones; + /** * Set the last allocated inode id when fsimage or editlog is loaded. */ @@ -675,6 +690,8 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { */ FSNamesystem(Configuration conf, FSImage fsImage, boolean ignoreRetryCache) throws IOException { + initializeKeyProvider(conf); + providerOptions = KeyProvider.options(conf); if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) { LOG.info("Enabling async auditlog"); @@ -781,6 +798,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { auditLoggers.get(0) instanceof DefaultAuditLogger; this.retryCache = ignoreRetryCache ? null : initRetryCache(conf); this.nnConf = new NNConf(conf); + this.encryptionZones = new HashMap(); } catch(IOException e) { LOG.error(getClass().getSimpleName() + " initialization failed.", e); close(); @@ -826,6 +844,42 @@ void addCacheEntry(byte[] clientId, int callId) { } } + private void initializeKeyProvider(final Configuration conf) { + try { + final List providers = KeyProviderFactory.getProviders(conf); + if (providers == null) { + return; + } + + if (providers.size() == 0) { + LOG.info("No KeyProviders found."); + return; + } + + if (providers.size() > 1) { + final String err = + "Multiple KeyProviders found. Only one is permitted."; + LOG.error(err); + throw new RuntimeException(err); + } + provider = providers.get(0); + if (provider.isTransient()) { + final String err = + "A KeyProvider was found but it is a transient provider."; + LOG.error(err); + throw new RuntimeException(err); + } + LOG.info("Found KeyProvider: " + provider.toString()); + } catch (IOException e) { + LOG.error("Exception while initializing KeyProvider", e); + } + } + + @VisibleForTesting + public KeyProvider getProvider() { + return provider; + } + @VisibleForTesting static RetryCache initRetryCache(Configuration conf) { boolean enable = conf.getBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, @@ -2358,7 +2412,7 @@ permissions, true, now())) { throw ie; } } - + /** * Append to an existing file for append. *

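/*
 * A minimal configuration sketch, assuming a hypothetical local keystore
 * path, of the single-KeyProvider setup that initializeKeyProvider() above
 * expects. TestEncryptionZonesAPI later in this patch uses the same
 * JavaKeyStoreProvider scheme against a temporary directory.
 */
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

class KeyProviderConfSketch {
  static Configuration singleProviderConf() {
    final Configuration conf = new Configuration();
    // Register exactly one non-transient provider. Zero providers only logs
    // "No KeyProviders found."; more than one, or a transient provider,
    // triggers a RuntimeException while FSNamesystem initializes.
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
        JavaKeyStoreProvider.SCHEME_NAME + "://file/tmp/keystore/test.jks");
    return conf;
  }
}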
@@ -8057,14 +8111,206 @@ AclStatus getAclStatus(String src) throws IOException { } } - void createEncryptionZone(final String src, final String keyId) - throws IOException { + /** + * Create an encryption zone on directory src either using keyIdArg if + * supplied or generating a keyId if it's null. + * + * @param src the path of a directory which will be the root of the + * encryption zone. The directory must be empty. + * + * @param keyIdArg an optional keyId of a key in the configured + * KeyProvider. If this is null, then a a new key is generated. + * + * @throws AccessControlException if the caller is not the superuser. + * + * @throws UnresolvedLinkException if the path can't be resolved. + * + * @throws SafeModeException if the Namenode is in safe mode. + */ + void createEncryptionZone(final String src, String keyIdArg) + throws IOException, UnresolvedLinkException, + SafeModeException, AccessControlException { + final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; // Return previous response + } + + boolean createdKey = false; + String keyId = keyIdArg; + boolean success = false; + try { + if (keyId == null || keyId.isEmpty()) { + keyId = createNewKey(src); + createdKey = true; + } else { + if (provider.getCurrentKey(keyId) == null) { + + /* + * It would be nice if we threw something more specific than + * IOException when the key is not found, but the KeyProvider API + * doesn't provide for that. If that API is ever changed to throw + * something more specific (e.g. UnknownKeyException) then we can + * update this to match it, or better yet, just rethrow the + * KeyProvider's exception. + */ + throw new IOException("Key " + keyId + " doesn't exist."); + } + } + createEncryptionZoneInt(src, keyId, cacheEntry != null); + success = true; + } catch (AccessControlException e) { + logAuditEvent(false, "createEncryptionZone", src); + throw e; + } finally { + RetryCache.setState(cacheEntry, success); + if (!success && createdKey) { + /* Unwind key creation. */ + provider.deleteKey(keyId); + } + } } - void deleteEncryptionZone(final String src) throws IOException { + private void createEncryptionZoneInt(final String srcArg, String keyId, + final boolean logRetryCache) throws IOException { + String src = srcArg; + HdfsFileStatus resultingStat = null; + checkSuperuserPrivilege(); + checkOperation(OperationCategory.WRITE); + final byte[][] pathComponents = + FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkSuperuserPrivilege(); + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot create encryption zone on " + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + + EncryptionZone ez = getEncryptionZoneForPath(src); + if (ez != null) { + throw new IOException("Directory " + src + + " is already in an encryption zone. 
(" + ez.getPath() + ")"); + } + + final XAttr keyIdXAttr = dir.createEncryptionZone(src, keyId); + getEditLog().logSetXAttr(src, keyIdXAttr, logRetryCache); + encryptionZones.put(src, new EncryptionZone(src, keyId)); + resultingStat = getAuditFileInfo(src, false); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "createEncryptionZone", src, null, resultingStat); + } + + private String createNewKey(String src) + throws IOException { + final String keyId = UUID.randomUUID().toString(); + // TODO pass in hdfs://HOST:PORT (HDFS-6490) + providerOptions.setDescription(src); + providerOptions.setBitLength(CRYPTO_KEY_SIZE); + try { + provider.createKey(keyId, providerOptions); + } catch (NoSuchAlgorithmException e) { + throw new IOException(e); + } + return keyId; + } + + /** + * Delete the encryption zone on directory src. + * + * @param src the path of a directory which is the root of the encryption + * zone. The directory must be empty and must be marked as an encryption + * zone. + * + * @throws AccessControlException if the caller is not the superuser. + * + * @throws UnresolvedLinkException if the path can't be resolved. + * + * @throws SafeModeException if the Namenode is in safe mode. + */ + void deleteEncryptionZone(final String src) + throws IOException, UnresolvedLinkException, + SafeModeException, AccessControlException { + final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; // Return previous response + } + + boolean success = false; + try { + deleteEncryptionZoneInt(src, cacheEntry != null); + encryptionZones.remove(src); + success = true; + } catch (AccessControlException e) { + logAuditEvent(false, "deleteEncryptionZone", src); + throw e; + } finally { + RetryCache.setState(cacheEntry, success); + } + } + + private void deleteEncryptionZoneInt(final String srcArg, + final boolean logRetryCache) throws IOException { + String src = srcArg; + HdfsFileStatus resultingStat = null; + checkSuperuserPrivilege(); + checkOperation(OperationCategory.WRITE); + final byte[][] pathComponents = + FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkSuperuserPrivilege(); + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot delete encryption zone on " + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + final EncryptionZone ez = encryptionZones.get(src); + if (ez == null) { + throw new IOException("Directory " + src + + " is not the root of an encryption zone."); + } + final XAttr removedXAttr = dir.deleteEncryptionZone(src); + if (removedXAttr != null) { + getEditLog().logRemoveXAttr(src, removedXAttr); + } + encryptionZones.remove(src); + resultingStat = getAuditFileInfo(src, false); + } finally { + writeUnlock(); + } + getEditLog().logSync(); + logAuditEvent(true, "deleteEncryptionZone", src, null, resultingStat); } List listEncryptionZones() throws IOException { + boolean success = false; + checkSuperuserPrivilege(); + checkOperation(OperationCategory.READ); + readLock(); + try { + checkSuperuserPrivilege(); + checkOperation(OperationCategory.READ); + final List ret = + Lists.newArrayList(encryptionZones.values()); + success = true; + return ret; + } finally { + readUnlock(); + logAuditEvent(success, "listEncryptionZones", null); + } + } + + /** Lookup the encryption zone of a path. 
*/ + private EncryptionZone getEncryptionZoneForPath(String src) { + final String[] components = INode.getPathNames(src); + for (int i = components.length; i > 0; i--) { + final List l = Arrays.asList(Arrays.copyOfRange(components, 0, i)); + String p = Joiner.on(Path.SEPARATOR).join(l); + final EncryptionZone ret = encryptionZones.get(p); + if (ret != null) { + return ret; + } + } return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 8f916ce253b..9cdad26eb24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -27,8 +27,6 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Trash; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -272,9 +270,6 @@ public long getProtocolVersion(String protocol, private NameNodeRpcServer rpcServer; - /* The KeyProvider, if any. */ - private KeyProvider provider = null; - private JvmPauseMonitor pauseMonitor; private ObjectName nameNodeStatusBeanName; /** @@ -586,7 +581,6 @@ protected void initialize(Configuration conf) throws IOException { startHttpServer(conf); } loadNamesystem(conf); - initializeKeyProvider(conf); rpcServer = createRpcServer(conf); if (clientNamenodeAddress == null) { @@ -705,37 +699,6 @@ private void stopHttpServer() { } } - private void initializeKeyProvider(final Configuration conf) { - try { - final List providers = KeyProviderFactory.getProviders(conf); - if (providers == null) { - return; - } - - if (providers.size() == 0) { - LOG.info("No KeyProviders found."); - return; - } - - if (providers.size() > 1) { - final String err = - "Multiple KeyProviders found. Only one is permitted."; - LOG.error(err); - throw new RuntimeException(err); - } - provider = providers.get(0); - if (provider.isTransient()) { - final String err = - "A KeyProvider was found but it is a transient provider."; - LOG.error(err); - throw new RuntimeException(err); - } - LOG.info("Found KeyProvider: " + provider.toString()); - } catch (IOException e) { - LOG.error("Exception while initializing KeyProvider", e); - } - } - /** * Start NameNode. *
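The getEncryptionZoneForPath helper above resolves a path's enclosing encryption zone by testing each ancestor prefix, deepest first, against the in-memory zone map. A minimal Java sketch of that longest-prefix lookup, where ZoneInfo, zones, addZone and zoneFor are illustrative stand-ins rather than the FSNamesystem types:

import java.util.HashMap;
import java.util.Map;

// Minimal sketch of longest-prefix encryption zone lookup (stand-in types, not FSNamesystem).
public class ZoneLookupSketch {
  // Stand-in for the per-zone metadata kept at each zone root.
  static class ZoneInfo {
    final String path;
    final String keyId;
    ZoneInfo(String path, String keyId) { this.path = path; this.keyId = keyId; }
  }

  // Zone root path -> zone metadata, mirroring the in-memory encryptionZones map.
  private final Map<String, ZoneInfo> zones = new HashMap<String, ZoneInfo>();

  void addZone(String path, String keyId) {
    zones.put(path, new ZoneInfo(path, keyId));
  }

  // Check the path itself, then each ancestor, returning the deepest enclosing zone or null.
  ZoneInfo zoneFor(String src) {
    String p = src;
    while (!p.isEmpty()) {
      ZoneInfo zone = zones.get(p);
      if (zone != null) {
        return zone;
      }
      int lastSlash = p.lastIndexOf('/');
      if (lastSlash <= 0) {
        return zones.get("/");   // root is the last candidate
      }
      p = p.substring(0, lastSlash);
    }
    return null;
  }

  public static void main(String[] args) {
    ZoneLookupSketch lookup = new ZoneLookupSketch();
    lookup.addZone("/secure", "mykey");
    System.out.println(lookup.zoneFor("/secure/a/b") != null);  // true: inside the zone
    System.out.println(lookup.zoneFor("/plain/file") != null);  // false: no enclosing zone
  }
}

The FSNamesystem version walks INode path components and rejoins them with Guava's Joiner; the sketch uses plain string slicing to stay dependency-free.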
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java new file mode 100644 index 00000000000..eb851d4dcb6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java @@ -0,0 +1,404 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import java.io.File; +import java.io.IOException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivilegedExceptionAction; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.client.HdfsAdmin; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.base.Preconditions; + +import static org.junit.Assert.fail; + +public class TestEncryptionZonesAPI { + + private static final Path TEST_PATH = new Path("/test"); + private static final Path TEST_PATH_WITH_CHILD = new Path(TEST_PATH, "foo"); + private static final Path TEST_PATH_WITH_MULTIPLE_CHILDREN = + new Path(TEST_PATH_WITH_CHILD, "baz"); + private static final String TEST_KEYID = "mykeyid"; + private final Configuration conf = new Configuration(); + private MiniDFSCluster cluster; + private static File tmpDir; + private FileSystem fs; + + @Before + public void setUpCluster() throws IOException { + tmpDir = new File(System.getProperty("test.build.data", "target"), + UUID.randomUUID().toString()).getAbsoluteFile(); + conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, + JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks"); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + fs = cluster.getFileSystem(); + } + + @After + public void shutDownCluster() { + if (cluster != null) { + cluster.shutdown(); + } + } + + /** Test failure of Create EZ on a directory that doesn't exist. 
*/ + @Test(timeout = 30000) + public void testCreateEncryptionZoneDirectoryDoesntExist() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + try { + dfsAdmin.createEncryptionZone(TEST_PATH, null); + fail("expected /test doesn't exist"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("cannot find", e); + } + } + + /** Test failure of Create EZ on a directory which is already an EZ. */ + @Test(timeout = 30000) + public void testCreateEncryptionZoneWhichAlreadyExists() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(TEST_PATH, null); + try { + dfsAdmin.createEncryptionZone(TEST_PATH, null); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("already in an encryption zone", + e); + } + } + + /** Test success of Create EZ in which a key is created. */ + @Test(timeout = 30000) + public void testCreateEncryptionZoneAndGenerateKeyDirectoryEmpty() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(TEST_PATH, null); + } + + /** Test failure of Create EZ operation in an existing EZ. */ + @Test(timeout = 30000) + public void testCreateEncryptionZoneInExistingEncryptionZone() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(TEST_PATH, null); + FileSystem.mkdirs(fs, TEST_PATH_WITH_CHILD, + new FsPermission((short) 0777)); + try { + dfsAdmin.createEncryptionZone(TEST_PATH_WITH_CHILD, null); + fail("EZ in an EZ"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("already in an encryption zone", e); + } + } + + /** Test failure of creating an EZ using a non-empty directory. */ + @Test(timeout = 30000) + public void testCreateEncryptionZoneAndGenerateKeyDirectoryNotEmpty() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + FileSystem.create(fs, new Path("/test/foo"), + new FsPermission((short) 0777)); + try { + dfsAdmin.createEncryptionZone(TEST_PATH, null); + fail("expected key doesn't exist"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("create an encryption zone", e); + } + } + + /** Test failure of creating an EZ passing a key that doesn't exist. */ + @Test(timeout = 30000) + public void testCreateEncryptionZoneKeyDoesntExist() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + try { + dfsAdmin.createEncryptionZone(TEST_PATH, TEST_KEYID); + fail("expected key doesn't exist"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("doesn't exist.", e); + } + final List zones = dfsAdmin.listEncryptionZones(); + Preconditions.checkState(zones.size() == 0, "More than one zone found?"); + } + + /** Test success of creating an EZ when they key exists. 
*/ + @Test(timeout = 30000) + public void testCreateEncryptionZoneKeyExist() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + createAKey(TEST_KEYID); + dfsAdmin.createEncryptionZone(TEST_PATH, TEST_KEYID); + final List zones = dfsAdmin.listEncryptionZones(); + Preconditions.checkState(zones.size() == 1, "More than one zone found?"); + final EncryptionZone ez = zones.get(0); + GenericTestUtils.assertMatches(ez.toString(), + "EncryptionZone \\[path=/test, keyId="); + } + + /** Helper function to create a key in the Key Provider. */ + private void createAKey(String keyId) + throws NoSuchAlgorithmException, IOException { + KeyProvider provider = + cluster.getNameNode().getNamesystem().getProvider(); + final KeyProvider.Options options = KeyProvider.options(conf); + provider.createKey(keyId, options); + provider.flush(); + } + + /** Test failure of create/delete encryption zones as a non super user. */ + @Test(timeout = 30000) + public void testCreateAndDeleteEncryptionZoneAsNonSuperUser() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + + final UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] { "mygroup" }); + + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0700)); + + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final HdfsAdmin userAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + try { + userAdmin.createEncryptionZone(TEST_PATH, null); + fail("createEncryptionZone is superuser-only operation"); + } catch (AccessControlException e) { + GenericTestUtils.assertExceptionContains( + "Superuser privilege is required", e); + } + return null; + } + }); + dfsAdmin.createEncryptionZone(TEST_PATH, null); + + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final HdfsAdmin userAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + try { + userAdmin.deleteEncryptionZone(TEST_PATH); + fail("deleteEncryptionZone is superuser-only operation"); + } catch (AccessControlException e) { + GenericTestUtils.assertExceptionContains( + "Superuser privilege is required", e); + } + return null; + } + }); + } + + /** Test failure of deleting an EZ passing a directory that doesn't exist. */ + @Test(timeout = 30000) + public void testDeleteEncryptionZoneDirectoryDoesntExist() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + try { + dfsAdmin.deleteEncryptionZone(TEST_PATH); + fail("Directory doesn't exist"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains( + "is not the root of an encryption zone", e); + } + } + + /** Test failure of deleting an EZ which is not empty. 
*/ + @Test(timeout = 30000) + public void testDeleteEncryptionZoneAndGenerateKeyDirectoryNotEmpty() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(TEST_PATH, null); + FileSystem.create(fs, new Path("/test/foo"), + new FsPermission((short) 0777)); + try { + dfsAdmin.deleteEncryptionZone(TEST_PATH); + fail("Directory not empty"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("non-empty directory", e); + } + } + + /** Test success of deleting an EZ. */ + @Test(timeout = 30000) + public void testDeleteEncryptionZone() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(TEST_PATH, null); + List zones = dfsAdmin.listEncryptionZones(); + Preconditions.checkState(zones.size() == 1, "More than one zone found?"); + dfsAdmin.deleteEncryptionZone(TEST_PATH); + zones = dfsAdmin.listEncryptionZones(); + Preconditions.checkState(zones.size() == 0, "More than one zone found?"); + } + + /** + * Test failure of deleting an EZ on a subdir that is not the root of an EZ. + */ + @Test(timeout = 30000) + public void testDeleteEncryptionZoneInExistingEncryptionZone() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(TEST_PATH, null); + FileSystem.mkdirs(fs, TEST_PATH_WITH_CHILD, new FsPermission((short) 0777)); + try { + dfsAdmin.deleteEncryptionZone(TEST_PATH_WITH_CHILD); + fail("EZ in an EZ"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains( + "is not the root of an encryption zone", e); + } + } + + /** + * Test success of creating and deleting an encryption zone a few levels down. + */ + @Test(timeout = 30000) + public void testCreateAndDeleteEncryptionZoneDownAFewLevels() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(TEST_PATH, null); + FileSystem.mkdirs(fs, TEST_PATH_WITH_MULTIPLE_CHILDREN, + new FsPermission((short) 0777)); + try { + dfsAdmin.deleteEncryptionZone(TEST_PATH_WITH_MULTIPLE_CHILDREN); + fail("EZ in an EZ"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains( + "is not the root of an encryption zone", e); + } + final List zones = dfsAdmin.listEncryptionZones(); + Preconditions.checkState(zones.size() == 1, "More than one zone found?"); + final EncryptionZone ez = zones.get(0); + GenericTestUtils.assertMatches(ez.toString(), + "EncryptionZone \\[path=/test, keyId="); + } + + /** Test failure of creating an EZ using a non-empty directory. 
*/ + @Test(timeout = 30000) + public void testCreateFileInEncryptionZone() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(TEST_PATH, null); + FileSystem.create(fs, TEST_PATH_WITH_CHILD, new FsPermission((short) 0777)); + + final List zones = dfsAdmin.listEncryptionZones(); + final EncryptionZone ez = zones.get(0); + GenericTestUtils.assertMatches(ez.toString(), + "EncryptionZone \\[path=/test, keyId="); + } + + /** Test listing encryption zones. */ + @Test(timeout = 30000) + public void testListEncryptionZones() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + final int N_EZs = 5; + final Set ezPathNames = new HashSet(N_EZs); + for (int i = 0; i < N_EZs; i++) { + final Path p = new Path(TEST_PATH, "" + i); + ezPathNames.add(p.toString()); + FileSystem.mkdirs(fs, p, new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(p, null); + } + + final List zones = dfsAdmin.listEncryptionZones(); + Preconditions.checkState(zones.size() == N_EZs, "wrong number of EZs returned"); + for (EncryptionZone z : zones) { + final String ezPathName = z.getPath(); + Preconditions.checkState(ezPathNames.remove( + ezPathName), "Path " + ezPathName + " not returned from listEZ"); + } + Preconditions.checkState(ezPathNames.size() == 0); + } + + /** Test listing encryption zones as a non super user. */ + @Test(timeout = 30000) + public void testListEncryptionZonesAsNonSuperUser() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + + final UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] {"mygroup"}); + + final Path TEST_PATH_SUPERUSER_ONLY = new Path(TEST_PATH, "superuseronly"); + final Path TEST_PATH_ALL = new Path(TEST_PATH, "accessall"); + + FileSystem.mkdirs(fs, TEST_PATH_SUPERUSER_ONLY, + new FsPermission((short) 0700)); + dfsAdmin.createEncryptionZone(TEST_PATH_SUPERUSER_ONLY, null); + FileSystem.mkdirs(fs, TEST_PATH_ALL, + new FsPermission((short) 0707)); + dfsAdmin.createEncryptionZone(TEST_PATH_ALL, null); + + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final HdfsAdmin userAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + try { + final List zones = userAdmin.listEncryptionZones(); + } catch (AccessControlException e) { + GenericTestUtils.assertExceptionContains( + "Superuser privilege is required", e); + } + return null; + } + }); + } +} From f43f0999d9b43ac0f72ffde616dad4e60ff01cd9 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Sun, 22 Jun 2014 07:20:47 +0000 Subject: [PATCH 015/354] HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[]. 
(wang via yliu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1604537 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 +++ .../main/java/org/apache/hadoop/crypto/CryptoCodec.java | 9 +++++---- .../org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java | 6 ++---- .../java/org/apache/hadoop/crypto/TestCryptoCodec.java | 6 ++++-- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index 4b8e982cfb3..8983f8e73cb 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -25,6 +25,9 @@ fs-encryption (Unreleased) HADOOP-10662. NullPointerException in CryptoInputStream while wrapped stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu) + HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[]. + (wang via yliu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index 277246c087b..b166423db0e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -81,9 +81,10 @@ public static CryptoCodec getInstance(Configuration conf) { public abstract void calculateIV(byte[] initIV, long counter, byte[] IV); /** - * Generate secure random. - * @param bytes length of the secure random - * @return byte[] the secure random + * Generate a number of secure, random bytes suitable for cryptographic use. + * This method needs to be thread-safe. 
+ * + * @param bytes byte array to populate with random data */ - public abstract byte[] generateSecureRandom(int bytes); + public abstract void generateSecureRandom(byte[] bytes); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java index 22a036a5602..a2eeea48c94 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java @@ -79,10 +79,8 @@ public Decryptor createDecryptor() throws GeneralSecurityException { } @Override - public byte[] generateSecureRandom(int bytes) { - final byte[] data = new byte[bytes]; - random.nextBytes(data); - return data; + public void generateSecureRandom(byte[] bytes) { + random.nextBytes(bytes); } private static class JCEAESCTRCipher implements Encryptor, Decryptor { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java index 8213ad8a77b..f4a34a18549 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java @@ -49,8 +49,10 @@ public void testSecureRandom() throws Exception { } private void checkSecureRandom(int len) { - byte[] rand = codec.generateSecureRandom(len); - byte[] rand1 = codec.generateSecureRandom(len); + byte[] rand = new byte[len]; + byte[] rand1 = new byte[len]; + codec.generateSecureRandom(rand); + codec.generateSecureRandom(rand1); Assert.assertEquals(len, rand.length); Assert.assertEquals(len, rand1.length); From 26763657999765d08cc2de609d5f7860dfd14137 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Wed, 25 Jun 2014 17:42:03 +0000 Subject: [PATCH 016/354] HDFS-6387. HDFS CLI admin tool for creating & deleting an encryption zone. 
(clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1605518 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 + .../hadoop-hdfs/src/main/bin/hdfs | 3 + .../apache/hadoop/hdfs/tools/CryptoAdmin.java | 344 ++++++++++++++++++ .../org/apache/hadoop/cli/TestCryptoCLI.java | 169 +++++++++ .../cli/util/CLICommandCryptoAdmin.java | 21 ++ .../cli/util/CryptoAdminCmdExecutor.java | 37 ++ .../src/test/resources/testCryptoConf.xml | 266 ++++++++++++++ 7 files changed, 843 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index c02b8c674dc..91b0a082a54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -10,6 +10,9 @@ fs-encryption (Unreleased) IMPROVEMENTS + HDFS-6387. HDFS CLI admin tool for creating & deleting an + encryption zone. (clamb) + HDFS-6386. HDFS Encryption Zones (clamb) HDFS-6473. Protocol and API for Encryption Zones (clamb) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index fa2d863a75c..5fbb3db1459 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -61,6 +61,7 @@ function print_usage(){ echo " portmap run a portmap service" echo " nfs3 run an NFS version 3 gateway" echo " cacheadmin configure the HDFS cache" + echo " crypto configure HDFS encryption zones" echo "" echo "Most commands print help when invoked w/o parameters." } @@ -184,6 +185,8 @@ elif [ "$COMMAND" = "nfs3" ] ; then HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NFS3_OPTS" elif [ "$COMMAND" = "cacheadmin" ] ; then CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin +elif [ "$COMMAND" = "crypto" ] ; then + CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin else CLASS="$COMMAND" fi diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java new file mode 100644 index 00000000000..9347d4de5d9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java @@ -0,0 +1,344 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools; + +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.tools.TableListing; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Tool; + +import com.google.common.base.Joiner; + +/** + * This class implements crypto command-line operations. + */ +@InterfaceAudience.Private +public class CryptoAdmin extends Configured implements Tool { + + /** + * Maximum length for printed lines + */ + private static final int MAX_LINE_WIDTH = 80; + + public CryptoAdmin() { + this(null); + } + + public CryptoAdmin(Configuration conf) { + super(conf); + } + + @Override + public int run(String[] args) throws IOException { + if (args.length == 0) { + printUsage(false); + return 1; + } + final Command command = determineCommand(args[0]); + if (command == null) { + System.err.println("Can't understand command '" + args[0] + "'"); + if (!args[0].startsWith("-")) { + System.err.println("Command names must start with dashes."); + } + printUsage(false); + return 1; + } + final List argsList = new LinkedList(); + for (int j = 1; j < args.length; j++) { + argsList.add(args[j]); + } + try { + return command.run(getConf(), argsList); + } catch (IllegalArgumentException e) { + System.err.println(prettifyException(e)); + return -1; + } + } + + public static void main(String[] argsArray) throws IOException { + final CryptoAdmin cryptoAdmin = new CryptoAdmin(new Configuration()); + System.exit(cryptoAdmin.run(argsArray)); + } + + private static DistributedFileSystem getDFS(Configuration conf) + throws IOException { + final FileSystem fs = FileSystem.get(conf); + if (!(fs instanceof DistributedFileSystem)) { + throw new IllegalArgumentException("FileSystem " + fs.getUri() + + " is not an HDFS file system"); + } + return (DistributedFileSystem) fs; + } + + /** + * NN exceptions contain the stack trace as part of the exception message. + * When it's a known error, pretty-print the error and squish the stack trace. 
+ */ + private static String prettifyException(Exception e) { + return e.getClass().getSimpleName() + ": " + + e.getLocalizedMessage().split("\n")[0]; + } + + private static TableListing getOptionDescriptionListing() { + final TableListing listing = new TableListing.Builder() + .addField("").addField("", true) + .wrapWidth(MAX_LINE_WIDTH).hideHeaders().build(); + return listing; + } + + interface Command { + String getName(); + String getShortUsage(); + String getLongUsage(); + int run(Configuration conf, List args) throws IOException; + } + + private static class CreateZoneCommand implements Command { + @Override + public String getName() { + return "-createZone"; + } + + @Override + public String getShortUsage() { + return "[" + getName() + " [-keyId ] -path " + "]\n"; + } + + @Override + public String getLongUsage() { + final TableListing listing = getOptionDescriptionListing(); + listing.addRow("", "The path of the encryption zone to create. " + + "It must be an empty directory."); + listing.addRow("", "The keyId of the new encryption zone."); + return getShortUsage() + "\n" + + "Create a new encryption zone.\n\n" + + listing.toString(); + } + + @Override + public int run(Configuration conf, List args) throws IOException { + final String path = StringUtils.popOptionWithArgument("-path", args); + if (path == null) { + System.err.println("You must specify a path with -path."); + return 1; + } + + final String keyId = + StringUtils.popOptionWithArgument("-keyId", args); + + if (!args.isEmpty()) { + System.err.println("Can't understand argument: " + args.get(0)); + return 1; + } + + final DistributedFileSystem dfs = getDFS(conf); + try { + dfs.createEncryptionZone(new Path(path), keyId); + System.out.println("Added encryption zone " + path); + } catch (IOException e) { + System.err.println(prettifyException(e)); + return 2; + } + + return 0; + } + } + + private static class DeleteZoneCommand implements Command { + @Override + public String getName() { + return "-deleteZone"; + } + + @Override + public String getShortUsage() { + return "[" + getName() + " -path " + "]\n"; + } + + @Override + public String getLongUsage() { + final TableListing listing = getOptionDescriptionListing(); + listing.addRow("", "The path of the encryption zone to delete. 
" + + "It must be an empty directory and an existing encryption zone."); + return getShortUsage() + "\n" + + "Delete an encryption zone.\n\n" + + listing.toString(); + } + + @Override + public int run(Configuration conf, List args) throws IOException { + final String path = StringUtils.popOptionWithArgument("-path", args); + if (path == null) { + System.err.println("You must specify a path with -path."); + return 1; + } + + if (!args.isEmpty()) { + System.err.println("Can't understand argument: " + args.get(0)); + return 1; + } + + final DistributedFileSystem dfs = getDFS(conf); + try { + dfs.deleteEncryptionZone(new Path(path)); + System.out.println("Deleted encryption zone " + path); + } catch (IOException e) { + System.err.println(prettifyException(e)); + return 2; + } + + return 0; + } + } + + private static class ListZonesCommand implements Command { + @Override + public String getName() { + return "-listZones"; + } + + @Override + public String getShortUsage() { + return "[" + getName()+ "]\n"; + } + + @Override + public String getLongUsage() { + return getShortUsage() + "\n" + + "List all encryption zones.\n\n"; + } + + @Override + public int run(Configuration conf, List args) throws IOException { + if (!args.isEmpty()) { + System.err.println("Can't understand argument: " + args.get(0)); + return 1; + } + + final DistributedFileSystem dfs = getDFS(conf); + try { + final TableListing listing = new TableListing.Builder() + .addField("").addField("", true) + .wrapWidth(MAX_LINE_WIDTH).hideHeaders().build(); + final List ezs = dfs.listEncryptionZones(); + for (EncryptionZone ez : ezs) { + listing.addRow(ez.getPath(), ez.getKeyId()); + } + System.out.println(listing.toString()); + } catch (IOException e) { + System.err.println(prettifyException(e)); + return 2; + } + + return 0; + } + } + + private static class HelpCommand implements Command { + @Override + public String getName() { + return "-help"; + } + + @Override + public String getShortUsage() { + return "[-help ]\n"; + } + + @Override + public String getLongUsage() { + final TableListing listing = getOptionDescriptionListing(); + listing.addRow("", "The command for which to get " + + "detailed help. 
If no command is specified, print detailed help for " + + "all commands"); + return getShortUsage() + "\n" + + "Get detailed help about a command.\n\n" + + listing.toString(); + } + + @Override + public int run(Configuration conf, List args) throws IOException { + if (args.size() == 0) { + for (Command command : COMMANDS) { + System.err.println(command.getLongUsage()); + } + return 0; + } + if (args.size() != 1) { + System.out.println("You must give exactly one argument to -help."); + return 0; + } + final String commandName = args.get(0); + // prepend a dash to match against the command names + final Command command = determineCommand("-"+commandName); + if (command == null) { + System.err.print("Sorry, I don't know the command '" + + commandName + "'.\n"); + System.err.print("Valid help command names are:\n"); + String separator = ""; + for (Command c : COMMANDS) { + System.err.print(separator + c.getName().substring(1)); + separator = ", "; + } + System.err.print("\n"); + return 1; + } + System.err.print(command.getLongUsage()); + return 0; + } + } + + private static final Command[] COMMANDS = { + new CreateZoneCommand(), + new DeleteZoneCommand(), + new ListZonesCommand(), + new HelpCommand(), + }; + + private static void printUsage(boolean longUsage) { + System.err.println( + "Usage: bin/hdfs crypto [COMMAND]"); + for (Command command : COMMANDS) { + if (longUsage) { + System.err.print(command.getLongUsage()); + } else { + System.err.print(" " + command.getShortUsage()); + } + } + System.err.println(); + } + + private static Command determineCommand(String commandName) { + for (int i = 0; i < COMMANDS.length; i++) { + if (COMMANDS[i].getName().equals(commandName)) { + return COMMANDS[i]; + } + } + return null; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java new file mode 100644 index 00000000000..32ba055caa1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.cli; + +import java.io.File; +import java.io.IOException; +import java.security.NoSuchAlgorithmException; +import java.util.UUID; + +import static org.junit.Assert.assertTrue; + +import org.apache.hadoop.cli.util.CLICommand; +import org.apache.hadoop.cli.util.CLICommandCryptoAdmin; +import org.apache.hadoop.cli.util.CLICommandTypes; +import org.apache.hadoop.cli.util.CLITestCmd; +import org.apache.hadoop.cli.util.CryptoAdminCmdExecutor; +import org.apache.hadoop.cli.util.CommandExecutor; +import org.apache.hadoop.cli.util.CommandExecutor.Result; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.tools.CryptoAdmin; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.xml.sax.SAXException; + +public class TestCryptoCLI extends CLITestHelperDFS { + protected MiniDFSCluster dfsCluster = null; + protected FileSystem fs = null; + protected String namenode = null; + private static File tmpDir; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, + HDFSPolicyProvider.class, PolicyProvider.class); + conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); + + tmpDir = new File(System.getProperty("test.build.data", "target"), + UUID.randomUUID().toString()).getAbsoluteFile(); + conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, + JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks"); + + dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + dfsCluster.waitClusterUp(); + createAKey("mykey", conf); + namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///"); + + username = System.getProperty("user.name"); + + fs = dfsCluster.getFileSystem(); + assertTrue("Not an HDFS: " + fs.getUri(), + fs instanceof DistributedFileSystem); + } + + @After + @Override + public void tearDown() throws Exception { + if (fs != null) { + fs.close(); + } + if (dfsCluster != null) { + dfsCluster.shutdown(); + } + Thread.sleep(2000); + super.tearDown(); + } + + /* Helper function to create a key in the Key Provider. 
*/ + private void createAKey(String keyId, Configuration conf) + throws NoSuchAlgorithmException, IOException { + final KeyProvider provider = + dfsCluster.getNameNode().getNamesystem().getProvider(); + final KeyProvider.Options options = KeyProvider.options(conf); + provider.createKey(keyId, options); + provider.flush(); + } + + @Override + protected String getTestFile() { + return "testCryptoConf.xml"; + } + + @Override + protected String expandCommand(final String cmd) { + String expCmd = cmd; + expCmd = expCmd.replaceAll("NAMENODE", namenode); + expCmd = expCmd.replaceAll("#LF#", + System.getProperty("line.separator")); + expCmd = super.expandCommand(expCmd); + return expCmd; + } + + @Override + protected TestConfigFileParser getConfigParser() { + return new TestConfigFileParserCryptoAdmin(); + } + + private class TestConfigFileParserCryptoAdmin extends + CLITestHelper.TestConfigFileParser { + @Override + public void endElement(String uri, String localName, String qName) + throws SAXException { + if (qName.equals("crypto-admin-command")) { + if (testCommands != null) { + testCommands.add(new CLITestCmdCryptoAdmin(charString, + new CLICommandCryptoAdmin())); + } else if (cleanupCommands != null) { + cleanupCommands.add(new CLITestCmdCryptoAdmin(charString, + new CLICommandCryptoAdmin())); + } + } else { + super.endElement(uri, localName, qName); + } + } + } + + private class CLITestCmdCryptoAdmin extends CLITestCmd { + public CLITestCmdCryptoAdmin(String str, CLICommandTypes type) { + super(str, type); + } + + @Override + public CommandExecutor getExecutor(String tag) + throws IllegalArgumentException { + if (getType() instanceof CLICommandCryptoAdmin) { + return new CryptoAdminCmdExecutor(tag, new CryptoAdmin(conf)); + } + return super.getExecutor(tag); + } + } + + @Override + protected Result execute(CLICommand cmd) throws Exception { + return cmd.getExecutor(namenode).executeCommand(cmd.getCmd()); + } + + @Test + @Override + public void testAll () { + super.testAll(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java new file mode 100644 index 00000000000..89d28a7b502 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.cli.util; + +public class CLICommandCryptoAdmin implements CLICommandTypes { +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java new file mode 100644 index 00000000000..f781bf8d877 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.cli.util; + +import org.apache.hadoop.hdfs.tools.CryptoAdmin; +import org.apache.hadoop.util.ToolRunner; + +public class CryptoAdminCmdExecutor extends CommandExecutor { + protected String namenode = null; + protected CryptoAdmin admin = null; + + public CryptoAdminCmdExecutor(String namenode, CryptoAdmin admin) { + this.namenode = namenode; + this.admin = admin; + } + + @Override + protected void execute(final String cmd) throws Exception { + String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode); + ToolRunner.run(admin, args); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml new file mode 100644 index 00000000000..82b17ffda18 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml @@ -0,0 +1,266 @@ + + + + + + + + test + + + + + + Test basic usage + + + + + + + + SubstringComparator + Usage: bin/hdfs crypto [COMMAND] + + + + + + Test create ez, dir doesn't exist + + -fs NAMENODE -ls /test- + -createZone -path /test + + + + + + SubstringComparator + cannot find /test + + + + + + Test failure of create ez on an existing ez + + -fs NAMENODE -mkdir /foo + -fs NAMENODE -ls /- + -createZone -path /foo + -createZone -path /foo + + + -deleteZone -path /foo + -fs NAMENODE -rmdir /foo + + + + SubstringComparator + Directory /foo is already in an encryption zone + + + + + + Test success of create ez in which a key is created + + -fs NAMENODE -mkdir /foo + -fs NAMENODE -ls /- + -createZone -path /foo + + + -deleteZone -path /foo + -fs NAMENODE -rmdir /foo + + + + SubstringComparator + Added encryption zone /foo + + + + + + Test failure of Create EZ operation in an existing EZ. 
+ + -fs NAMENODE -mkdir /foo + -fs NAMENODE -ls /- + -createZone -path /foo + -fs NAMENODE -mkdir /foo/bar + -createZone -path /foo/bar + + + -fs NAMENODE -rmdir /foo/bar + -deleteZone -path /foo + -fs NAMENODE -rmdir /foo + + + + SubstringComparator + Directory /foo/bar is already in an encryption zone. (/foo) + + + + + + Test failure of creating an EZ using a non-empty directory. + + -fs NAMENODE -mkdir /foo + -fs NAMENODE -touchz /foo/bar + -fs NAMENODE -ls /- + -createZone -path /foo + + + -fs NAMENODE -rm /foo/bar + -fs NAMENODE -rmdir /foo + + + + SubstringComparator + Attempt to create an encryption zone for a non-empty directory. + + + + + + Test failure of creating an EZ passing a key that doesn't exist. + + -fs NAMENODE -mkdir /foo + -fs NAMENODE -ls /- + -createZone -path /foo -keyId doesntexist + + + -fs NAMENODE -rmdir /foo + + + + SubstringComparator + Key doesntexist doesn't exist. + + + + + + Test success of creating an EZ when the key exists. + + -fs NAMENODE -mkdir /foo + -fs NAMENODE -ls /- + -createZone -path /foo -keyId mykey + + + -deleteZone -path /foo + -fs NAMENODE -rmdir /foo + + + + SubstringComparator + Added encryption zone /foo + + + + + + Test failure of deleting an EZ passing a directory that doesn't exist. + + -fs NAMENODE -ls /- + -deleteZone -path /foo + + + + + + SubstringComparator + Directory /foo is not the root of an encryption zone. + + + + + + Test failure of deleting an EZ which is not empty. + + -fs NAMENODE -mkdir /foo + -fs NAMENODE -ls /- + -createZone -path /foo + -fs NAMENODE -touchz /foo/bar + -deleteZone -path /foo + + + -fs NAMENODE -rm /foo/bar + -deleteZone -path /foo + -fs NAMENODE -rmdir /foo + + + + SubstringComparator + Attempt to delete an encryption zone for a non-empty directory. + + + + + + Test failure of deleting an EZ on a subdir that is not the root of an EZ. + + -fs NAMENODE -mkdir /foo + -fs NAMENODE -ls /- + -createZone -path /foo + -fs NAMENODE -mkdir /foo/bar + -deleteZone -path /foo/bar + + + -fs NAMENODE -rmdir /foo/bar + -deleteZone -path /foo + -fs NAMENODE -rmdir /foo + + + + SubstringComparator + Directory /foo/bar is not the root of an encryption zone. + + + + + + Test success of creating and deleting an encryption zone a few levels down. 
+ + -fs NAMENODE -mkdir /foo + -fs NAMENODE -mkdir /foo/bar + -fs NAMENODE -mkdir /foo/bar/baz + -fs NAMENODE -ls /- + -createZone -path /foo/bar/baz + -deleteZone -path /foo/bar/baz + + + -fs NAMENODE -rmdir /foo/bar/baz + -fs NAMENODE -rmdir /foo/bar + -fs NAMENODE -rmdir /foo/ + + + + SubstringComparator + Deleted encryption zone /foo/bar/baz + + + + + From 6023da8ef92f56d919f4b0638eb87efaa7ed4036 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Thu, 26 Jun 2014 19:18:09 +0000 Subject: [PATCH 017/354] Removing two zero java files git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1605881 13f79535-47bb-0310-9956-ffa450edef68 --- .../main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java | 0 .../main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java | 0 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRDecryptor.java deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTREncryptor.java deleted file mode 100644 index e69de29bb2d..00000000000 From 2efea952139b30dd1c881eed0b443ffa72be6dce Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 27 Jun 2014 20:43:41 +0000 Subject: [PATCH 018/354] HDFS-6391. Get the Key/IV from the NameNode for encrypted files in DFSClient. Contributed by Charles Lamb and Andrew Wang. 
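This change has the client wrap its raw HDFS streams in crypto streams only when the NameNode returns encryption info for the file (see DFSClient#createWrappedInputStream below). A rough stand-alone Java sketch of that presence check, where EncryptionInfo, FakeCryptoInputStream and open are hypothetical stand-ins rather than the Hadoop classes:

import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

// Sketch of "wrap only when encryption info is present" (stand-in types, not DFSClient).
public class WrapIfEncryptedSketch {
  // Hypothetical stand-in for the file's encryption metadata (key + IV).
  static class EncryptionInfo {
    final byte[] key;
    final byte[] iv;
    EncryptionInfo(byte[] key, byte[] iv) { this.key = key; this.iv = iv; }
  }

  // Placeholder "decrypting" wrapper; a real one would decrypt using the key and IV.
  static class FakeCryptoInputStream extends FilterInputStream {
    FakeCryptoInputStream(InputStream in, EncryptionInfo info) { super(in); }
  }

  // Wrap the raw stream only when encryption info is present.
  static InputStream open(InputStream raw, EncryptionInfo info) {
    if (info != null) {
      return new FakeCryptoInputStream(raw, info);  // encrypted file: wrap
    }
    return raw;                                      // plain file: no wrapping
  }

  public static void main(String[] args) throws IOException {
    InputStream plain = open(new ByteArrayInputStream(new byte[8]), null);
    InputStream enc = open(new ByteArrayInputStream(new byte[8]),
        new EncryptionInfo(new byte[16], new byte[16]));
    System.out.println(plain.getClass().getSimpleName());  // ByteArrayInputStream
    System.out.println(enc.getClass().getSimpleName());    // FakeCryptoInputStream
    plain.close();
    enc.close();
  }
}

The real client builds a CryptoInputStream or CryptoOutputStream from the CryptoCodec plus the FileEncryptionInfo's key and IV; the sketch only shows how the null check selects between wrapped and unwrapped streams.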
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1606220 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/crypto/CipherSuite.java | 62 +++++++++++ .../apache/hadoop/fs/FileEncryptionInfo.java | 83 ++++++++++++++ .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 + .../main/java/org/apache/hadoop/fs/Hdfs.java | 41 +------ .../org/apache/hadoop/hdfs/DFSClient.java | 57 +++++++++- .../apache/hadoop/hdfs/DFSInputStream.java | 26 ++--- .../apache/hadoop/hdfs/DFSOutputStream.java | 29 ++--- .../hadoop/hdfs/DistributedFileSystem.java | 32 +++--- .../hadoop/hdfs/protocol/HdfsConstants.java | 9 -- .../hadoop/hdfs/protocol/HdfsFileStatus.java | 27 ++--- .../hdfs/protocol/HdfsLocatedFileStatus.java | 6 +- .../hadoop/hdfs/protocol/LocatedBlocks.java | 22 ++-- .../SnapshottableDirectoryStatus.java | 2 +- .../hadoop/hdfs/protocolPB/PBHelper.java | 71 +++++++++--- .../server/blockmanagement/BlockManager.java | 11 +- .../server/common/HdfsServerConstants.java | 5 + .../hdfs/server/namenode/FSDirectory.java | 101 ++++++++++++++---- .../hdfs/server/namenode/FSNamesystem.java | 19 ++-- .../org/apache/hadoop/hdfs/web/JsonUtil.java | 4 +- .../hadoop-hdfs/src/main/proto/hdfs.proto | 20 ++-- .../hadoop/hdfs/TestDFSClientRetries.java | 6 +- .../org/apache/hadoop/hdfs/TestDFSUtil.java | 2 +- .../org/apache/hadoop/hdfs/TestLease.java | 4 +- .../hadoop/hdfs/server/namenode/TestFsck.java | 2 +- .../apache/hadoop/hdfs/web/TestJsonUtil.java | 2 +- 25 files changed, 442 insertions(+), 204 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java new file mode 100644 index 00000000000..6363bdb5807 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.crypto; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Defines properties of a CipherSuite. Modeled after the ciphers in + * {@link javax.crypto.Cipher}. 
+ */ +@InterfaceAudience.Private +public enum CipherSuite { + AES_CTR_NOPADDING("AES/CTR/NoPadding", 128); + + private final String name; + private final int blockBits; + + CipherSuite(String name, int blockBits) { + this.name = name; + this.blockBits = blockBits; + } + + /** + * @return name of cipher suite, as in {@link javax.crypto.Cipher} + */ + public String getName() { + return name; + } + + /** + * @return size of an algorithm block in bits + */ + public int getNumberBlockBits() { + return blockBits; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder("{"); + builder.append("name: " + getName() + ", "); + builder.append("numBlockBits: " + getNumberBlockBits()); + builder.append("}"); + return builder.toString(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java new file mode 100644 index 00000000000..53f35bde97f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import org.apache.commons.codec.binary.Hex; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.crypto.CipherSuite; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * FileEncryptionInfo encapsulates all the encryption-related information for + * an encrypted file. + */ +@InterfaceAudience.Private +public class FileEncryptionInfo { + + private final CipherSuite cipherSuite; + private final byte[] key; + private final byte[] iv; + + public FileEncryptionInfo(CipherSuite suite, byte[] key, byte[] iv) { + checkNotNull(suite); + checkNotNull(key); + checkNotNull(iv); + checkArgument(key.length == suite.getNumberBlockBits() / 8, + "Unexpected key length"); + checkArgument(iv.length == suite.getNumberBlockBits() / 8, + "Unexpected IV length"); + this.cipherSuite = suite; + this.key = key; + this.iv = iv; + } + + /** + * @return {@link org.apache.hadoop.crypto.CipherSuite} used to encrypt + * the file. 
+ */ + public CipherSuite getCipherSuite() { + return cipherSuite; + } + + /** + * @return encrypted data encryption key for the file + */ + public byte[] getEncryptedDataEncryptionKey() { + return key; + } + + /** + * @return initialization vector for the cipher used to encrypt the file + */ + public byte[] getIV() { + return iv; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder("{"); + builder.append("cipherSuite: " + cipherSuite); + builder.append(", key: " + Hex.encodeHexString(key)); + builder.append(", iv: " + Hex.encodeHexString(iv)); + builder.append("}"); + return builder.toString(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 91b0a082a54..d539ef1e1a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -23,6 +23,9 @@ fs-encryption (Unreleased) HDFS-6476. Print out the KeyProvider after finding KP successfully on startup. (Juan Yu via wang) + HDFS-6391. Get the Key/IV from the NameNode for encrypted files in + DFSClient. (Charles Lamb and wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index 23a954c0f39..e779cb51d01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -31,8 +31,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CryptoCodec; -import org.apache.hadoop.crypto.CryptoOutputStream; -import org.apache.hadoop.crypto.CryptoInputStream; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -57,8 +55,6 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.util.Progressable; -import com.google.common.base.Preconditions; - @InterfaceAudience.Private @InterfaceStability.Evolving public class Hdfs extends AbstractFileSystem { @@ -108,23 +104,8 @@ public HdfsDataOutputStream createInternal(Path f, final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f), absolutePermission, createFlag, createParent, replication, blockSize, progress, bufferSize, checksumOpt); - final byte[] key = dfsos.getKey(); - final byte[] iv = dfsos.getIv(); - Preconditions.checkState(!(key == null ^ iv == null), - "Only one of the Key and IV were found."); - if (false && key != null) { - - /* - * The Key and IV were found. Wrap up the output stream with an encryption - * wrapper. - */ - final CryptoOutputStream cbos = - new CryptoOutputStream(dfsos, factory, key, iv); - return new HdfsDataOutputStream(cbos, getStatistics()); - } else { - /* No key/IV present so no encryption. 
*/ - return new HdfsDataOutputStream(dfsos, getStatistics()); - } + return dfs.createWrappedOutputStream(dfsos, statistics, + dfsos.getInitialLen()); } @Override @@ -335,23 +316,7 @@ public HdfsDataInputStream open(Path f, int bufferSize) throws IOException, UnresolvedLinkException { final DFSInputStream dfsis = dfs.open(getUriPath(f), bufferSize, verifyChecksum); - final byte[] key = dfsis.getKey(); - final byte[] iv = dfsis.getIv(); - Preconditions.checkState(!(key == null ^ iv == null), - "Only one of the Key and IV were found."); - if (false && key != null) { - - /* - * The Key and IV were found. Wrap up the input stream with an encryption - * wrapper. - */ - final CryptoInputStream cbis = - new CryptoInputStream(dfsis, factory, key, iv); - return new HdfsDataInputStream(cbis); - } else { - /* No key/IV pair so no encryption. */ - return new HdfsDataInputStream(dfsis); - } + return dfs.createWrappedInputStream(dfsis); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index bf8cf2fb20c..013fbc0f4f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -94,6 +94,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoCodec; +import org.apache.hadoop.crypto.CryptoInputStream; +import org.apache.hadoop.crypto.CryptoOutputStream; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockStorageLocation; import org.apache.hadoop.fs.CacheFlag; @@ -101,6 +104,7 @@ import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FsStatus; @@ -241,6 +245,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory { private static final DFSHedgedReadMetrics HEDGED_READ_METRIC = new DFSHedgedReadMetrics(); private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL; + private final CryptoCodec codec; /** * DFSClient configuration @@ -573,6 +578,7 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority(); this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId(); + this.codec = CryptoCodec.getInstance(conf); int numResponseToDrop = conf.getInt( DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, @@ -1267,7 +1273,54 @@ public BlockStorageLocation[] getBlockStorageLocations( return volumeBlockLocations; } - + + /** + * Wraps the stream in a CryptoInputStream if the underlying file is + * encrypted. + */ + public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis) + throws IOException { + final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo(); + if (feInfo != null) { + // File is encrypted, wrap the stream in a crypto stream. 
+ final CryptoInputStream cryptoIn = + new CryptoInputStream(dfsis, codec, + feInfo.getEncryptedDataEncryptionKey(), feInfo.getIV()); + return new HdfsDataInputStream(cryptoIn); + } else { + // No key/IV pair so no encryption. + return new HdfsDataInputStream(dfsis); + } + } + + /** + * Wraps the stream in a CryptoOutputStream if the underlying file is + * encrypted. + */ + public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos, + FileSystem.Statistics statistics) throws IOException { + return createWrappedOutputStream(dfsos, statistics, 0); + } + + /** + * Wraps the stream in a CryptoOutputStream if the underlying file is + * encrypted. + */ + public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos, + FileSystem.Statistics statistics, long startPos) throws IOException { + final FileEncryptionInfo feInfo = dfsos.getFileEncryptionInfo(); + if (feInfo != null) { + // File is encrypted, wrap the stream in a crypto stream. + final CryptoOutputStream cryptoOut = + new CryptoOutputStream(dfsos, codec, + feInfo.getEncryptedDataEncryptionKey(), feInfo.getIV(), startPos); + return new HdfsDataOutputStream(cryptoOut, statistics, startPos); + } else { + // No key/IV present so no encryption. + return new HdfsDataOutputStream(dfsos, statistics, startPos); + } + } + public DFSInputStream open(String src) throws IOException, UnresolvedLinkException { return open(src, dfsClientConf.ioBufferSize, true, null); @@ -1595,7 +1648,7 @@ public HdfsDataOutputStream append(final String src, final int buffersize, final Progressable progress, final FileSystem.Statistics statistics ) throws IOException { final DFSOutputStream out = append(src, buffersize, progress); - return new HdfsDataOutputStream(out, statistics, out.getInitialLen()); + return createWrappedOutputStream(out, statistics, out.getInitialLen()); } private DFSOutputStream append(String src, int buffersize, Progressable progress) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index f9763eced1a..46070ffb890 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException; @@ -88,8 +89,7 @@ public class DFSInputStream extends FSInputStream private final boolean verifyChecksum; private LocatedBlocks locatedBlocks = null; private long lastBlockBeingWrittenLength = 0; - private byte[] key = null; - private byte[] iv = null; + private FileEncryptionInfo fileEncryptionInfo = null; private DatanodeInfo currentNode = null; private LocatedBlock currentLocatedBlock = null; private long pos = 0; @@ -299,8 +299,8 @@ private long fetchLocatedBlocksAndGetLastBlockLength() throws IOException { } } - key = locatedBlocks.getKey(); - iv = locatedBlocks.getIv(); + fileEncryptionInfo = locatedBlocks.getFileEncryptionInfo(); + currentNode = null; return lastBlockBeingWrittenLength; } @@ -1521,22 +1521,8 @@ public synchronized ReadStatistics 
getReadStatistics() { return new ReadStatistics(readStatistics); } - /** - * Get the encryption key for this stream. - * - * @return byte[] the key - */ - public synchronized byte[] getKey() { - return key; - } - - /** - * Get the encryption initialization vector (IV) for this stream. - * - * @return byte[] the initialization vector (IV). - */ - public synchronized byte[] getIv() { - return iv; + public synchronized FileEncryptionInfo getFileEncryptionInfo() { + return fileEncryptionInfo; } private synchronized void closeCurrentBlockReader() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index dde4bad7ef2..508e3bd4f66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -46,6 +46,7 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSOutputSummer; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.permission.FsPermission; @@ -154,9 +155,8 @@ public class DFSOutputStream extends FSOutputSummer private boolean shouldSyncBlock = false; // force blocks to disk upon close private final AtomicReference cachingStrategy; private boolean failPacket = false; - private byte[] key = null; - private byte[] iv = null; - + private FileEncryptionInfo fileEncryptionInfo; + private static class Packet { private static final long HEART_BEAT_SEQNO = -1L; final long seqno; // sequencenumber of buffer in block @@ -1564,8 +1564,7 @@ private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress, this.fileId = stat.getFileId(); this.blockSize = stat.getBlockSize(); this.blockReplication = stat.getReplication(); - this.key = stat.getKey(); - this.iv = stat.getIv(); + this.fileEncryptionInfo = stat.getFileEncryptionInfo(); this.progress = progress; this.cachingStrategy = new AtomicReference( dfsClient.getDefaultWriteCachingStrategy()); @@ -1654,6 +1653,7 @@ private DFSOutputStream(DFSClient dfsClient, String src, checksum.getBytesPerChecksum()); streamer = new DataStreamer(); } + this.fileEncryptionInfo = stat.getFileEncryptionInfo(); } static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src, @@ -2178,26 +2178,15 @@ synchronized void setTestFilename(String newname) { /** * Returns the size of a file as it was when this stream was opened */ - long getInitialLen() { + public long getInitialLen() { return initialFileSize; } /** - * Get the encryption key for this stream. - * - * @return byte[] the key. + * @return the FileEncryptionInfo for this stream, or null if not encrypted. */ - public byte[] getKey() { - return key; - } - - /** - * Get the encryption initialization vector (IV) for this stream. - * - * @return byte[] the initialization vector (IV). 
- */ - public byte[] getIv() { - return iv; + public FileEncryptionInfo getFileEncryptionInfo() { + return fileEncryptionInfo; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 0adb9202cfc..1abf85bb79b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -60,7 +60,6 @@ import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsAdmin; -import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; @@ -291,8 +290,9 @@ public FSDataInputStream open(Path f, final int bufferSize) @Override public FSDataInputStream doCall(final Path p) throws IOException, UnresolvedLinkException { - return new HdfsDataInputStream( - dfs.open(getPathName(p), bufferSize, verifyChecksum)); + final DFSInputStream dfsis = + dfs.open(getPathName(p), bufferSize, verifyChecksum); + return dfs.createWrappedInputStream(dfsis); } @Override public FSDataInputStream next(final FileSystem fs, final Path p) @@ -357,7 +357,7 @@ public HdfsDataOutputStream doCall(final Path p) : EnumSet.of(CreateFlag.CREATE), true, replication, blockSize, progress, bufferSize, null, favoredNodes); - return new HdfsDataOutputStream(out, statistics); + return dfs.createWrappedOutputStream(out, statistics); } @Override public HdfsDataOutputStream next(final FileSystem fs, final Path p) @@ -385,9 +385,10 @@ public FSDataOutputStream create(final Path f, final FsPermission permission, @Override public FSDataOutputStream doCall(final Path p) throws IOException, UnresolvedLinkException { - return new HdfsDataOutputStream(dfs.create(getPathName(p), permission, - cflags, replication, blockSize, progress, bufferSize, checksumOpt), - statistics); + final DFSOutputStream dfsos = dfs.create(getPathName(p), permission, + cflags, replication, blockSize, progress, bufferSize, + checksumOpt); + return dfs.createWrappedOutputStream(dfsos, statistics); } @Override public FSDataOutputStream next(final FileSystem fs, final Path p) @@ -404,11 +405,12 @@ protected HdfsDataOutputStream primitiveCreate(Path f, short replication, long blockSize, Progressable progress, ChecksumOpt checksumOpt) throws IOException { statistics.incrementWriteOps(1); - return new HdfsDataOutputStream(dfs.primitiveCreate( - getPathName(fixRelativePart(f)), - absolutePermission, flag, true, replication, blockSize, - progress, bufferSize, checksumOpt),statistics); - } + final DFSOutputStream dfsos = dfs.primitiveCreate( + getPathName(fixRelativePart(f)), + absolutePermission, flag, true, replication, blockSize, + progress, bufferSize, checksumOpt); + return dfs.createWrappedOutputStream(dfsos, statistics); + } /** * Same as create(), except fails if parent directory doesn't already exist. 
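
With these changes every DistributedFileSystem and Hdfs entry point (open, create, createNonRecursive, primitiveCreate, append) routes the raw DFS stream through DFSClient.createWrappedInputStream / createWrappedOutputStream, so encryption stays invisible to callers. A minimal usage sketch, assuming the cluster is already configured and /zone/file sits inside an encryption zone (the path and the written text are illustrative only, not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TransparentReadWrite {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path file = new Path("/zone/file");   // assumed to live inside an encryption zone
        try (FSDataOutputStream out = fs.create(file)) {
          // Plaintext at the API; the wrapped CryptoOutputStream encrypts before the DataNodes.
          out.writeUTF("hello, encrypted world");
        }
        try (FSDataInputStream in = fs.open(file)) {
          // Decrypted transparently when the file carries a FileEncryptionInfo.
          System.out.println(in.readUTF());
        }
      }
    }
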
@@ -428,9 +430,9 @@ public FSDataOutputStream createNonRecursive(final Path f, @Override public FSDataOutputStream doCall(final Path p) throws IOException, UnresolvedLinkException { - return new HdfsDataOutputStream(dfs.create(getPathName(p), permission, - flag, false, replication, blockSize, progress, bufferSize, null), - statistics); + final DFSOutputStream dfsos = dfs.create(getPathName(p), permission, + flag, false, replication, blockSize, progress, bufferSize, null); + return dfs.createWrappedOutputStream(dfsos, statistics); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index 06c658cb2ec..7cc8c318803 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -161,13 +161,4 @@ public static enum DatanodeReportType { public static final String SEPARATOR_DOT_SNAPSHOT_DIR = Path.SEPARATOR + DOT_SNAPSHOT_DIR; - - public static final String CRYPTO_XATTR_KEY_ID = "system.hdfs.crypto.key-id"; - public static final String CRYPTO_XATTR_KEY_VERSION_ID = - "system.hdfs.crypto.key-version-id"; - public static final String CRYPTO_XATTR_IV = "system.hdfs.crypto.iv"; - public static final int CRYPTO_KEY_SIZE = 128; - /* Temporary until we stop hard-coding these values. */ - public static final byte[] KEY = "0123456789012345".getBytes(); - public static final byte[] IV = "ABCDEFGJIJKLMNOP".getBytes(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java index b7310c5c0c4..90715f76b8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -45,8 +46,7 @@ public class HdfsFileStatus { private final String group; private final long fileId; - private final byte[] key; - private final byte[] iv; + private final FileEncryptionInfo feInfo; // Used by dir, not including dot and dotdot. Always zero for a regular file. 
private final int childrenNum; @@ -66,20 +66,12 @@ public class HdfsFileStatus { * @param group the group of the path * @param path the local name in java UTF8 encoding the same as that in-memory * @param fileId the file id + * @param feInfo the file's encryption info */ public HdfsFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, long access_time, FsPermission permission, String owner, String group, byte[] symlink, - byte[] path, long fileId, int childrenNum) { - this(length, isdir, block_replication, blocksize, modification_time, - access_time, permission, owner, group, symlink, path, fileId, - childrenNum, HdfsConstants.KEY, HdfsConstants.IV); - } - - public HdfsFileStatus(long length, boolean isdir, int block_replication, - long blocksize, long modification_time, long access_time, - FsPermission permission, String owner, String group, byte[] symlink, - byte[] path, long fileId, int childrenNum, byte[] key, byte[] iv) { + byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo) { this.length = length; this.isdir = isdir; this.block_replication = (short)block_replication; @@ -97,8 +89,7 @@ public HdfsFileStatus(long length, boolean isdir, int block_replication, this.path = path; this.fileId = fileId; this.childrenNum = childrenNum; - this.key = key; - this.iv = iv; + this.feInfo = feInfo; } /** @@ -252,12 +243,8 @@ final public long getFileId() { return fileId; } - final public byte[] getKey() { - return key; - } - - final public byte[] getIv() { - return iv; + final public FileEncryptionInfo getFileEncryptionInfo() { + return feInfo; } final public int getChildrenNum() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java index dfe566077be..6694d85c2d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -51,15 +52,16 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus { * @param path local path name in java UTF8 format * @param fileId the file id * @param locations block locations + * @param feInfo file encryption info */ public HdfsLocatedFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, long access_time, FsPermission permission, String owner, String group, byte[] symlink, byte[] path, long fileId, LocatedBlocks locations, - int childrenNum, byte[] key, byte[] iv) { + int childrenNum, FileEncryptionInfo feInfo) { super(length, isdir, block_replication, blocksize, modification_time, access_time, permission, owner, group, symlink, path, fileId, - childrenNum, key, iv); + childrenNum, feInfo); this.locations = locations; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java index 4fc2bc064c1..436fa14e1c5 100644 --- 
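
HdfsFileStatus and HdfsLocatedFileStatus now carry a single FileEncryptionInfo field in place of the separate key and iv byte arrays; a null feInfo simply means the file is not encrypted. A sketch of the new 14-argument constructor shape, with placeholder sizes, owners and ids modelled on the updated tests:

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.fs.FileEncryptionInfo;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class FileStatusWithFeInfo {
      static HdfsFileStatus plainFile() {
        // feInfo is null: the file is not inside an encryption zone.
        return new HdfsFileStatus(0, false, 1, 1024, 0, 0,
            new FsPermission((short) 0644), "owner", "group",
            new byte[0], new byte[0], 1010L, 0, null);
      }

      static HdfsFileStatus encryptedFile(byte[] edek, byte[] iv) {
        // Cipher suite, encrypted data encryption key and IV travel as one object.
        FileEncryptionInfo feInfo =
            new FileEncryptionInfo(CipherSuite.AES_CTR_NOPADDING, edek, iv);
        return new HdfsFileStatus(0, false, 1, 1024, 0, 0,
            new FsPermission((short) 0644), "owner", "group",
            new byte[0], new byte[0], 1011L, 0, feInfo);
      }
    }
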
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileEncryptionInfo; /** * Collection of blocks with their locations and the file length. @@ -35,27 +36,23 @@ public class LocatedBlocks { private final boolean underConstruction; private LocatedBlock lastLocatedBlock = null; private boolean isLastBlockComplete = false; - private final byte[] key; - private final byte[] iv; + private FileEncryptionInfo fileEncryptionInfo = null; public LocatedBlocks() { fileLength = 0; blocks = null; underConstruction = false; - key = null; - iv = null; } public LocatedBlocks(long flength, boolean isUnderConstuction, List blks, LocatedBlock lastBlock, - boolean isLastBlockCompleted, byte[] key, byte[] iv) { + boolean isLastBlockCompleted, FileEncryptionInfo feInfo) { fileLength = flength; blocks = blks; underConstruction = isUnderConstuction; this.lastLocatedBlock = lastBlock; this.isLastBlockComplete = isLastBlockCompleted; - this.key = key; - this.iv = iv; + this.fileEncryptionInfo = feInfo; } /** @@ -103,13 +100,12 @@ public long getFileLength() { public boolean isUnderConstruction() { return underConstruction; } - - public byte[] getKey() { - return key; - } - public byte[] getIv() { - return iv; + /** + * @return the FileEncryptionInfo for the LocatedBlocks + */ + public FileEncryptionInfo getFileEncryptionInfo() { + return fileEncryptionInfo; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java index b18e3f3458f..d3952833d1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java @@ -61,7 +61,7 @@ public SnapshottableDirectoryStatus(long modification_time, long access_time, int snapshotNumber, int snapshotQuota, byte[] parentFullPath) { this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time, access_time, permission, owner, group, null, localName, inodeId, - childrenNum, null /* key */, null /* IV */); + childrenNum, null); this.snapshotNumber = snapshotNumber; this.snapshotQuota = snapshotQuota; this.parentFullPath = parentFullPath; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index b25c280febe..98ac0d3cfdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -44,7 +44,6 @@ import org.apache.hadoop.ha.proto.HAServiceProtocolProtos; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.StorageType; -import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; @@ -52,6 +51,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import 
org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolStats; +import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -61,6 +61,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdfs.protocol.FsAclPermission; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; @@ -1133,8 +1134,8 @@ public static LocatedBlocks convert(LocatedBlocksProto lb) { PBHelper.convertLocatedBlock(lb.getBlocksList()), lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null, lb.getIsLastBlockComplete(), - lb.hasKey() ? lb.getKey().toByteArray() : null, - lb.hasIv() ? lb.getIv().toByteArray() : null); + lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) : + null); } public static LocatedBlocksProto convert(LocatedBlocks lb) { @@ -1146,11 +1147,8 @@ public static LocatedBlocksProto convert(LocatedBlocks lb) { if (lb.getLastLocatedBlock() != null) { builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())); } - if (lb.getKey() != null) { - builder.setKey(ByteString.copyFrom(lb.getKey())); - } - if (lb.getIv() != null) { - builder.setIv(ByteString.copyFrom(lb.getIv())); + if (lb.getFileEncryptionInfo() != null) { + builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo())); } return builder.setFileLength(lb.getFileLength()) .setUnderConstruction(lb.isUnderConstruction()) @@ -1278,8 +1276,8 @@ public static HdfsFileStatus convert(HdfsFileStatusProto fs) { fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID, fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null, fs.hasChildrenNum() ? fs.getChildrenNum() : -1, - fs.hasKey() ? fs.getKey().toByteArray() : null, - fs.hasIv() ? fs.getIv().toByteArray() : null); + fs.hasFileEncryptionInfo() ? 
convert(fs.getFileEncryptionInfo()) : + null); } public static SnapshottableDirectoryStatus convert( @@ -1329,11 +1327,8 @@ public static HdfsFileStatusProto convert(HdfsFileStatus fs) { if (fs.isSymlink()) { builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())); } - if (fs.getKey() != null) { - builder.setKey(ByteString.copyFrom(fs.getKey())); - } - if (fs.getIv() != null) { - builder.setIv(ByteString.copyFrom(fs.getIv())); + if (fs.getFileEncryptionInfo() != null) { + builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo())); } if (fs instanceof HdfsLocatedFileStatus) { LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations(); @@ -2280,5 +2275,49 @@ public static SlotId convert(ShortCircuitShmSlotProto slotId) { public static ShmId convert(ShortCircuitShmIdProto shmId) { return new ShmId(shmId.getHi(), shmId.getLo()); } -} + public static HdfsProtos.FileEncryptionInfoProto.CipherType + convert(CipherSuite type) { + switch (type) { + case AES_CTR_NOPADDING: + return HdfsProtos.FileEncryptionInfoProto.CipherType + .AES_CTR_NOPADDING; + default: + return null; + } + } + + public static CipherSuite convert( + HdfsProtos.FileEncryptionInfoProto.CipherType proto) { + switch (proto) { + case AES_CTR_NOPADDING: + return CipherSuite.AES_CTR_NOPADDING; + default: + return null; + } + } + + public static HdfsProtos.FileEncryptionInfoProto convert( + FileEncryptionInfo info) { + if (info == null) { + return null; + } + return HdfsProtos.FileEncryptionInfoProto.newBuilder() + .setType(convert(info.getCipherSuite())) + .setKey(getByteString(info.getEncryptedDataEncryptionKey())) + .setIv(getByteString(info.getIV())) + .build(); + } + + public static FileEncryptionInfo convert( + HdfsProtos.FileEncryptionInfoProto proto) { + if (proto == null) { + return null; + } + CipherSuite type = convert(proto.getType()); + byte[] key = proto.getKey().toByteArray(); + byte[] iv = proto.getIv().toByteArray(); + return new FileEncryptionInfo(type, key, iv); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 4d8863881c7..2b497bcdfbe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -842,15 +842,15 @@ private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks, final long fileSizeExcludeBlocksUnderConstruction, final boolean isFileUnderConstruction, final long offset, - final long length, final boolean needBlockToken, final boolean inSnapshot) + final long length, final boolean needBlockToken, + final boolean inSnapshot, FileEncryptionInfo feInfo) throws IOException { assert namesystem.hasReadLock(); if (blocks == null) { return null; } else if (blocks.length == 0) { return new LocatedBlocks(0, isFileUnderConstruction, - 
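
The two PBHelper.convert overloads added above are symmetric, so a FileEncryptionInfo round-trips through its protobuf form unchanged, which is what happens between the NameNode response and the client-side stream wrapping. A small sketch with placeholder key and IV bytes:

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.fs.FileEncryptionInfo;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class FeInfoRoundTrip {
      public static void main(String[] args) {
        byte[] edek = new byte[16];   // placeholder encrypted data encryption key
        byte[] iv = new byte[16];     // placeholder IV
        FileEncryptionInfo info =
            new FileEncryptionInfo(CipherSuite.AES_CTR_NOPADDING, edek, iv);

        // FileEncryptionInfo -> FileEncryptionInfoProto (the wire form)
        HdfsProtos.FileEncryptionInfoProto proto = PBHelper.convert(info);

        // FileEncryptionInfoProto -> FileEncryptionInfo (what the client reads back)
        FileEncryptionInfo back = PBHelper.convert(proto);
        System.out.println(back.getCipherSuite());   // AES_CTR_NOPADDING
      }
    }
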
Collections.emptyList(), null, false, - null /* key */, null /* IV */); + Collections.emptyList(), null, false, null); } else { if (LOG.isDebugEnabled()) { LOG.debug("blocks = " + java.util.Arrays.asList(blocks)); @@ -875,8 +875,7 @@ public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks, } return new LocatedBlocks( fileSizeExcludeBlocksUnderConstruction, isFileUnderConstruction, - locatedblocks, lastlb, isComplete, - HdfsConstants.KEY, HdfsConstants.IV); + locatedblocks, lastlb, isComplete, feInfo); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 7a83bbf21e9..7e4841835c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -292,5 +292,10 @@ static public enum BlockUCState { public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode"; public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000; + + public static final String CRYPTO_XATTR_ENCRYPTION_ZONE = + "system.hdfs.crypto.encryption.zone"; + public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO = + "system.hdfs.crypto.file.encryption.info"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 3b1517e0733..93c6fb593c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO; import static org.apache.hadoop.util.Time.now; import java.io.Closeable; @@ -29,12 +31,13 @@ import java.util.ListIterator; import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; @@ -66,6 +69,8 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.protocol.SnapshotException; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; @@ -85,10 +90,6 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import 
static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_KEY_ID; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_IV; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_KEY_VERSION_ID; - /** * Both FSDirectory and FSNamesystem manage the state of the namespace. * FSDirectory is a pure in-memory data structure, all of whose operations @@ -133,7 +134,6 @@ private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) { private final INodeMap inodeMap; // Synchronized by dirLock private long yieldCount = 0; // keep track of lock yield count. private final int inodeXAttrsLimit; //inode xattrs max limit - private final CryptoCodec codec; // lock to protect the directory and BlockMap private final ReentrantReadWriteLock dirLock; @@ -200,7 +200,7 @@ public int getWriteHoldCount() { this.inodeXAttrsLimit = conf.getInt( DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT); - this.codec = CryptoCodec.getInstance(conf); + Preconditions.checkArgument(this.inodeXAttrsLimit >= 0, "Cannot set a negative limit on the number of xattrs per inode (%s).", DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY); @@ -1470,8 +1470,8 @@ private DirectoryListing getSnapshotsListing(String src, byte[] startAfter) * @return object containing information regarding the file * or null if file not found */ - HdfsFileStatus getFileInfo(String src, boolean resolveLink) - throws UnresolvedLinkException { + HdfsFileStatus getFileInfo(String src, boolean resolveLink) + throws UnresolvedLinkException, IOException { String srcs = normalizePath(src); readLock(); try { @@ -1480,6 +1480,8 @@ HdfsFileStatus getFileInfo(String src, boolean resolveLink) } final INodesInPath inodesInPath = getLastINodeInPath(srcs, resolveLink); final INode i = inodesInPath.getINode(0); + + final int snapshotId = inodesInPath.getPathSnapshotId(); return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i, inodesInPath.getPathSnapshotId()); } finally { @@ -1498,7 +1500,7 @@ private HdfsFileStatus getFileInfo4DotSnapshot(String src) throws UnresolvedLinkException { if (getINode4DotSnapshot(src) != null) { return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null, - HdfsFileStatus.EMPTY_NAME, -1L, 0, null /* key */, null /* IV */); + HdfsFileStatus.EMPTY_NAME, -1L, 0, null); } return null; } @@ -2326,7 +2328,7 @@ private HdfsFileStatus createFileStatus(byte[] path, INode node, * Create FileStatus by file INode */ HdfsFileStatus createFileStatus(byte[] path, INode node, - int snapshot) { + int snapshot) throws IOException { long size = 0; // length is zero for directories short replication = 0; long blocksize = 0; @@ -2338,7 +2340,9 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, } int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; - + + FileEncryptionInfo feInfo = getFileEncryptionInfo(node, snapshot); + return new HdfsFileStatus( size, node.isDirectory(), @@ -2353,8 +2357,7 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, path, node.getId(), childrenNum, - HdfsConstants.KEY, // key - HdfsConstants.IV); // IV + feInfo); } /** @@ -2377,16 +2380,20 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, final boolean isUc = !inSnapshot && fileNode.isUnderConstruction(); final long fileSize = !inSnapshot && isUc ? 
fileNode.computeFileSizeNotIncludingLastUcBlock() : size; + final FileEncryptionInfo feInfo = getFileEncryptionInfo(node, snapshot); + loc = getFSNamesystem().getBlockManager().createLocatedBlocks( fileNode.getBlocks(), fileSize, isUc, 0L, size, false, - inSnapshot); + inSnapshot, feInfo); if (loc == null) { loc = new LocatedBlocks(); } } int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; - + + final FileEncryptionInfo feInfo = getFileEncryptionInfo(node, snapshot); + HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.isDirectory(), replication, blocksize, node.getModificationTime(snapshot), @@ -2394,7 +2401,7 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, getPermissionForFileStatus(node, snapshot), node.getUserName(snapshot), node.getGroupName(snapshot), node.isSymlink() ? node.asSymlink().getSymlink() : null, path, - node.getId(), loc, childrenNum, null /* key */, null /* IV */); + node.getId(), loc, childrenNum, feInfo); // Set caching information for the located blocks. if (loc != null) { CacheManager cacheManager = namesystem.getCacheManager(); @@ -2665,7 +2672,7 @@ XAttr createEncryptionZone(String src, String keyId) "Attempt to create an encryption zone for a non-empty directory."); } final XAttr keyIdXAttr = - XAttrHelper.buildXAttr(CRYPTO_XATTR_KEY_ID, keyId.getBytes()); + XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyId.getBytes()); List xattrs = Lists.newArrayListWithCapacity(1); xattrs.add(keyIdXAttr); unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); @@ -2684,7 +2691,7 @@ List deleteEncryptionZone(String src) "Attempt to delete an encryption zone for a non-empty directory."); } final XAttr keyIdXAttr = - XAttrHelper.buildXAttr(CRYPTO_XATTR_KEY_ID, null); + XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null); List xattrs = Lists.newArrayListWithCapacity(1); xattrs.add(keyIdXAttr); final List removedXAttrs = unprotectedRemoveXAttrs(src, xattrs); @@ -2698,6 +2705,62 @@ List deleteEncryptionZone(String src) } } + /** + * Set the FileEncryptionInfo for an INode. + */ + void setFileEncryptionInfo(String src, FileEncryptionInfo info) + throws IOException { + // Make the PB for the xattr + final HdfsProtos.FileEncryptionInfoProto proto = PBHelper.convert(info); + final byte[] protoBytes = proto.toByteArray(); + final XAttr fileEncryptionAttr = + XAttrHelper.buildXAttr(CRYPTO_XATTR_FILE_ENCRYPTION_INFO, protoBytes); + final List xAttrs = Lists.newArrayListWithCapacity(1); + xAttrs.add(fileEncryptionAttr); + + writeLock(); + try { + unprotectedSetXAttrs(src, xAttrs, EnumSet.of(XAttrSetFlag.CREATE)); + } finally { + writeUnlock(); + } + } + + /** + * Return the FileEncryptionInfo for an INode, or null if the INode is not + * an encrypted file. 
+ */ + FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId) + throws IOException { + if (!inode.isFile()) { + return null; + } + readLock(); + try { + List xAttrs = XAttrStorage.readINodeXAttrs(inode, snapshotId); + if (xAttrs == null) { + return null; + } + for (XAttr x : xAttrs) { + if (XAttrHelper.getPrefixName(x) + .equals(CRYPTO_XATTR_FILE_ENCRYPTION_INFO)) { + try { + HdfsProtos.FileEncryptionInfoProto proto = + HdfsProtos.FileEncryptionInfoProto.parseFrom(x.getValue()); + FileEncryptionInfo feInfo = PBHelper.convert(proto); + return feInfo; + } catch (InvalidProtocolBufferException e) { + throw new IOException("Could not parse file encryption info for " + + "inode " + inode, e); + } + } + } + return null; + } finally { + readUnlock(); + } + } + void setXAttrs(final String src, final List xAttrs, final EnumSet flag) throws IOException { writeLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 7aa48e10621..da8fc00e08d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -83,9 +83,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_KEY_SIZE; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_IV; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.CRYPTO_XATTR_KEY_VERSION_ID; import static org.apache.hadoop.util.Time.now; import java.io.*; @@ -122,6 +119,8 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CipherSuite; +import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import org.apache.hadoop.crypto.key.KeyProviderFactory; @@ -131,6 +130,7 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException; import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.InvalidPathException; @@ -154,7 +154,6 @@ import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.StorageType; -import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; @@ -530,6 +529,7 @@ private void logAuditEvent(boolean succeeded, private KeyProvider.Options providerOptions = null; private final Map encryptionZones; + private final CryptoCodec codec; private volatile boolean imageLoaded = false; private final Condition cond; @@ -747,6 +747,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { throws IOException { initializeKeyProvider(conf); providerOptions = KeyProvider.options(conf); + 
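
setFileEncryptionInfo and getFileEncryptionInfo persist the per-file encryption metadata as a serialized FileEncryptionInfoProto stored under the system.hdfs.crypto.file.encryption.info xattr. A sketch of just the serialization step they rely on, pulled out of FSDirectory and its locking for clarity (the helper class name is illustrative, not part of the patch):

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.fs.FileEncryptionInfo;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class FeInfoXAttrCodec {
      /** FileEncryptionInfo -> raw xattr value bytes. */
      static byte[] toXAttrValue(FileEncryptionInfo info) {
        return PBHelper.convert(info).toByteArray();
      }

      /** Raw xattr value bytes -> FileEncryptionInfo. */
      static FileEncryptionInfo fromXAttrValue(byte[] value)
          throws InvalidProtocolBufferException {
        HdfsProtos.FileEncryptionInfoProto proto =
            HdfsProtos.FileEncryptionInfoProto.parseFrom(value);
        return PBHelper.convert(proto);
      }
    }
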
this.codec = CryptoCodec.getInstance(conf); if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) { LOG.info("Enabling async auditlog"); @@ -1873,9 +1874,13 @@ && doAccessTime && isAccessTimeSupported()) { length = Math.min(length, fileSize - offset); isUc = false; } - LocatedBlocks blocks = + + final FileEncryptionInfo feInfo = dir.getFileEncryptionInfo(inode, + iip.getPathSnapshotId()); + + final LocatedBlocks blocks = blockManager.createLocatedBlocks(inode.getBlocks(), fileSize, - isUc, offset, length, needBlockToken, iip.isSnapshot()); + isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo); // Set caching information for the located blocks. for (LocatedBlock lb: blocks.getLocatedBlocks()) { cacheManager.setCachedLocations(lb); @@ -8296,7 +8301,7 @@ private String createNewKey(String src) final String keyId = UUID.randomUUID().toString(); // TODO pass in hdfs://HOST:PORT (HDFS-6490) providerOptions.setDescription(src); - providerOptions.setBitLength(CRYPTO_KEY_SIZE); + providerOptions.setBitLength(codec.getAlgorithmBlockSize()*8); try { provider.createKey(keyId, providerOptions); } catch (NoSuchAlgorithmException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index abfeb32c142..321630c18dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -253,7 +253,7 @@ public static HdfsFileStatus toFileStatus(final Map json, boolean includes return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication, blockSize, mTime, aTime, permission, owner, group, symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, - null /* key */, null /* IV */); + null); } /** Convert an ExtendedBlock to a Json map. */ @@ -533,7 +533,7 @@ public static LocatedBlocks toLocatedBlocks(final Map json (Map)m.get("lastLocatedBlock")); final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete"); return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks, - lastLocatedBlock, isLastBlockComplete, null /* key */, null /* IV */); + lastLocatedBlock, isLastBlockComplete, null); } /** Convert a ContentSummary to a Json string. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 6ca22a136e8..ab482a0e23d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -169,6 +169,17 @@ message DataEncryptionKeyProto { optional string encryptionAlgorithm = 6; } +/** + * Encryption information for a file. + */ +message FileEncryptionInfoProto { + enum CipherType { + AES_CTR_NOPADDING = 1; + } + required CipherType type = 1; + required bytes key = 2; + required bytes iv = 3; +} /** * A set of file blocks and their locations. @@ -179,11 +190,9 @@ message LocatedBlocksProto { required bool underConstruction = 3; optional LocatedBlockProto lastBlock = 4; required bool isLastBlockComplete = 5; - optional bytes key = 6; - optional bytes iv = 7; + optional FileEncryptionInfoProto fileEncryptionInfo = 6; } - /** * Status of a file, directory or symlink * Optionally includes a file's block locations if requested by client on the rpc call. 
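
Note that the key length requested from the KeyProvider is now derived from the codec instead of the removed CRYPTO_KEY_SIZE constant: the AES-CTR codec reports a 16 byte algorithm block, so setBitLength receives 16 * 8 = 128 bits, the same value as before. A one-liner confirming the arithmetic, valid as the API stands at this point in the series (a later patch in this set replaces getAlgorithmBlockSize() with getCipherSuite()):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CryptoCodec;

    public class KeyBitLength {
      public static void main(String[] args) {
        CryptoCodec codec = CryptoCodec.getInstance(new Configuration());
        // 16 bytes * 8 = 128 bits, matching the old hard-coded CRYPTO_KEY_SIZE.
        System.out.println(codec.getAlgorithmBlockSize() * 8);
      }
    }
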
@@ -215,9 +224,8 @@ message HdfsFileStatusProto { optional uint64 fileId = 13 [default = 0]; // default as an invalid id optional int32 childrenNum = 14 [default = -1]; - // Optional fields for key/iv for encryption - optional bytes key = 15; - optional bytes iv = 16; + // Optional field for file encryption + optional FileEncryptionInfoProto fileEncryptionInfo = 15; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index f42496920b5..ae1536a8e7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -253,12 +253,12 @@ public Object answer(InvocationOnMock invocation) Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], - 1010, 0, null, null)).when(mockNN).getFileInfo(anyString()); + 1010, 0, null)).when(mockNN).getFileInfo(anyString()); Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], - 1010, 0, null, null)) + 1010, 0, null)) .when(mockNN) .create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable) anyObject(), anyBoolean(), @@ -495,7 +495,7 @@ private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) { badBlocks.add(badLocatedBlock); return new LocatedBlocks(goodBlockList.getFileLength(), false, badBlocks, null, true, - null /* key */, null /* IV */); + null); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 4c34681e4fe..c64ae1ca072 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -95,7 +95,7 @@ public void testLocatedBlocks2Locations() { LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true); List ls = Arrays.asList(l1, l2); - LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null); + LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null); BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index 6487843d008..63c6443f6fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -339,12 +339,12 @@ public void testFactory() throws Exception { Mockito.doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], - 1010, 0, null, null)).when(mcp).getFileInfo(anyString()); + 1010, 0, null)).when(mcp).getFileInfo(anyString()); Mockito .doReturn( new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission( (short) 777), "owner", "group", new byte[0], new byte[0], - 1010, 0, null, null)) + 1010, 0, null)) .when(mcp) .create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable) anyObject(), anyBoolean(), diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 17df7f6ef22..93e2fc6382f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -1015,7 +1015,7 @@ public void testFsckFileNotFound() throws Exception { HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, perms, owner, group, symlink, path, - fileId, numChildren, null, null); + fileId, numChildren, null); Result res = new Result(conf); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java index 345a75a63c5..b8150f7e357 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java @@ -64,7 +64,7 @@ public void testHdfsFileStatus() { final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26, now, now + 10, new FsPermission((short) 0644), "user", "group", DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"), - INodeId.GRANDFATHER_INODE_ID, 0, null, null); + INodeId.GRANDFATHER_INODE_ID, 0, null); final FileStatus fstatus = toFileStatus(status, parent); System.out.println("status = " + status); System.out.println("fstatus = " + fstatus); From 31617733aca2025cff1ffb841a533a5b1de016a5 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Fri, 27 Jun 2014 21:45:18 +0000 Subject: [PATCH 019/354] HDFS-6389. Rename restrictions for encryption zones. (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1606253 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../hdfs/server/namenode/FSDirectory.java | 34 +++++++ .../hdfs/server/namenode/FSNamesystem.java | 2 +- .../hadoop/hdfs/TestEncryptionZonesAPI.java | 25 ++++- .../TestFileContextEncryptionZones.java | 70 ++++++++++++++ .../src/test/resources/testCryptoConf.xml | 94 +++++++++++++++++++ 6 files changed, 225 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextEncryptionZones.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index d539ef1e1a4..c1876d4acba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -26,6 +26,8 @@ fs-encryption (Unreleased) HDFS-6391. Get the Key/IV from the NameNode for encrypted files in DFSClient. (Charles Lamb and wang) + HDFS-6389. Rename restrictions for encryption zones. 
(clamb) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 93c6fb593c5..5c5ff8bb85e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; import org.apache.hadoop.hdfs.protocol.FsAclPermission; @@ -544,6 +545,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) return false; } + checkEncryptionZoneMoveValidity(src, dst); // Ensure dst has quota to accommodate rename verifyFsLimitsForRename(srcIIP, dstIIP); verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes()); @@ -748,6 +750,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, throw new IOException(error); } + checkEncryptionZoneMoveValidity(src, dst); final INode dstInode = dstIIP.getLastINode(); List snapshottableDirs = new ArrayList(); @@ -971,6 +974,37 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, throw new IOException("rename from " + src + " to " + dst + " failed."); } + private void checkEncryptionZoneMoveValidity(String src, String dst) + throws IOException { + final EncryptionZone srcEZ = + getFSNamesystem().getEncryptionZoneForPath(src); + final EncryptionZone dstEZ = + getFSNamesystem().getEncryptionZoneForPath(dst); + final boolean srcInEZ = srcEZ != null; + final boolean dstInEZ = dstEZ != null; + if (srcInEZ) { + if (!dstInEZ) { + throw new IOException(src + " can't be moved from an encryption zone."); + } + } else { + if (dstInEZ) { + throw new IOException(src + " can't be moved into an encryption zone."); + } + } + + if (srcInEZ || dstInEZ) { + if (!srcEZ.getPath().equals(dstEZ.getPath())) { + final StringBuilder sb = new StringBuilder(src); + sb.append(" can't be moved from encryption zone "); + sb.append(srcEZ.getPath()); + sb.append(" to encryption zone "); + sb.append(dstEZ.getPath()); + sb.append("."); + throw new IOException(sb.toString()); + } + } + } + /** * Set file replication * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index da8fc00e08d..e5d879ed159 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8395,7 +8395,7 @@ List listEncryptionZones() throws IOException { } /** Lookup the encryption zone of a path. 
*/ - private EncryptionZone getEncryptionZoneForPath(String src) { + EncryptionZone getEncryptionZoneForPath(String src) { final String[] components = INode.getPathNames(src); for (int i = components.length; i > 0; i--) { final List l = Arrays.asList(Arrays.copyOfRange(components, 0, i)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java index eb851d4dcb6..50ec77777ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java @@ -65,7 +65,11 @@ public void setUpCluster() throws IOException { conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks"); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - fs = cluster.getFileSystem(); + fs = createFileSystem(conf); + } + + protected FileSystem createFileSystem(Configuration conf) throws IOException { + return cluster.getFileSystem(); } @After @@ -401,4 +405,23 @@ public Object run() throws Exception { } }); } + + /** Test success of Rename EZ on a directory which is already an EZ. */ + @Test(timeout = 30000) + public void testRenameEncryptionZone() + throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + FileSystem.mkdirs(fs, TEST_PATH_WITH_CHILD, + new FsPermission((short) 0777)); + dfsAdmin.createEncryptionZone(TEST_PATH_WITH_CHILD, null); + FileSystem.mkdirs(fs, TEST_PATH_WITH_MULTIPLE_CHILDREN, + new FsPermission((short) 0777)); + try { + fs.rename(TEST_PATH_WITH_MULTIPLE_CHILDREN, TEST_PATH); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains( + "/test/foo/baz can't be moved from an encryption zone.", e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextEncryptionZones.java new file mode 100644 index 00000000000..da283f5f85b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextEncryptionZones.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.net.URI; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.TestEncryptionZonesAPI; +import org.junit.BeforeClass; + +/** + * Tests of encryption zone operations using FileContext APIs. + */ +public class TestFileContextEncryptionZones extends TestEncryptionZonesAPI { + + @Override + protected FileSystem createFileSystem(Configuration conf) throws IOException { + FileContextFS fcFs = new FileContextFS(); + fcFs.initialize(FileSystem.getDefaultUri(conf), conf); + return fcFs; + } + + /** + * This reuses FSXAttrBaseTest's testcases by creating a filesystem + * implementation which uses FileContext by only overriding the xattr related + * methods. Other operations will use the normal filesystem. + */ + public static class FileContextFS extends DistributedFileSystem { + + private FileContext fc; + + @Override + public void initialize(URI uri, Configuration conf) throws IOException { + super.initialize(uri, conf); + fc = FileContext.getFileContext(conf); + } + + @Override + public boolean rename(Path src, Path dst) throws IOException { + fc.rename(src, dst); + return true; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml index 82b17ffda18..1bf8f74fb14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml @@ -262,5 +262,99 @@ + + + Test failure of renaming file cross EZ's + + -fs NAMENODE -mkdir /src + -fs NAMENODE -mkdir /dst + -fs NAMENODE -ls /- + -createZone -path /src + -createZone -path /dst + -fs NAMENODE -mkdir /src/subdir + -fs NAMENODE -mv /src/subdir /dst- + + + -fs NAMENODE -rmdir /src/subdir + -deleteZone -path /src + -deleteZone -path /dst + -fs NAMENODE -rmdir /src + -fs NAMENODE -rmdir /dst + + + + SubstringComparator + /src/subdir can't be moved from encryption zone /src to encryption zone /dst. 
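
checkEncryptionZoneMoveValidity enforces three rules: a path cannot be renamed out of an encryption zone, cannot be renamed into one, and cannot be renamed between two different zones; renames that stay within a single zone succeed. A sketch of the client-visible behaviour, assuming /src and /dst were created as separate zones as in the tests above (paths are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RenameAcrossZones {
      static void demo(FileSystem fs) throws IOException {
        // Allowed: source and destination both live inside the same zone (/src).
        fs.rename(new Path("/src/subdir1"), new Path("/src/subdir2"));

        // Rejected: the destination is in a different zone; the NameNode reports
        // "/src/subdir can't be moved from encryption zone /src to encryption zone /dst."
        try {
          fs.rename(new Path("/src/subdir"), new Path("/dst/subdir"));
        } catch (IOException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }
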
+ + + + + + Test failure of renaming a non-EZ file into an EZ + + -fs NAMENODE -mkdir /src + -fs NAMENODE -mkdir /dst + -fs NAMENODE -ls /- + -createZone -path /dst + -fs NAMENODE -mv /src /dst- + + + -deleteZone -path /dst + -fs NAMENODE -rmdir /src + -fs NAMENODE -rmdir /dst + + + + SubstringComparator + /src can't be moved into an encryption zone + + + + + + Test failure of renaming a non-EZ file from an EZ + + -fs NAMENODE -mkdir /src + -fs NAMENODE -mkdir /dst + -fs NAMENODE -ls /- + -createZone -path /src + -fs NAMENODE -mv /src /dst- + + + -deleteZone -path /src + -fs NAMENODE -rmdir /src + -fs NAMENODE -rmdir /dst + + + + SubstringComparator + /src can't be moved from an encryption zone + + + + + + Test success of renaming file intra-EZ + + -fs NAMENODE -mkdir /src + -createZone -path /src + -fs NAMENODE -mkdir /src/subdir1 + -fs NAMENODE -mkdir /src/subdir2 + -fs NAMENODE -mv /src/subdir1 /src/subdir2- + + + -fs NAMENODE -rmdir /src/subdir2/subdir1 + -fs NAMENODE -rmdir /src/subdir2 + -deleteZone -path /src + -fs NAMENODE -rmdir /src + + + + SubstringComparator + + + + + From 51b97a1396a4cb32aaa08b451985a6af236c0c4b Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 2 Jul 2014 23:08:29 +0000 Subject: [PATCH 020/354] HDFS-6605.Client server negotiation of cipher suite. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1607499 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/crypto/AESCTRCryptoCodec.java | 9 +- .../org/apache/hadoop/crypto/CipherSuite.java | 33 ++++-- .../org/apache/hadoop/crypto/CryptoCodec.java | 8 +- .../hadoop/crypto/CryptoInputStream.java | 4 +- .../hadoop/crypto/CryptoOutputStream.java | 6 +- .../hadoop/crypto/CryptoStreamUtils.java | 3 +- .../hadoop/crypto/JCEAESCTRCryptoCodec.java | 4 +- .../apache/hadoop/fs/FileEncryptionInfo.java | 5 +- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../org/apache/hadoop/hdfs/DFSClient.java | 10 +- .../apache/hadoop/hdfs/DFSOutputStream.java | 17 +-- .../hdfs/UnknownCipherSuiteException.java | 38 ++++++ .../hadoop/hdfs/protocol/ClientProtocol.java | 4 +- ...amenodeProtocolServerSideTranslatorPB.java | 3 +- .../ClientNamenodeProtocolTranslatorPB.java | 13 +- .../hadoop/hdfs/protocolPB/PBHelper.java | 44 +++++-- .../server/blockmanagement/BlockManager.java | 2 +- .../hdfs/server/namenode/FSNamesystem.java | 112 +++++++++++++++--- .../server/namenode/NameNodeRpcServer.java | 6 +- .../main/proto/ClientNamenodeProtocol.proto | 1 + .../hadoop-hdfs/src/main/proto/hdfs.proto | 13 +- .../hadoop/hdfs/TestDFSClientRetries.java | 4 +- .../hadoop/hdfs/TestEncryptionZonesAPI.java | 71 ++++++++++- .../apache/hadoop/hdfs/TestFileCreation.java | 2 +- .../org/apache/hadoop/hdfs/TestLease.java | 5 +- .../namenode/NNThroughputBenchmark.java | 5 +- .../server/namenode/TestAddBlockRetry.java | 4 +- .../namenode/TestNamenodeRetryCache.java | 9 +- .../namenode/ha/TestRetryCacheWithHA.java | 2 +- 29 files changed, 347 insertions(+), 92 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java index 252e001e66f..e26135d2669 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java @@ -25,16 +25,19 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class AESCTRCryptoCodec extends CryptoCodec { + + protected static final CipherSuite SUITE = CipherSuite.AES_CTR_NOPADDING; + /** * For AES, the algorithm block is fixed size of 128 bits. * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard */ - private static final int AES_BLOCK_SIZE = 16; + private static final int AES_BLOCK_SIZE = SUITE.getAlgorithmBlockSize(); private static final int CTR_OFFSET = 8; @Override - public int getAlgorithmBlockSize() { - return AES_BLOCK_SIZE; + public CipherSuite getCipherSuite() { + return SUITE; } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java index 6363bdb5807..9c4b8fdd8d4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java @@ -19,7 +19,6 @@ package org.apache.hadoop.crypto; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; /** * Defines properties of a CipherSuite. Modeled after the ciphers in @@ -27,14 +26,25 @@ */ @InterfaceAudience.Private public enum CipherSuite { - AES_CTR_NOPADDING("AES/CTR/NoPadding", 128); + UNKNOWN("Unknown", 0), + AES_CTR_NOPADDING("AES/CTR/NoPadding", 16); private final String name; - private final int blockBits; + private final int algoBlockSize; - CipherSuite(String name, int blockBits) { + private Integer unknownValue = null; + + CipherSuite(String name, int algoBlockSize) { this.name = name; - this.blockBits = blockBits; + this.algoBlockSize = algoBlockSize; + } + + public void setUnknownValue(int unknown) { + this.unknownValue = unknown; + } + + public int getUnknownValue() { + return unknownValue; } /** @@ -45,17 +55,20 @@ public String getName() { } /** - * @return size of an algorithm block in bits + * @return size of an algorithm block in bytes */ - public int getNumberBlockBits() { - return blockBits; + public int getAlgorithmBlockSize() { + return algoBlockSize; } @Override public String toString() { StringBuilder builder = new StringBuilder("{"); - builder.append("name: " + getName() + ", "); - builder.append("numBlockBits: " + getNumberBlockBits()); + builder.append("name: " + name); + builder.append(", algorithmBlockSize: " + algoBlockSize); + if (unknownValue != null) { + builder.append(", unknownValue: " + unknownValue); + } builder.append("}"); return builder.toString(); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index b166423db0e..7d4e65ba4d3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -39,13 +39,11 @@ public static CryptoCodec getInstance(Configuration conf) { CryptoCodec.class); return ReflectionUtils.newInstance(klass, conf); } - + /** - * Get the block size of a block cipher. - * For different algorithms, the block size may be different. - * @return int the block size + * @return the CipherSuite for this codec. 
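With this change the algorithm block size is carried by the CipherSuite in bytes (16 for AES/CTR/NoPadding, previously expressed as 128 bits), and callers reach it through the codec's suite rather than a dedicated getter. A short sketch of the intended call pattern, using only the methods shown in these hunks; the stream offset is made up for illustration:

    // Sketch: the block size is now a property of the CipherSuite, in bytes.
    CryptoCodec codec = CryptoCodec.getInstance(new Configuration());
    CipherSuite suite = codec.getCipherSuite();
    int blockSize = suite.getAlgorithmBlockSize();   // 16 when the suite is AES_CTR_NOPADDING
    long position = 8192 + 5;                        // illustrative stream offset
    long counter = position / blockSize;             // as CryptoInputStream does below
    byte padding = (byte) (position % blockSize);    // likewise for the padding byte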
*/ - public abstract int getAlgorithmBlockSize(); + public abstract CipherSuite getCipherSuite(); /** * Create a {@link org.apache.hadoop.crypto.Encryptor}. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java index 55c891a0ace..e8964ed6ed5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java @@ -265,11 +265,11 @@ private byte afterDecryption(Decryptor decryptor, ByteBuffer inBuffer, } private long getCounter(long position) { - return position / codec.getAlgorithmBlockSize(); + return position / codec.getCipherSuite().getAlgorithmBlockSize(); } private byte getPadding(long position) { - return (byte)(position % codec.getAlgorithmBlockSize()); + return (byte)(position % codec.getCipherSuite().getAlgorithmBlockSize()); } /** Calculate the counter and iv, update the decryptor. */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java index 61eca0a4616..4f9f7f5c6c4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java @@ -194,8 +194,10 @@ private void encrypt() throws IOException { /** Update the {@link #encryptor}: calculate counter and {@link #padding}. */ private void updateEncryptor() throws IOException { - final long counter = streamOffset / codec.getAlgorithmBlockSize(); - padding = (byte)(streamOffset % codec.getAlgorithmBlockSize()); + final long counter = + streamOffset / codec.getCipherSuite().getAlgorithmBlockSize(); + padding = + (byte)(streamOffset % codec.getCipherSuite().getAlgorithmBlockSize()); inBuffer.position(padding); // Set proper position for input data. 
codec.calculateIV(initIV, counter, iv); encryptor.init(key, iv); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java index dfa27df172d..820d77580cd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java @@ -53,7 +53,8 @@ public static int getBufferSize(Configuration conf) { public static int checkBufferSize(CryptoCodec codec, int bufferSize) { Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE, "Minimum value of buffer size is " + MIN_BUFFER_SIZE + "."); - return bufferSize - bufferSize % codec.getAlgorithmBlockSize(); + return bufferSize - bufferSize % codec.getCipherSuite() + .getAlgorithmBlockSize(); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java index a2eeea48c94..e575e5ed66c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java @@ -92,9 +92,9 @@ public JCEAESCTRCipher(int mode, String provider) throws GeneralSecurityException { this.mode = mode; if (provider == null || provider.isEmpty()) { - cipher = Cipher.getInstance("AES/CTR/NoPadding"); + cipher = Cipher.getInstance(SUITE.getName()); } else { - cipher = Cipher.getInstance("AES/CTR/NoPadding", provider); + cipher = Cipher.getInstance(SUITE.getName(), provider); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java index 53f35bde97f..77f4cdfe70d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java @@ -19,7 +19,6 @@ import org.apache.commons.codec.binary.Hex; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.crypto.CipherSuite; import static com.google.common.base.Preconditions.checkArgument; @@ -40,9 +39,9 @@ public FileEncryptionInfo(CipherSuite suite, byte[] key, byte[] iv) { checkNotNull(suite); checkNotNull(key); checkNotNull(iv); - checkArgument(key.length == suite.getNumberBlockBits() / 8, + checkArgument(key.length == suite.getAlgorithmBlockSize(), "Unexpected key length"); - checkArgument(iv.length == suite.getNumberBlockBits() / 8, + checkArgument(iv.length == suite.getAlgorithmBlockSize(), "Unexpected IV length"); this.cipherSuite = suite; this.key = key; diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index c1876d4acba..dcd22697dc4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -28,6 +28,8 @@ fs-encryption (Unreleased) HDFS-6389. Rename restrictions for encryption zones. (clamb) + HDFS-6605. Client server negotiation of cipher suite. 
(wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 013fbc0f4f3..1e3a179b81a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -94,6 +94,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.CryptoInputStream; import org.apache.hadoop.crypto.CryptoOutputStream; @@ -246,6 +247,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory { new DFSHedgedReadMetrics(); private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL; private final CryptoCodec codec; + @VisibleForTesting + List cipherSuites; /** * DFSClient configuration @@ -579,6 +582,8 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId(); this.codec = CryptoCodec.getInstance(conf); + this.cipherSuites = Lists.newArrayListWithCapacity(1); + cipherSuites.add(codec.getCipherSuite()); int numResponseToDrop = conf.getInt( DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, @@ -1523,7 +1528,8 @@ public DFSOutputStream create(String src, } final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this, src, masked, flag, createParent, replication, blockSize, progress, - buffersize, dfsClientConf.createChecksum(checksumOpt), favoredNodeStrs); + buffersize, dfsClientConf.createChecksum(checksumOpt), + favoredNodeStrs, cipherSuites); beginFileLease(result.getFileId(), result); return result; } @@ -1570,7 +1576,7 @@ public DFSOutputStream primitiveCreate(String src, DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt); result = DFSOutputStream.newStreamForCreate(this, src, absPermission, flag, createParent, replication, blockSize, progress, buffersize, - checksum); + checksum, null, cipherSuites); } beginFileLease(result.getFileId(), result); return result; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 508e3bd4f66..c74f2d019e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -42,6 +42,7 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.fs.CanSetDropBehind; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSOutputSummer; @@ -1605,12 +1606,13 @@ private DFSOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat, static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src, FsPermission masked, EnumSet flag, boolean createParent, short replication, long blockSize, Progressable progress, int buffersize, - DataChecksum checksum, String[] favoredNodes) throws IOException { + DataChecksum checksum, String[] favoredNodes, + List cipherSuites) throws IOException { 
final HdfsFileStatus stat; try { stat = dfsClient.namenode.create(src, masked, dfsClient.clientName, new EnumSetWritable(flag), createParent, replication, - blockSize); + blockSize, cipherSuites); } catch(RemoteException re) { throw re.unwrapRemoteException(AccessControlException.class, DSQuotaExceededException.class, @@ -1620,7 +1622,8 @@ static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src, NSQuotaExceededException.class, SafeModeException.class, UnresolvedPathException.class, - SnapshotAccessControlException.class); + SnapshotAccessControlException.class, + UnknownCipherSuiteException.class); } final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat, flag, progress, checksum, favoredNodes); @@ -1628,14 +1631,6 @@ static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src, return out; } - static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src, - FsPermission masked, EnumSet flag, boolean createParent, - short replication, long blockSize, Progressable progress, int buffersize, - DataChecksum checksum) throws IOException { - return newStreamForCreate(dfsClient, src, masked, flag, createParent, replication, - blockSize, progress, buffersize, checksum, null); - } - /** Construct a new output stream for append. */ private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java new file mode 100644 index 00000000000..b85edf69c48 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class UnknownCipherSuiteException extends IOException { + private static final long serialVersionUID = 8957192l; + + public UnknownCipherSuiteException() { + super(); + } + + public UnknownCipherSuiteException(String msg) { + super(msg); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 203d10f74c1..83e4278e16b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; @@ -186,7 +187,8 @@ public LocatedBlocks getBlockLocations(String src, @AtMostOnce public HdfsFileStatus create(String src, FsPermission masked, String clientName, EnumSetWritable flag, - boolean createParent, short replication, long blockSize) + boolean createParent, short replication, long blockSize, + List cipherSuites) throws AccessControlException, AlreadyBeingCreatedException, DSQuotaExceededException, FileAlreadyExistsException, FileNotFoundException, NSQuotaExceededException, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 57c456b2233..3fdf5a07eb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -375,7 +375,8 @@ public CreateResponseProto create(RpcController controller, HdfsFileStatus result = server.create(req.getSrc(), PBHelper.convert(req.getMasked()), req.getClientName(), PBHelper.convertCreateFlag(req.getCreateFlag()), req.getCreateParent(), - (short) req.getReplication(), req.getBlockSize()); + (short) req.getReplication(), req.getBlockSize(), + PBHelper.convertCipherSuiteProtos(req.getCipherSuitesList())); if (result != null) { return CreateResponseProto.newBuilder().setFs(PBHelper.convert(result)) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 2a7ddd5305d..81d4e432f49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; 
+import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.ContentSummary; @@ -249,21 +250,25 @@ public FsServerDefaults getServerDefaults() throws IOException { @Override public HdfsFileStatus create(String src, FsPermission masked, String clientName, EnumSetWritable flag, - boolean createParent, short replication, long blockSize) + boolean createParent, short replication, long blockSize, + List cipherSuites) throws AccessControlException, AlreadyBeingCreatedException, DSQuotaExceededException, FileAlreadyExistsException, FileNotFoundException, NSQuotaExceededException, ParentNotDirectoryException, SafeModeException, UnresolvedLinkException, IOException { - CreateRequestProto req = CreateRequestProto.newBuilder() + CreateRequestProto.Builder builder = CreateRequestProto.newBuilder() .setSrc(src) .setMasked(PBHelper.convert(masked)) .setClientName(clientName) .setCreateFlag(PBHelper.convertCreateFlag(flag)) .setCreateParent(createParent) .setReplication(replication) - .setBlockSize(blockSize) - .build(); + .setBlockSize(blockSize); + if (cipherSuites != null) { + builder.addAllCipherSuites(PBHelper.convertCipherSuites(cipherSuites)); + } + CreateRequestProto req = builder.build(); try { CreateResponseProto res = rpcProxy.create(null, req); return res.hasFs() ? PBHelper.convert(res.getFs()) : null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 98ac0d3cfdc..0fb764b8841 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -2276,25 +2276,49 @@ public static ShmId convert(ShortCircuitShmIdProto shmId) { return new ShmId(shmId.getHi(), shmId.getLo()); } - public static HdfsProtos.FileEncryptionInfoProto.CipherType - convert(CipherSuite type) { - switch (type) { + public static HdfsProtos.CipherSuite convert(CipherSuite suite) { + switch (suite) { + case UNKNOWN: + return HdfsProtos.CipherSuite.UNKNOWN; case AES_CTR_NOPADDING: - return HdfsProtos.FileEncryptionInfoProto.CipherType - .AES_CTR_NOPADDING; + return HdfsProtos.CipherSuite.AES_CTR_NOPADDING; default: return null; } } - public static CipherSuite convert( - HdfsProtos.FileEncryptionInfoProto.CipherType proto) { + public static CipherSuite convert(HdfsProtos.CipherSuite proto) { switch (proto) { case AES_CTR_NOPADDING: return CipherSuite.AES_CTR_NOPADDING; default: + // Set to UNKNOWN and stash the unknown enum value + CipherSuite suite = CipherSuite.UNKNOWN; + suite.setUnknownValue(proto.getNumber()); + return suite; + } + } + + public static List convertCipherSuites + (List suites) { + if (suites == null) { return null; } + List protos = + Lists.newArrayListWithCapacity(suites.size()); + for (CipherSuite suite : suites) { + protos.add(convert(suite)); + } + return protos; + } + + public static List convertCipherSuiteProtos( + List protos) { + List suites = Lists.newArrayListWithCapacity(protos.size()); + for (HdfsProtos.CipherSuite proto : protos) { + suites.add(convert(proto)); + } + return suites; } public static HdfsProtos.FileEncryptionInfoProto convert( @@ -2303,7 +2327,7 @@ public static HdfsProtos.FileEncryptionInfoProto convert( return null; } return HdfsProtos.FileEncryptionInfoProto.newBuilder() 
- .setType(convert(info.getCipherSuite())) + .setSuite(convert(info.getCipherSuite())) .setKey(getByteString(info.getEncryptedDataEncryptionKey())) .setIv(getByteString(info.getIV())) .build(); @@ -2314,10 +2338,10 @@ public static FileEncryptionInfo convert( if (proto == null) { return null; } - CipherSuite type = convert(proto.getType()); + CipherSuite suite = convert(proto.getSuite()); byte[] key = proto.getKey().toByteArray(); byte[] iv = proto.getIv().toByteArray(); - return new FileEncryptionInfo(type, key, iv); + return new FileEncryptionInfo(suite, key, iv); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 2b497bcdfbe..bb600f0043d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -850,7 +850,7 @@ public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks, return null; } else if (blocks.length == 0) { return new LocatedBlocks(0, isFileUnderConstruction, - Collections.emptyList(), null, false, null); + Collections.emptyList(), null, false, feInfo); } else { if (LOG.isDebugEnabled()) { LOG.debug("blocks = " + java.util.Arrays.asList(blocks)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index e5d879ed159..ca793ab7acd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -122,7 +122,6 @@ import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.CacheFlag; @@ -154,6 +153,7 @@ import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.UnknownCipherSuiteException; import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; @@ -2296,7 +2296,50 @@ private void verifyParentDir(String src) throws FileNotFoundException, } } } - + + /** + * If the file is within an encryption zone, select the appropriate + * CipherSuite from the list provided by the client. Since the client may + * be newer, need to handle unknown CipherSuites. + * + * @param src path of the file + * @param cipherSuites client-provided list of supported CipherSuites, + * in desired order. 
+ * @return chosen CipherSuite, or null if file is not in an EncryptionZone + * @throws IOException + */ + private CipherSuite chooseCipherSuite(String src, List + cipherSuites) throws UnknownCipherSuiteException { + EncryptionZone zone = getEncryptionZoneForPath(src); + // Not in an EZ + if (zone == null) { + return null; + } + CipherSuite chosen = null; + for (CipherSuite c : cipherSuites) { + if (c.equals(CipherSuite.UNKNOWN)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Ignoring unknown CipherSuite provided by client: " + + c.getUnknownValue()); + } + continue; + } + for (CipherSuite supported : CipherSuite.values()) { + if (supported.equals(c)) { + chosen = c; + break; + } + } + } + if (chosen == null) { + throw new UnknownCipherSuiteException( + "No cipher suites provided by the client are supported." + + " Client provided: " + Arrays.toString(cipherSuites.toArray()) + + " NameNode supports: " + Arrays.toString(CipherSuite.values())); + } + return chosen; + } + /** * Create a new file entry in the namespace. * @@ -2306,7 +2349,8 @@ private void verifyParentDir(String src) throws FileNotFoundException, */ HdfsFileStatus startFile(String src, PermissionStatus permissions, String holder, String clientMachine, EnumSet flag, - boolean createParent, short replication, long blockSize) + boolean createParent, short replication, long blockSize, + List cipherSuites) throws AccessControlException, SafeModeException, FileAlreadyExistsException, UnresolvedLinkException, FileNotFoundException, ParentNotDirectoryException, IOException { @@ -2319,7 +2363,8 @@ HdfsFileStatus startFile(String src, PermissionStatus permissions, try { status = startFileInt(src, permissions, holder, clientMachine, flag, - createParent, replication, blockSize, cacheEntry != null); + createParent, replication, blockSize, cipherSuites, + cacheEntry != null); } catch (AccessControlException e) { logAuditEvent(false, "create", src); throw e; @@ -2332,16 +2377,26 @@ HdfsFileStatus startFile(String src, PermissionStatus permissions, private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, String holder, String clientMachine, EnumSet flag, boolean createParent, short replication, long blockSize, - boolean logRetryCache) throws AccessControlException, SafeModeException, + List cipherSuites, boolean logRetryCache) + throws AccessControlException, SafeModeException, FileAlreadyExistsException, UnresolvedLinkException, FileNotFoundException, ParentNotDirectoryException, IOException { if (NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: src=" + src - + ", holder=" + holder - + ", clientMachine=" + clientMachine - + ", createParent=" + createParent - + ", replication=" + replication - + ", createFlag=" + flag.toString()); + StringBuilder builder = new StringBuilder(); + builder.append("DIR* NameSystem.startFile: src=" + src + + ", holder=" + holder + + ", clientMachine=" + clientMachine + + ", createParent=" + createParent + + ", replication=" + replication + + ", createFlag=" + flag.toString() + + ", blockSize=" + blockSize); + builder.append(", cipherSuites="); + if (cipherSuites != null) { + builder.append(Arrays.toString(cipherSuites.toArray())); + } else { + builder.append("null"); + } + NameNode.stateChangeLog.debug(builder.toString()); } if (!DFSUtil.isValidName(src)) { throw new InvalidPathException(src); @@ -2368,7 +2423,8 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, checkNameNodeSafeMode("Cannot create file" + 
src); src = FSDirectory.resolvePath(src, pathComponents, dir); startFileInternal(pc, src, permissions, holder, clientMachine, create, - overwrite, createParent, replication, blockSize, logRetryCache); + overwrite, createParent, replication, blockSize, cipherSuites, + logRetryCache); stat = dir.getFileInfo(src, false); } catch (StandbyException se) { skipSync = true; @@ -2398,7 +2454,8 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, private void startFileInternal(FSPermissionChecker pc, String src, PermissionStatus permissions, String holder, String clientMachine, boolean create, boolean overwrite, boolean createParent, - short replication, long blockSize, boolean logRetryEntry) + short replication, long blockSize, List cipherSuites, + boolean logRetryEntry) throws FileAlreadyExistsException, AccessControlException, UnresolvedLinkException, FileNotFoundException, ParentNotDirectoryException, IOException { @@ -2410,6 +2467,25 @@ private void startFileInternal(FSPermissionChecker pc, String src, throw new FileAlreadyExistsException(src + " already exists as a directory"); } + + FileEncryptionInfo feInfo = null; + CipherSuite suite = chooseCipherSuite(src, cipherSuites); + if (suite != null) { + Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN), + "Chose an UNKNOWN CipherSuite!"); + // TODO: fill in actual key/iv in HDFS-6474 + // For now, populate with dummy data + byte[] key = new byte[suite.getAlgorithmBlockSize()]; + for (int i = 0; i < key.length; i++) { + key[i] = (byte)i; + } + byte[] iv = new byte[suite.getAlgorithmBlockSize()]; + for (int i = 0; i < iv.length; i++) { + iv[i] = (byte)(3+i*2); + } + feInfo = new FileEncryptionInfo(suite, key, iv); + } + final INodeFile myFile = INodeFile.valueOf(inode, src, true); if (isPermissionEnabled) { if (overwrite && myFile != null) { @@ -2465,6 +2541,12 @@ permissions, true, now())) { leaseManager.addLease(newNode.getFileUnderConstructionFeature() .getClientName(), src); + // Set encryption attributes if necessary + if (feInfo != null) { + dir.setFileEncryptionInfo(src, feInfo); + newNode = dir.getInode(newNode.getId()).asFile(); + } + // record file record in log, record new generation stamp getEditLog().logOpenFile(src, newNode, logRetryEntry); if (NameNode.stateChangeLog.isDebugEnabled()) { @@ -8301,7 +8383,8 @@ private String createNewKey(String src) final String keyId = UUID.randomUUID().toString(); // TODO pass in hdfs://HOST:PORT (HDFS-6490) providerOptions.setDescription(src); - providerOptions.setBitLength(codec.getAlgorithmBlockSize()*8); + providerOptions.setBitLength(codec.getCipherSuite() + .getAlgorithmBlockSize()*8); try { provider.createKey(keyId, providerOptions); } catch (NoSuchAlgorithmException e) { @@ -8396,6 +8479,7 @@ List listEncryptionZones() throws IOException { /** Lookup the encryption zone of a path. 
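For a create inside an encryption zone, the code above picks the first client-proposed suite the NameNode also supports, fills in a placeholder FileEncryptionInfo (real key material is deferred to HDFS-6474), and persists it via dir.setFileEncryptionInfo so it can be returned along with the block locations. A minimal sketch of how that information can be read back on the client, mirroring the assertions in TestEncryptionZonesAPI further below; the path is illustrative and fs.defaultFS is assumed to point at the cluster:

    // Sketch: inspect the FileEncryptionInfo the NameNode stored for a file.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    LocatedBlocks blocks = dfs.getClient().getLocatedBlocks("/zone/file", 0);
    FileEncryptionInfo feInfo = blocks.getFileEncryptionInfo();
    if (feInfo != null) {
      CipherSuite suite = feInfo.getCipherSuite();          // AES_CTR_NOPADDING here
      byte[] key = feInfo.getEncryptedDataEncryptionKey();  // placeholder bytes 0, 1, 2, ...
      byte[] iv = feInfo.getIV();                           // placeholder bytes 3, 5, 7, ...
    }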
*/ EncryptionZone getEncryptionZoneForPath(String src) { + assert hasReadLock(); final String[] components = INode.getPathNames(src); for (int i = components.length; i > 0; i--) { final List l = Arrays.asList(Arrays.copyOfRange(components, 0, i)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index c98e6711706..35d6266644e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -37,6 +37,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -534,7 +535,8 @@ public FsServerDefaults getServerDefaults() throws IOException { @Override // ClientProtocol public HdfsFileStatus create(String src, FsPermission masked, String clientName, EnumSetWritable flag, - boolean createParent, short replication, long blockSize) + boolean createParent, short replication, long blockSize, + List cipherSuites) throws IOException { String clientMachine = getClientMachine(); if (stateChangeLog.isDebugEnabled()) { @@ -548,7 +550,7 @@ public HdfsFileStatus create(String src, FsPermission masked, HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus( getRemoteUser().getShortUserName(), null, masked), clientName, clientMachine, flag.get(), createParent, replication, - blockSize); + blockSize, cipherSuites); metrics.incrFilesCreated(); metrics.incrCreateFileOps(); return fileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 8696775981f..70455989e7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -74,6 +74,7 @@ message CreateRequestProto { required bool createParent = 5; required uint32 replication = 6; // Short: Only 16 bits used required uint64 blockSize = 7; + repeated CipherSuite cipherSuites = 8; } message CreateResponseProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index ab482a0e23d..ea0164c4253 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -169,14 +169,19 @@ message DataEncryptionKeyProto { optional string encryptionAlgorithm = 6; } +/** + * Cipher suite. + */ +enum CipherSuite { + UNKNOWN = 1; + AES_CTR_NOPADDING = 2; +} + /** * Encryption information for a file. 
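The new repeated cipherSuites field is ignored outside an encryption zone (chooseCipherSuite returns null there); inside a zone, an empty or entirely unknown proposal list fails the create with UnknownCipherSuiteException, which DFSOutputStream unwraps for the caller. A sketch of what that failure looks like to an application; the path and the fs handle are illustrative, and the failure only occurs when the proposal list is empty or all-unknown, as the test further below forces:

    // Sketch: create() in an encryption zone surfaces the negotiation failure.
    // Assumes the surrounding method declares throws IOException.
    try {
      FSDataOutputStream out = fs.create(new Path("/zone/fail"));  // illustrative path
      out.close();
    } catch (UnknownCipherSuiteException e) {
      // "No cipher suites provided by the client are supported."
    }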
*/ message FileEncryptionInfoProto { - enum CipherType { - AES_CTR_NOPADDING = 1; - } - required CipherType type = 1; + required CipherSuite suite = 1; required bytes key = 2; required bytes iv = 3; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index ae1536a8e7f..74daccc9e2d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -24,6 +24,7 @@ import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyList; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyObject; import static org.mockito.Matchers.anyShort; @@ -51,6 +52,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; @@ -262,7 +264,7 @@ public Object answer(InvocationOnMock invocation) .when(mockNN) .create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable) anyObject(), anyBoolean(), - anyShort(), anyLong()); + anyShort(), anyLong(), (List) anyList()); final DFSClient client = new DFSClient(null, mockNN, conf, null); OutputStream os = client.create("testfile", true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java index 50ec77777ee..c787da69d7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java @@ -26,15 +26,19 @@ import java.util.Set; import java.util.UUID; +import com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderFactory; +import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; @@ -44,6 +48,7 @@ import com.google.common.base.Preconditions; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; public class TestEncryptionZonesAPI { @@ -56,7 +61,7 @@ public class TestEncryptionZonesAPI { private final Configuration conf = new Configuration(); private MiniDFSCluster cluster; private static File tmpDir; - private FileSystem fs; + private DistributedFileSystem fs; @Before public void setUpCluster() throws IOException { @@ -65,7 +70,7 @@ public void setUpCluster() throws IOException { 
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks"); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - fs = createFileSystem(conf); + fs = (DistributedFileSystem) createFileSystem(conf); } protected FileSystem createFileSystem(Configuration conf) throws IOException { @@ -424,4 +429,66 @@ public void testRenameEncryptionZone() "/test/foo/baz can't be moved from an encryption zone.", e); } } + + @Test(timeout = 60000) + public void testCipherSuiteNegotiation() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + final Path zone = new Path("/zone"); + fs.mkdirs(zone); + dfsAdmin.createEncryptionZone(zone, null); + // Create a file in an EZ, which should succeed + DFSTestUtil.createFile(fs, new Path(zone, "success1"), 0, (short) 1, + 0xFEED); + // Pass no cipherSuites, fail + fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(0); + try { + DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, + 0xFEED); + fail("Created a file without specifying a CipherSuite!"); + } catch (UnknownCipherSuiteException e) { + GenericTestUtils.assertExceptionContains("No cipher suites", e); + } + // Pass some unknown cipherSuites, fail + fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + try { + DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, + 0xFEED); + fail("Created a file without specifying a CipherSuite!"); + } catch (UnknownCipherSuiteException e) { + GenericTestUtils.assertExceptionContains("No cipher suites", e); + } + // Pass some unknown and a good cipherSuites, success + fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3); + fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + DFSTestUtil.createFile(fs, new Path(zone, "success2"), 0, (short) 1, + 0xFEED); + fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING); + DFSTestUtil.createFile(fs, new Path(zone, "success3"), 4096, (short) 1, + 0xFEED); + // Check that the specified CipherSuite was correctly saved on the NN + for (int i=2; i<=3; i++) { + LocatedBlocks blocks = + fs.getClient().getLocatedBlocks(zone.toString() + "/success2", 0); + FileEncryptionInfo feInfo = blocks.getFileEncryptionInfo(); + assertEquals(feInfo.getCipherSuite(), CipherSuite.AES_CTR_NOPADDING); + // TODO: validate against actual key/iv in HDFS-6474 + byte[] key = feInfo.getEncryptedDataEncryptionKey(); + for (int j = 0; j < key.length; j++) { + assertEquals("Unexpected key byte", (byte)j, key[j]); + } + byte[] iv = feInfo.getIV(); + for (int j = 0; j < iv.length; j++) { + assertEquals("Unexpected IV byte", (byte)(3+j*2), iv[j]); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java index 64697bf9d82..1cbcf52bf56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java @@ -1131,7 +1131,7 @@ private void doCreateTest(CreationMethod method) throws Exception { try { nnrpc.create(pathStr, new FsPermission((short)0755), "client", new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), - true, (short)1, 128*1024*1024L); + true, (short)1, 128*1024*1024L, null); fail("Should have thrown exception when creating '" + pathStr + "'" + " by " + method); } catch (InvalidPathException ipe) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index 63c6443f6fd..28c253fd157 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs; +import static org.mockito.Matchers.anyList; import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.anyShort; import static org.mockito.Matchers.anyLong; @@ -29,10 +30,12 @@ import java.io.DataOutputStream; import java.io.IOException; import java.security.PrivilegedExceptionAction; +import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -348,7 +351,7 @@ public void testFactory() throws Exception { .when(mcp) .create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable) anyObject(), anyBoolean(), - anyShort(), anyLong()); + anyShort(), anyLong(), (List) anyList()); final Configuration conf = new Configuration(); final DFSClient c1 = createDFSClientAs(ugi[0], conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 7279aff9387..1fb1c1f9942 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -587,7 +587,8 @@ long executeOp(int daemonId, int inputIdx, String clientName) // dummyActionNoSynch(fileIdx); nameNodeProto.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(), clientName, new EnumSetWritable(EnumSet - .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE); + .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, + replication, BLOCK_SIZE, null); long end = Time.now(); for(boolean written = !closeUponCreate; !written; written = nameNodeProto.complete(fileNames[daemonId][inputIdx], @@ -1133,7 +1134,7 @@ void generateInputs(int[] ignore) throws IOException { String fileName = nameGenerator.getNextFileName("ThroughputBench"); nameNodeProto.create(fileName, FsPermission.getDefault(), clientName, new EnumSetWritable(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, - BLOCK_SIZE); + BLOCK_SIZE, null); ExtendedBlock lastBlock = addBlocks(fileName, clientName); nameNodeProto.complete(fileName, clientName, lastBlock, INodeId.GRANDFATHER_INODE_ID); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java index 08c44c23edb..5153e76f965 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java @@ -128,7 +128,7 @@ public DatanodeStorageInfo[] answer(InvocationOnMock invocation) nn.create(src, FsPermission.getFileDefault(), "clientName", new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), - true, (short)3, 1024); + true, (short)3, 1024, null); // start first addBlock() LOG.info("Starting first addBlock for " + src); @@ -155,7 +155,7 @@ public void testAddBlockRetryShouldReturnBlockWithLocations() // create file nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName", new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)), true, - (short) 3, 1024); + (short) 3, 1024, null); // start first addBlock() LOG.info("Starting first addBlock for " + src); LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java index d6f38853474..974dd55d0ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java @@ -209,19 +209,20 @@ public void testCreate() throws Exception { // Two retried calls succeed newCall(); HdfsFileStatus status = namesystem.startFile(src, perm, "holder", - "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize); + "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, + BlockSize, null); Assert.assertEquals(status, namesystem.startFile(src, perm, "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), - true, (short) 1, BlockSize)); + true, (short) 1, BlockSize, null)); Assert.assertEquals(status, namesystem.startFile(src, perm, "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), - true, (short) 1, BlockSize)); + true, (short) 1, BlockSize, null)); // A non-retried call fails newCall(); try { namesystem.startFile(src, perm, "holder", "clientmachine", - EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize); + EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null); Assert.fail("testCreate - expected exception is not thrown"); } catch (IOException e) { // expected diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index a34a0365f99..8921c809b32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -394,7 +394,7 @@ void invoke() throws Exception { this.status = client.getNamenode().create(fileName, FsPermission.getFileDefault(), client.getClientName(), new EnumSetWritable(createFlag), false, DataNodes, - BlockSize); + BlockSize, 
null); } @Override From 2a3bccddd939ee0d6941aa2d22edc67dea85fe35 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Thu, 3 Jul 2014 00:58:45 +0000 Subject: [PATCH 021/354] HDFS-6625. Remove the Delete Encryption Zone function (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1607507 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../org/apache/hadoop/hdfs/DFSClient.java | 11 -- .../hadoop/hdfs/DistributedFileSystem.java | 5 - .../apache/hadoop/hdfs/client/HdfsAdmin.java | 21 --- .../hadoop/hdfs/protocol/ClientProtocol.java | 7 - ...amenodeProtocolServerSideTranslatorPB.java | 15 -- .../ClientNamenodeProtocolTranslatorPB.java | 12 -- .../hdfs/server/namenode/FSDirectory.java | 23 ---- .../hdfs/server/namenode/FSNamesystem.java | 73 +--------- .../server/namenode/NameNodeRpcServer.java | 5 - .../apache/hadoop/hdfs/tools/CryptoAdmin.java | 50 ------- .../main/proto/ClientNamenodeProtocol.proto | 2 - .../src/main/proto/encryption.proto | 7 - .../hadoop/hdfs/TestEncryptionZonesAPI.java | 130 +++--------------- .../src/test/resources/testCryptoConf.xml | 74 +--------- 15 files changed, 30 insertions(+), 407 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index dcd22697dc4..f6f68e433ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -30,6 +30,8 @@ fs-encryption (Unreleased) HDFS-6605. Client server negotiation of cipher suite. (wang) + HDFS-6625. Remove the Delete Encryption Zone function (clamb) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 1e3a179b81a..47ac88c75c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2848,17 +2848,6 @@ public void createEncryptionZone(String src, String keyId) } } - public void deleteEncryptionZone(String src) throws IOException { - checkOpen(); - try { - namenode.deleteEncryptionZone(src); - } catch (RemoteException re) { - throw re.unwrapRemoteException(AccessControlException.class, - SafeModeException.class, - UnresolvedPathException.class); - } - } - public List listEncryptionZones() throws IOException { checkOpen(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 1abf85bb79b..c348a1b3a95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1804,11 +1804,6 @@ public void createEncryptionZone(Path path, String keyId) dfs.createEncryptionZone(getPathName(path), keyId); } - /* HDFS only */ - public void deleteEncryptionZone(Path path) throws IOException { - dfs.deleteEncryptionZone(getPathName(path)); - } - /* HDFS only */ public List listEncryptionZones() throws IOException { return dfs.listEncryptionZones(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index a13edfe5c5a..018fce4e797 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -258,27 +258,6 @@ public void createEncryptionZone(Path path, String keyId) dfs.createEncryptionZone(path, keyId); } - /** - * Delete the encryption zone rooted at path. Path must refer to an existing, - * empty directory. Otherwise, an IOException is thrown. This method removes - * those extended attributes on the directory which indicate that it is part - * of an encryption zone. Following successful completion of this call, any - * new files created in the directory (or it's children) will not be - * encrypted. The directory is not removed by this method. - * - * @param path The path of the root of the encryption zone. - * - * @throws IOException if there was a general IO exception - * - * @throws AccessControlException if the caller does not have access to path - * - * @throws FileNotFoundException if the path does not exist - */ - public void deleteEncryptionZone(Path path) - throws IOException, AccessControlException, FileNotFoundException { - dfs.deleteEncryptionZone(path); - } - /** * Return a list of all {@EncryptionZone}s in the HDFS hierarchy which are * visible to the caller. If the caller is the HDFS admin, then the returned diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 83e4278e16b..a307520fc05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1266,13 +1266,6 @@ public void removeAclEntries(String src, List aclSpec) public void createEncryptionZone(String src, String keyId) throws IOException; - /** - * Delete an encryption zone - */ - @AtMostOnce - public void deleteEncryptionZone(String src) - throws IOException; - /** * Return a list of all {@EncryptionZone}s in the HDFS hierarchy which are * visible to the caller. 
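With deleteEncryptionZone removed from HdfsAdmin and ClientProtocol, there is no longer an explicit RPC for retiring a zone; deleting the zone's root directory is the remaining way a zone goes away, and the FSNamesystem.deleteInternal change further below drops the corresponding encryptionZones entry. A small sketch of the resulting admin flow, with illustrative names and the same dfsAdmin/fs handles as in the tests:

    // Sketch: after HDFS-6625 a zone is retired by deleting its root directory.
    Path zone = new Path("/zone");
    fs.mkdirs(zone);
    dfsAdmin.createEncryptionZone(zone, null);
    // ... create and read files under /zone ...
    fs.delete(zone, true);   // recursive delete; deleteInternal cleans up the zone entry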
If the caller is the HDFS admin, then the returned diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 3fdf5a07eb8..570386e04e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -173,14 +173,11 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.DeleteEncryptionZoneResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.DeleteEncryptionZoneRequestProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; -import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto; @@ -1297,18 +1294,6 @@ public CreateEncryptionZoneResponseProto createEncryptionZone( } } - @Override - public DeleteEncryptionZoneResponseProto deleteEncryptionZone( - RpcController controller, DeleteEncryptionZoneRequestProto req) - throws ServiceException { - try { - server.deleteEncryptionZone(req.getSrc()); - return DeleteEncryptionZoneResponseProto.newBuilder().build(); - } catch (IOException e) { - throw new ServiceException(e); - } - } - @Override public ListEncryptionZonesResponseProto listEncryptionZones( RpcController controller, ListEncryptionZonesRequestProto req) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 81d4e432f49..6a15ac57752 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -146,7 +146,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.DeleteEncryptionZoneRequestProto; import 
org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto; @@ -1301,17 +1300,6 @@ public void createEncryptionZone(String src, String keyId) } } - @Override - public void deleteEncryptionZone(String src) throws IOException { - final DeleteEncryptionZoneRequestProto req = - DeleteEncryptionZoneRequestProto.newBuilder().setSrc(src).build(); - try { - rpcProxy.deleteEncryptionZone(null, req); - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - } - @Override public List listEncryptionZones() throws IOException { final ListEncryptionZonesRequestProto req = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 5c5ff8bb85e..391e6a733dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -2716,29 +2716,6 @@ XAttr createEncryptionZone(String src, String keyId) } } - List deleteEncryptionZone(String src) - throws IOException { - writeLock(); - try { - if (isNonEmptyDirectory(src)) { - throw new IOException( - "Attempt to delete an encryption zone for a non-empty directory."); - } - final XAttr keyIdXAttr = - XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null); - List xattrs = Lists.newArrayListWithCapacity(1); - xattrs.add(keyIdXAttr); - final List removedXAttrs = unprotectedRemoveXAttrs(src, xattrs); - if (removedXAttrs == null || removedXAttrs.isEmpty()) { - throw new IOException( - src + " does not appear to be the root of an encryption zone"); - } - return removedXAttrs; - } finally { - writeUnlock(); - } - } - /** * Set the FileEncryptionInfo for an INode. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index ca793ab7acd..5a7cb12304c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3650,6 +3650,12 @@ private boolean deleteInternal(String src, boolean recursive, checkPermission(pc, src, false, null, FsAction.WRITE, null, FsAction.ALL, true, false); } + + final EncryptionZone ez = getEncryptionZoneForPath(src); + if (ez != null) { + encryptionZones.remove(src); + } + long mtime = now(); // Unlink the target directory from directory tree long filesRemoved = dir.delete(src, collectedBlocks, removedINodes, @@ -8393,73 +8399,8 @@ private String createNewKey(String src) return keyId; } - /** - * Delete the encryption zone on directory src. - * - * @param src the path of a directory which is the root of the encryption - * zone. The directory must be empty and must be marked as an encryption - * zone. - * - * @throws AccessControlException if the caller is not the superuser. - * - * @throws UnresolvedLinkException if the path can't be resolved. - * - * @throws SafeModeException if the Namenode is in safe mode. 
- */ - void deleteEncryptionZone(final String src) - throws IOException, UnresolvedLinkException, - SafeModeException, AccessControlException { - final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); - if (cacheEntry != null && cacheEntry.isSuccess()) { - return; // Return previous response - } - - boolean success = false; - try { - deleteEncryptionZoneInt(src, cacheEntry != null); - encryptionZones.remove(src); - success = true; - } catch (AccessControlException e) { - logAuditEvent(false, "deleteEncryptionZone", src); - throw e; - } finally { - RetryCache.setState(cacheEntry, success); - } - } - - private void deleteEncryptionZoneInt(final String srcArg, - final boolean logRetryCache) throws IOException { - String src = srcArg; - HdfsFileStatus resultingStat = null; - checkSuperuserPrivilege(); - checkOperation(OperationCategory.WRITE); - final byte[][] pathComponents = - FSDirectory.getPathComponentsForReservedPath(src); - writeLock(); - try { - checkSuperuserPrivilege(); - checkOperation(OperationCategory.WRITE); - checkNameNodeSafeMode("Cannot delete encryption zone on " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); - final EncryptionZone ez = encryptionZones.get(src); - if (ez == null) { - throw new IOException("Directory " + src + - " is not the root of an encryption zone."); - } - final List removedXAttrs = dir.deleteEncryptionZone(src); - if (removedXAttrs != null && !removedXAttrs.isEmpty()) { - getEditLog().logRemoveXAttrs(src, removedXAttrs); - } - encryptionZones.remove(src); - resultingStat = getAuditFileInfo(src, false); - } finally { - writeUnlock(); - } - getEditLog().logSync(); - logAuditEvent(true, "deleteEncryptionZone", src, null, resultingStat); - } - List listEncryptionZones() throws IOException { + boolean success = false; checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 35d6266644e..efbc92ddc66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1413,11 +1413,6 @@ public void createEncryptionZone(String src, String keyId) namesystem.createEncryptionZone(src, keyId); } - @Override - public void deleteEncryptionZone(String src) throws IOException { - namesystem.deleteEncryptionZone(src); - } - @Override public List listEncryptionZones() throws IOException { return namesystem.listEncryptionZones(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java index 9347d4de5d9..f1ec3cbfc57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java @@ -32,8 +32,6 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; -import com.google.common.base.Joiner; - /** * This class implements crypto command-line operations. 
*/ @@ -169,53 +167,6 @@ public int run(Configuration conf, List args) throws IOException { } } - private static class DeleteZoneCommand implements Command { - @Override - public String getName() { - return "-deleteZone"; - } - - @Override - public String getShortUsage() { - return "[" + getName() + " -path " + "]\n"; - } - - @Override - public String getLongUsage() { - final TableListing listing = getOptionDescriptionListing(); - listing.addRow("", "The path of the encryption zone to delete. " + - "It must be an empty directory and an existing encryption zone."); - return getShortUsage() + "\n" + - "Delete an encryption zone.\n\n" + - listing.toString(); - } - - @Override - public int run(Configuration conf, List args) throws IOException { - final String path = StringUtils.popOptionWithArgument("-path", args); - if (path == null) { - System.err.println("You must specify a path with -path."); - return 1; - } - - if (!args.isEmpty()) { - System.err.println("Can't understand argument: " + args.get(0)); - return 1; - } - - final DistributedFileSystem dfs = getDFS(conf); - try { - dfs.deleteEncryptionZone(new Path(path)); - System.out.println("Deleted encryption zone " + path); - } catch (IOException e) { - System.err.println(prettifyException(e)); - return 2; - } - - return 0; - } - } - private static class ListZonesCommand implements Command { @Override public String getName() { @@ -315,7 +266,6 @@ public int run(Configuration conf, List args) throws IOException { private static final Command[] COMMANDS = { new CreateZoneCommand(), - new DeleteZoneCommand(), new ListZonesCommand(), new HelpCommand(), }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 70455989e7f..d0e2fcaba6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -772,8 +772,6 @@ service ClientNamenodeProtocol { returns(RemoveXAttrResponseProto); rpc createEncryptionZone(CreateEncryptionZoneRequestProto) returns(CreateEncryptionZoneResponseProto); - rpc deleteEncryptionZone(DeleteEncryptionZoneRequestProto) - returns(DeleteEncryptionZoneResponseProto); rpc listEncryptionZones(ListEncryptionZonesRequestProto) returns(ListEncryptionZonesResponseProto); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto index 748f2cb40c2..6b091a572be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto @@ -41,13 +41,6 @@ message CreateEncryptionZoneRequestProto { message CreateEncryptionZoneResponseProto { } -message DeleteEncryptionZoneRequestProto { - required string src = 1; -} - -message DeleteEncryptionZoneResponseProto { -} - message ListEncryptionZonesRequestProto { } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java index c787da69d7f..85f7ff57013 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java @@ -85,7 +85,7 @@ public void shutDownCluster() { } /** Test failure of Create EZ on a directory that 
doesn't exist. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testCreateEncryptionZoneDirectoryDoesntExist() throws Exception { final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); @@ -98,7 +98,7 @@ public void testCreateEncryptionZoneDirectoryDoesntExist() throws Exception { } /** Test failure of Create EZ on a directory which is already an EZ. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testCreateEncryptionZoneWhichAlreadyExists() throws Exception { final HdfsAdmin dfsAdmin = @@ -114,7 +114,7 @@ public void testCreateEncryptionZoneWhichAlreadyExists() } /** Test success of Create EZ in which a key is created. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testCreateEncryptionZoneAndGenerateKeyDirectoryEmpty() throws Exception { final HdfsAdmin dfsAdmin = @@ -124,7 +124,7 @@ public void testCreateEncryptionZoneAndGenerateKeyDirectoryEmpty() } /** Test failure of Create EZ operation in an existing EZ. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testCreateEncryptionZoneInExistingEncryptionZone() throws Exception { final HdfsAdmin dfsAdmin = @@ -142,7 +142,7 @@ public void testCreateEncryptionZoneInExistingEncryptionZone() } /** Test failure of creating an EZ using a non-empty directory. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testCreateEncryptionZoneAndGenerateKeyDirectoryNotEmpty() throws Exception { final HdfsAdmin dfsAdmin = @@ -159,7 +159,7 @@ public void testCreateEncryptionZoneAndGenerateKeyDirectoryNotEmpty() } /** Test failure of creating an EZ passing a key that doesn't exist. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testCreateEncryptionZoneKeyDoesntExist() throws Exception { final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); @@ -174,7 +174,7 @@ public void testCreateEncryptionZoneKeyDoesntExist() throws Exception { } /** Test success of creating an EZ when they key exists. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testCreateEncryptionZoneKeyExist() throws Exception { final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); @@ -198,9 +198,9 @@ private void createAKey(String keyId) provider.flush(); } - /** Test failure of create/delete encryption zones as a non super user. */ - @Test(timeout = 30000) - public void testCreateAndDeleteEncryptionZoneAsNonSuperUser() + /** Test failure of create encryption zones as a non super user. */ + @Test(timeout = 60000) + public void testCreateEncryptionZoneAsNonSuperUser() throws Exception { final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); @@ -225,120 +225,28 @@ public Object run() throws Exception { return null; } }); - dfsAdmin.createEncryptionZone(TEST_PATH, null); - - user.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - final HdfsAdmin userAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - try { - userAdmin.deleteEncryptionZone(TEST_PATH); - fail("deleteEncryptionZone is superuser-only operation"); - } catch (AccessControlException e) { - GenericTestUtils.assertExceptionContains( - "Superuser privilege is required", e); - } - return null; - } - }); - } - - /** Test failure of deleting an EZ passing a directory that doesn't exist. 
*/ - @Test(timeout = 30000) - public void testDeleteEncryptionZoneDirectoryDoesntExist() throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - try { - dfsAdmin.deleteEncryptionZone(TEST_PATH); - fail("Directory doesn't exist"); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains( - "is not the root of an encryption zone", e); - } - } - - /** Test failure of deleting an EZ which is not empty. */ - @Test(timeout = 30000) - public void testDeleteEncryptionZoneAndGenerateKeyDirectoryNotEmpty() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH, null); - FileSystem.create(fs, new Path("/test/foo"), - new FsPermission((short) 0777)); - try { - dfsAdmin.deleteEncryptionZone(TEST_PATH); - fail("Directory not empty"); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("non-empty directory", e); - } - } - - /** Test success of deleting an EZ. */ - @Test(timeout = 30000) - public void testDeleteEncryptionZone() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH, null); - List zones = dfsAdmin.listEncryptionZones(); - Preconditions.checkState(zones.size() == 1, "More than one zone found?"); - dfsAdmin.deleteEncryptionZone(TEST_PATH); - zones = dfsAdmin.listEncryptionZones(); - Preconditions.checkState(zones.size() == 0, "More than one zone found?"); } /** - * Test failure of deleting an EZ on a subdir that is not the root of an EZ. + * Test success of creating an encryption zone a few levels down. */ - @Test(timeout = 30000) - public void testDeleteEncryptionZoneInExistingEncryptionZone() + @Test(timeout = 60000) + public void testCreateEncryptionZoneDownAFewLevels() throws Exception { final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH, null); - FileSystem.mkdirs(fs, TEST_PATH_WITH_CHILD, new FsPermission((short) 0777)); - try { - dfsAdmin.deleteEncryptionZone(TEST_PATH_WITH_CHILD); - fail("EZ in an EZ"); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains( - "is not the root of an encryption zone", e); - } - } - - /** - * Test success of creating and deleting an encryption zone a few levels down. 
- */ - @Test(timeout = 30000) - public void testCreateAndDeleteEncryptionZoneDownAFewLevels() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH, null); FileSystem.mkdirs(fs, TEST_PATH_WITH_MULTIPLE_CHILDREN, new FsPermission((short) 0777)); - try { - dfsAdmin.deleteEncryptionZone(TEST_PATH_WITH_MULTIPLE_CHILDREN); - fail("EZ in an EZ"); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains( - "is not the root of an encryption zone", e); - } + dfsAdmin.createEncryptionZone(TEST_PATH_WITH_MULTIPLE_CHILDREN, null); final List zones = dfsAdmin.listEncryptionZones(); Preconditions.checkState(zones.size() == 1, "More than one zone found?"); final EncryptionZone ez = zones.get(0); GenericTestUtils.assertMatches(ez.toString(), - "EncryptionZone \\[path=/test, keyId="); + "EncryptionZone \\[path=/test/foo/baz, keyId="); } /** Test failure of creating an EZ using a non-empty directory. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testCreateFileInEncryptionZone() throws Exception { final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); @@ -353,7 +261,7 @@ public void testCreateFileInEncryptionZone() throws Exception { } /** Test listing encryption zones. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testListEncryptionZones() throws Exception { final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); @@ -377,7 +285,7 @@ public void testListEncryptionZones() throws Exception { } /** Test listing encryption zones as a non super user. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testListEncryptionZonesAsNonSuperUser() throws Exception { final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); @@ -412,7 +320,7 @@ public Object run() throws Exception { } /** Test success of Rename EZ on a directory which is already an EZ. */ - @Test(timeout = 30000) + @Test(timeout = 60000) public void testRenameEncryptionZone() throws Exception { final HdfsAdmin dfsAdmin = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml index 1bf8f74fb14..36df642b104 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml @@ -71,7 +71,6 @@ -createZone -path /foo - -deleteZone -path /foo -fs NAMENODE -rmdir /foo @@ -90,7 +89,6 @@ -createZone -path /foo - -deleteZone -path /foo -fs NAMENODE -rmdir /foo @@ -112,7 +110,6 @@ -fs NAMENODE -rmdir /foo/bar - -deleteZone -path /foo -fs NAMENODE -rmdir /foo @@ -169,7 +166,6 @@ -createZone -path /foo -keyId mykey - -deleteZone -path /foo -fs NAMENODE -rmdir /foo @@ -181,74 +177,13 @@ - Test failure of deleting an EZ passing a directory that doesn't exist. - - -fs NAMENODE -ls /- - -deleteZone -path /foo - - - - - - SubstringComparator - Directory /foo is not the root of an encryption zone. - - - - - - Test failure of deleting an EZ which is not empty. - - -fs NAMENODE -mkdir /foo - -fs NAMENODE -ls /- - -createZone -path /foo - -fs NAMENODE -touchz /foo/bar - -deleteZone -path /foo - - - -fs NAMENODE -rm /foo/bar - -deleteZone -path /foo - -fs NAMENODE -rmdir /foo - - - - SubstringComparator - Attempt to delete an encryption zone for a non-empty directory. 
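With -deleteZone gone from both HdfsAdmin and the CryptoAdmin CLI, the client-side surface that remains is creating and listing zones; removing a zone is now an ordinary delete of its directory, which the deleteInternal hunk above unregisters from encryptionZones. The following is a minimal sketch of that remaining flow, for illustration only and not part of the patch; it assumes superuser credentials and a hypothetical /secure path, and passes a null keyId so the NameNode generates a key, as the tests above do.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class EncryptionZoneSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);

        Path zone = new Path("/secure");        // hypothetical path
        fs.mkdirs(zone);                        // the zone root must exist and be empty
        admin.createEncryptionZone(zone, null); // null keyId: the NameNode creates a key
        System.out.println(admin.listEncryptionZones());

        // There is no deleteEncryptionZone any more; removing the directory
        // removes the zone as a side effect of the delete.
        fs.delete(zone, true);
      }
    }
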
- - - - - - Test failure of deleting an EZ on a subdir that is not the root of an EZ. - - -fs NAMENODE -mkdir /foo - -fs NAMENODE -ls /- - -createZone -path /foo - -fs NAMENODE -mkdir /foo/bar - -deleteZone -path /foo/bar - - - -fs NAMENODE -rmdir /foo/bar - -deleteZone -path /foo - -fs NAMENODE -rmdir /foo - - - - SubstringComparator - Directory /foo/bar is not the root of an encryption zone. - - - - - - Test success of creating and deleting an encryption zone a few levels down. + Test success of creating an encryption zone a few levels down. -fs NAMENODE -mkdir /foo -fs NAMENODE -mkdir /foo/bar -fs NAMENODE -mkdir /foo/bar/baz -fs NAMENODE -ls /- -createZone -path /foo/bar/baz - -deleteZone -path /foo/bar/baz -fs NAMENODE -rmdir /foo/bar/baz @@ -258,7 +193,7 @@ SubstringComparator - Deleted encryption zone /foo/bar/baz + Added encryption zone /foo/bar/baz @@ -276,8 +211,6 @@ -fs NAMENODE -rmdir /src/subdir - -deleteZone -path /src - -deleteZone -path /dst -fs NAMENODE -rmdir /src -fs NAMENODE -rmdir /dst @@ -299,7 +232,6 @@ -fs NAMENODE -mv /src /dst- - -deleteZone -path /dst -fs NAMENODE -rmdir /src -fs NAMENODE -rmdir /dst @@ -321,7 +253,6 @@ -fs NAMENODE -mv /src /dst- - -deleteZone -path /src -fs NAMENODE -rmdir /src -fs NAMENODE -rmdir /dst @@ -345,7 +276,6 @@ -fs NAMENODE -rmdir /src/subdir2/subdir1 -fs NAMENODE -rmdir /src/subdir2 - -deleteZone -path /src -fs NAMENODE -rmdir /src From bfef9807a08435822546dd66dc72531465f34743 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 3 Jul 2014 23:40:31 +0000 Subject: [PATCH 022/354] HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL (hitliuyi via cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1607768 13f79535-47bb-0310-9956-ffa450edef68 --- BUILDING.txt | 21 ++ hadoop-common-project/hadoop-common/pom.xml | 17 +- .../hadoop-common/src/CMakeLists.txt | 33 +++ .../hadoop-common/src/config.h.cmake | 1 + .../hadoop/crypto/AESCTRCryptoCodec.java | 71 ------- .../org/apache/hadoop/crypto/CryptoCodec.java | 2 +- .../hadoop/crypto/JCEAESCTRCryptoCodec.java | 159 -------------- .../apache/hadoop/util/NativeCodeLoader.java | 5 + .../hadoop/util/NativeLibraryChecker.java | 19 +- .../org/apache/hadoop/util/NativeCodeLoader.c | 10 + .../apache/hadoop/crypto/TestCryptoCodec.java | 195 ++++++++++++++---- .../hadoop/util/TestNativeCodeLoader.java | 4 + .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 + hadoop-project-dist/pom.xml | 12 ++ hadoop-project/pom.xml | 2 + 15 files changed, 272 insertions(+), 282 deletions(-) diff --git a/BUILDING.txt b/BUILDING.txt index bfb0e0852e7..7b99537a260 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -81,6 +81,27 @@ Maven build goals: the final tar file. This option requires that -Dsnappy.lib is also given, and it ignores the -Dsnappy.prefix option. + OpenSSL build options: + + OpenSSL includes a crypto library that can be utilized by the native code. + It is currently an optional component, meaning that Hadoop can be built with + or without this dependency. + + * Use -Drequire.openssl to fail the build if libcrypto.so is not found. + If this option is not specified and the openssl library is missing, + we silently build a version of libhadoop.so that cannot make use of + openssl. This option is recommended if you plan on making use of openssl + and want to get more repeatable builds. + * Use -Dopenssl.prefix to specify a nonstandard location for the libcrypto + header files and library files. 
You do not need this option if you have + installed openssl using a package manager. + * Use -Dopenssl.lib to specify a nonstandard location for the libcrypto library + files. Similarly to openssl.prefix, you do not need this option if you have + installed openssl using a package manager. + * Use -Dbundle.openssl to copy the contents of the openssl.lib directory into + the final tar file. This option requires that -Dopenssl.lib is also given, + and it ignores the -Dopenssl.prefix option. + Tests options: * Use -DskipTests to skip tests when running the following Maven goals: diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index ce14b21c388..13b0c3b369b 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -483,6 +483,10 @@ false + + + + false @@ -532,6 +536,7 @@ org.apache.hadoop.io.compress.snappy.SnappyDecompressor org.apache.hadoop.io.compress.lz4.Lz4Compressor org.apache.hadoop.io.compress.lz4.Lz4Decompressor + org.apache.hadoop.crypto.OpensslCipher org.apache.hadoop.util.NativeCrc32 org.apache.hadoop.net.unix.DomainSocket org.apache.hadoop.net.unix.DomainSocketWatcher @@ -552,7 +557,7 @@ - + @@ -596,6 +601,11 @@ false true + + + + false + true @@ -641,6 +651,7 @@ org.apache.hadoop.io.compress.snappy.SnappyDecompressor org.apache.hadoop.io.compress.lz4.Lz4Compressor org.apache.hadoop.io.compress.lz4.Lz4Decompressor + org.apache.hadoop.crypto.OpensslCipher org.apache.hadoop.util.NativeCrc32 ${project.build.directory}/native/javah @@ -685,6 +696,10 @@ /p:CustomSnappyLib=${snappy.lib} /p:CustomSnappyInclude=${snappy.include} /p:RequireSnappy=${require.snappy} + /p:CustomOpensslPrefix=${openssl.prefix} + /p:CustomOpensslLib=${openssl.lib} + /p:CustomOpensslInclude=${openssl.include} + /p:RequireOpenssl=${require.openssl} diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt index dec63c45d7a..9ad049e0853 100644 --- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt +++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt @@ -145,6 +145,37 @@ else (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR) ENDIF(REQUIRE_SNAPPY) endif (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR) +SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES) +set_find_shared_library_version("1.0.0") +SET(OPENSSL_NAME "crypto") +IF(${CMAKE_SYSTEM_NAME} MATCHES "Windows") + SET(OPENSSL_NAME "eay32") +ENDIF() +find_library(OPENSSL_LIBRARY + NAMES ${OPENSSL_NAME} + PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/lib + ${CUSTOM_OPENSSL_PREFIX}/lib64 ${CUSTOM_OPENSSL_LIB} NO_DEFAULT_PATH) +find_library(OPENSSL_LIBRARY + NAMES ${OPENSSL_NAME}) +SET(CMAKE_FIND_LIBRARY_SUFFIXES STORED_CMAKE_FIND_LIBRARY_SUFFIXES) +find_path(OPENSSL_INCLUDE_DIR + NAMES openssl/evp.h + PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/include + ${CUSTOM_OPENSSL_INCLUDE} NO_DEFAULT_PATH) +find_path(OPENSSL_INCLUDE_DIR + NAMES openssl/evp.h) +if (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR) + GET_FILENAME_COMPONENT(HADOOP_OPENSSL_LIBRARY ${OPENSSL_LIBRARY} NAME) + SET(OPENSSL_SOURCE_FILES + "${D}/crypto/OpensslCipher.c") +else (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR) + SET(OPENSSL_INCLUDE_DIR "") + SET(OPENSSL_SOURCE_FILES "") + IF(REQUIRE_OPENSSL) + MESSAGE(FATAL_ERROR "Required openssl library could not be found. 
OPENSSL_LIBRARY=${OPENSSL_LIBRARY}, OPENSSL_INCLUDE_DIR=${OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_INCLUDE_DIR=${CUSTOM_OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_PREFIX=${CUSTOM_OPENSSL_PREFIX}, CUSTOM_OPENSSL_INCLUDE=${CUSTOM_OPENSSL_INCLUDE}") + ENDIF(REQUIRE_OPENSSL) +endif (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR) + include_directories( ${GENERATED_JAVAH} main/native/src @@ -155,6 +186,7 @@ include_directories( ${ZLIB_INCLUDE_DIRS} ${BZIP2_INCLUDE_DIR} ${SNAPPY_INCLUDE_DIR} + ${OPENSSL_INCLUDE_DIR} ${D}/util ) CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h) @@ -172,6 +204,7 @@ add_dual_library(hadoop ${D}/io/compress/lz4/lz4.c ${D}/io/compress/lz4/lz4hc.c ${SNAPPY_SOURCE_FILES} + ${OPENSSL_SOURCE_FILES} ${D}/io/compress/zlib/ZlibCompressor.c ${D}/io/compress/zlib/ZlibDecompressor.c ${BZIP2_SOURCE_FILES} diff --git a/hadoop-common-project/hadoop-common/src/config.h.cmake b/hadoop-common-project/hadoop-common/src/config.h.cmake index 020017c02fa..d71271dd3ee 100644 --- a/hadoop-common-project/hadoop-common/src/config.h.cmake +++ b/hadoop-common-project/hadoop-common/src/config.h.cmake @@ -21,6 +21,7 @@ #cmakedefine HADOOP_ZLIB_LIBRARY "@HADOOP_ZLIB_LIBRARY@" #cmakedefine HADOOP_BZIP2_LIBRARY "@HADOOP_BZIP2_LIBRARY@" #cmakedefine HADOOP_SNAPPY_LIBRARY "@HADOOP_SNAPPY_LIBRARY@" +#cmakedefine HADOOP_OPENSSL_LIBRARY "@HADOOP_OPENSSL_LIBRARY@" #cmakedefine HAVE_SYNC_FILE_RANGE #cmakedefine HAVE_POSIX_FADVISE diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java index e26135d2669..e69de29bb2d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.crypto; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -import com.google.common.base.Preconditions; - -@InterfaceAudience.Private -@InterfaceStability.Evolving -public abstract class AESCTRCryptoCodec extends CryptoCodec { - - protected static final CipherSuite SUITE = CipherSuite.AES_CTR_NOPADDING; - - /** - * For AES, the algorithm block is fixed size of 128 bits. - * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard - */ - private static final int AES_BLOCK_SIZE = SUITE.getAlgorithmBlockSize(); - private static final int CTR_OFFSET = 8; - - @Override - public CipherSuite getCipherSuite() { - return SUITE; - } - - /** - * The IV is produced by adding the initial IV to the counter. 
IV length - * should be the same as {@link #AES_BLOCK_SIZE} - */ - @Override - public void calculateIV(byte[] initIV, long counter, byte[] IV) { - Preconditions.checkArgument(initIV.length == AES_BLOCK_SIZE); - Preconditions.checkArgument(IV.length == AES_BLOCK_SIZE); - - System.arraycopy(initIV, 0, IV, 0, CTR_OFFSET); - long l = (initIV[CTR_OFFSET + 0] << 56) - + ((initIV[CTR_OFFSET + 1] & 0xFF) << 48) - + ((initIV[CTR_OFFSET + 2] & 0xFF) << 40) - + ((initIV[CTR_OFFSET + 3] & 0xFF) << 32) - + ((initIV[CTR_OFFSET + 4] & 0xFF) << 24) - + ((initIV[CTR_OFFSET + 5] & 0xFF) << 16) - + ((initIV[CTR_OFFSET + 6] & 0xFF) << 8) - + (initIV[CTR_OFFSET + 7] & 0xFF); - l += counter; - IV[CTR_OFFSET + 0] = (byte) (l >>> 56); - IV[CTR_OFFSET + 1] = (byte) (l >>> 48); - IV[CTR_OFFSET + 2] = (byte) (l >>> 40); - IV[CTR_OFFSET + 3] = (byte) (l >>> 32); - IV[CTR_OFFSET + 4] = (byte) (l >>> 24); - IV[CTR_OFFSET + 5] = (byte) (l >>> 16); - IV[CTR_OFFSET + 6] = (byte) (l >>> 8); - IV[CTR_OFFSET + 7] = (byte) (l); - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index 7d4e65ba4d3..80e15cd6b70 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -35,7 +35,7 @@ public abstract class CryptoCodec implements Configurable { public static CryptoCodec getInstance(Configuration conf) { final Class klass = conf.getClass( - HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY, JCEAESCTRCryptoCodec.class, + HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY, JceAesCtrCryptoCodec.class, CryptoCodec.class); return ReflectionUtils.newInstance(klass, conf); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java index e575e5ed66c..e69de29bb2d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java @@ -1,159 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.crypto; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.security.GeneralSecurityException; -import java.security.SecureRandom; - -import javax.crypto.Cipher; -import javax.crypto.spec.IvParameterSpec; -import javax.crypto.spec.SecretKeySpec; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; - -import com.google.common.base.Preconditions; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT; - -/** - * Implement the AES-CTR crypto codec using JCE provider. - */ -@InterfaceAudience.Private -public class JCEAESCTRCryptoCodec extends AESCTRCryptoCodec { - private Configuration conf; - private String provider; - private SecureRandom random; - - public JCEAESCTRCryptoCodec() { - } - - @Override - public Configuration getConf() { - return conf; - } - - @Override - public void setConf(Configuration conf) { - this.conf = conf; - provider = conf.get(HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY); - final String secureRandomAlg = conf.get( - HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY, - HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT); - try { - random = (provider != null) ? - SecureRandom.getInstance(secureRandomAlg, provider) : - SecureRandom.getInstance(secureRandomAlg); - } catch (GeneralSecurityException e) { - throw new IllegalArgumentException(e); - } - } - - @Override - public Encryptor createEncryptor() throws GeneralSecurityException { - return new JCEAESCTRCipher(Cipher.ENCRYPT_MODE, provider); - } - - @Override - public Decryptor createDecryptor() throws GeneralSecurityException { - return new JCEAESCTRCipher(Cipher.DECRYPT_MODE, provider); - } - - @Override - public void generateSecureRandom(byte[] bytes) { - random.nextBytes(bytes); - } - - private static class JCEAESCTRCipher implements Encryptor, Decryptor { - private final Cipher cipher; - private final int mode; - private boolean contextReset = false; - - public JCEAESCTRCipher(int mode, String provider) - throws GeneralSecurityException { - this.mode = mode; - if (provider == null || provider.isEmpty()) { - cipher = Cipher.getInstance(SUITE.getName()); - } else { - cipher = Cipher.getInstance(SUITE.getName(), provider); - } - } - - @Override - public void init(byte[] key, byte[] iv) throws IOException { - Preconditions.checkNotNull(key); - Preconditions.checkNotNull(iv); - contextReset = false; - try { - cipher.init(mode, new SecretKeySpec(key, "AES"), - new IvParameterSpec(iv)); - } catch (Exception e) { - throw new IOException(e); - } - } - - /** - * AES-CTR will consume all of the input data. It requires enough space in - * the destination buffer to encrypt entire input buffer. - */ - @Override - public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) - throws IOException { - process(inBuffer, outBuffer); - } - - /** - * AES-CTR will consume all of the input data. It requires enough space in - * the destination buffer to decrypt entire input buffer. 
- */ - @Override - public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) - throws IOException { - process(inBuffer, outBuffer); - } - - private void process(ByteBuffer inBuffer, ByteBuffer outBuffer) - throws IOException { - try { - int inputSize = inBuffer.remaining(); - // Cipher#update will maintain crypto context. - int n = cipher.update(inBuffer, outBuffer); - if (n < inputSize) { - /** - * Typically code will not get here. Cipher#update will consume all - * input data and put result in outBuffer. - * Cipher#doFinal will reset the crypto context. - */ - contextReset = true; - cipher.doFinal(inBuffer, outBuffer); - } - } catch (Exception e) { - throw new IOException(e); - } - } - - @Override - public boolean isContextReset() { - return contextReset; - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java index 5667d98b3e7..533fc07f8db 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java @@ -78,6 +78,11 @@ public static boolean isNativeCodeLoaded() { * Returns true only if this build was compiled with support for snappy. */ public static native boolean buildSupportsSnappy(); + + /** + * Returns true only if this build was compiled with support for openssl. + */ + public static native boolean buildSupportsOpenssl(); public static native String getLibraryName(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java index 84117e2002e..4891f03cbb8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java @@ -20,6 +20,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.OpensslCipher; import org.apache.hadoop.io.compress.Lz4Codec; import org.apache.hadoop.io.compress.SnappyCodec; import org.apache.hadoop.io.compress.bzip2.Bzip2Factory; @@ -57,12 +58,14 @@ public static void main(String[] args) { boolean nativeHadoopLoaded = NativeCodeLoader.isNativeCodeLoaded(); boolean zlibLoaded = false; boolean snappyLoaded = false; + boolean opensslLoaded = false; // lz4 is linked within libhadoop boolean lz4Loaded = nativeHadoopLoaded; boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf); String hadoopLibraryName = ""; String zlibLibraryName = ""; String snappyLibraryName = ""; + String opensslLibraryName = ""; String lz4LibraryName = ""; String bzip2LibraryName = ""; if (nativeHadoopLoaded) { @@ -76,6 +79,11 @@ public static void main(String[] args) { if (snappyLoaded && NativeCodeLoader.buildSupportsSnappy()) { snappyLibraryName = SnappyCodec.getLibraryName(); } + opensslLoaded = NativeCodeLoader.buildSupportsOpenssl() && + OpensslCipher.isNativeCodeLoaded(); + if (opensslLoaded) { + opensslLibraryName = OpensslCipher.getLibraryName(); + } if (lz4Loaded) { lz4LibraryName = Lz4Codec.getLibraryName(); } @@ -84,11 +92,12 @@ public static void main(String[] args) { } } System.out.println("Native library checking:"); - System.out.printf("hadoop: %b %s\n", nativeHadoopLoaded, 
hadoopLibraryName); - System.out.printf("zlib: %b %s\n", zlibLoaded, zlibLibraryName); - System.out.printf("snappy: %b %s\n", snappyLoaded, snappyLibraryName); - System.out.printf("lz4: %b %s\n", lz4Loaded, lz4LibraryName); - System.out.printf("bzip2: %b %s\n", bzip2Loaded, bzip2LibraryName); + System.out.printf("hadoop: %b %s\n", nativeHadoopLoaded, hadoopLibraryName); + System.out.printf("zlib: %b %s\n", zlibLoaded, zlibLibraryName); + System.out.printf("snappy: %b %s\n", snappyLoaded, snappyLibraryName); + System.out.printf("lz4: %b %s\n", lz4Loaded, lz4LibraryName); + System.out.printf("bzip2: %b %s\n", bzip2Loaded, bzip2LibraryName); + System.out.printf("openssl: %b %s\n", opensslLoaded, opensslLibraryName); if ((!nativeHadoopLoaded) || (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded && bzip2Loaded))) { // return 1 to indicated check failed diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c index d03050c591b..36251123113 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c @@ -39,6 +39,16 @@ JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSup #endif } +JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSupportsOpenssl + (JNIEnv *env, jclass clazz) +{ +#ifdef HADOOP_OPENSSL_LIBRARY + return JNI_TRUE; +#else + return JNI_FALSE; +#endif +} + JNIEXPORT jstring JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName (JNIEnv *env, jclass clazz) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java index f4a34a18549..d95052815cf 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java @@ -17,38 +17,165 @@ */ package org.apache.hadoop.crypto; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.BufferedInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.junit.AfterClass; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.RandomDatum; +import org.apache.hadoop.util.NativeCodeLoader; +import org.apache.hadoop.util.ReflectionUtils; import org.junit.Assert; -import org.junit.BeforeClass; import org.junit.Test; public class TestCryptoCodec { - private static CryptoCodec codec; + private static final Log LOG= LogFactory.getLog(TestCryptoCodec.class); + private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16}; + private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}; + private static final int bufferSize = 4096; - 
@BeforeClass - public static void init() throws Exception { - Configuration conf = new Configuration(); - codec = CryptoCodec.getInstance(conf); - } - - @AfterClass - public static void shutdown() throws Exception { - } + private Configuration conf = new Configuration(); + private int count = 10000; + private int seed = new Random().nextInt(); @Test(timeout=120000) - public void testSecureRandom() throws Exception { - // len = 16 - checkSecureRandom(16); - - // len = 32 - checkSecureRandom(32); - - // len = 128 - checkSecureRandom(128); + public void testJceAesCtrCryptoCodec() throws Exception { + cryptoCodecTest(conf, seed, 0, + "org.apache.hadoop.crypto.JceAesCtrCryptoCodec"); + cryptoCodecTest(conf, seed, count, + "org.apache.hadoop.crypto.JceAesCtrCryptoCodec"); } - private void checkSecureRandom(int len) { + @Test(timeout=1200000) + public void testOpensslAesCtrCryptoCodec() throws Exception { + if (NativeCodeLoader.buildSupportsOpenssl()) { + Assert.assertTrue(OpensslCipher.isNativeCodeLoaded()); + } + if (OpensslCipher.isNativeCodeLoaded()) { + cryptoCodecTest(conf, seed, 0, + "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec"); + cryptoCodecTest(conf, seed, count, + "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec"); + } + } + + private void cryptoCodecTest(Configuration conf, int seed, int count, + String codecClass) throws IOException, GeneralSecurityException { + CryptoCodec codec = null; + try { + codec = (CryptoCodec)ReflectionUtils.newInstance( + conf.getClassByName(codecClass), conf); + } catch (ClassNotFoundException cnfe) { + throw new IOException("Illegal crypto codec!"); + } + LOG.info("Created a Codec object of type: " + codecClass); + + // Generate data + DataOutputBuffer data = new DataOutputBuffer(); + RandomDatum.Generator generator = new RandomDatum.Generator(seed); + for(int i = 0; i < count; ++i) { + generator.next(); + RandomDatum key = generator.getKey(); + RandomDatum value = generator.getValue(); + + key.write(data); + value.write(data); + } + LOG.info("Generated " + count + " records"); + + // Encrypt data + DataOutputBuffer encryptedDataBuffer = new DataOutputBuffer(); + CryptoOutputStream out = new CryptoOutputStream(encryptedDataBuffer, + codec, bufferSize, key, iv); + out.write(data.getData(), 0, data.getLength()); + out.flush(); + out.close(); + LOG.info("Finished encrypting data"); + + // Decrypt data + DataInputBuffer decryptedDataBuffer = new DataInputBuffer(); + decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0, + encryptedDataBuffer.getLength()); + CryptoInputStream in = new CryptoInputStream(decryptedDataBuffer, + codec, bufferSize, key, iv); + DataInputStream dataIn = new DataInputStream(new BufferedInputStream(in)); + + // Check + DataInputBuffer originalData = new DataInputBuffer(); + originalData.reset(data.getData(), 0, data.getLength()); + DataInputStream originalIn = new DataInputStream( + new BufferedInputStream(originalData)); + + for(int i=0; i < count; ++i) { + RandomDatum k1 = new RandomDatum(); + RandomDatum v1 = new RandomDatum(); + k1.readFields(originalIn); + v1.readFields(originalIn); + + RandomDatum k2 = new RandomDatum(); + RandomDatum v2 = new RandomDatum(); + k2.readFields(dataIn); + v2.readFields(dataIn); + assertTrue("original and encrypted-then-decrypted-output not equal", + k1.equals(k2) && v1.equals(v2)); + + // original and encrypted-then-decrypted-output have the same hashCode + Map m = new HashMap(); + m.put(k1, k1.toString()); + m.put(v1, v1.toString()); + String result = m.get(k2); + assertEquals("k1 and 
k2 hashcode not equal", result, k1.toString()); + result = m.get(v2); + assertEquals("v1 and v2 hashcode not equal", result, v1.toString()); + } + + // Decrypt data byte-at-a-time + originalData.reset(data.getData(), 0, data.getLength()); + decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0, + encryptedDataBuffer.getLength()); + in = new CryptoInputStream(decryptedDataBuffer, + codec, bufferSize, key, iv); + + // Check + originalIn = new DataInputStream(new BufferedInputStream(originalData)); + int expected; + do { + expected = originalIn.read(); + assertEquals("Decrypted stream read by byte does not match", + expected, in.read()); + } while (expected != -1); + + LOG.info("SUCCESS! Completed checking " + count + " records"); + + // Check secure random generator + testSecureRandom(codec); + } + + /** Test secure random generator */ + private void testSecureRandom(CryptoCodec codec) { + // len = 16 + checkSecureRandom(codec, 16); + // len = 32 + checkSecureRandom(codec, 32); + // len = 128 + checkSecureRandom(codec, 128); + } + + private void checkSecureRandom(CryptoCodec codec, int len) { byte[] rand = new byte[len]; byte[] rand1 = new byte[len]; codec.generateSecureRandom(rand); @@ -56,28 +183,6 @@ private void checkSecureRandom(int len) { Assert.assertEquals(len, rand.length); Assert.assertEquals(len, rand1.length); - Assert.assertFalse(bytesArrayEquals(rand, rand1)); - } - - private boolean bytesArrayEquals(byte[] expected, byte[] actual) { - if ((expected == null && actual != null) || - (expected != null && actual == null)) { - return false; - } - if (expected == null && actual == null) { - return true; - } - - if (expected.length != actual.length) { - return false; - } - - for (int i = 0; i < expected.length; i++) { - if (expected[i] != actual[i]) { - return false; - } - } - - return true; + Assert.assertFalse(Arrays.equals(rand, rand1)); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java index 9efaca95c89..473c17738eb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.crypto.OpensslCipher; import org.apache.hadoop.io.compress.Lz4Codec; import org.apache.hadoop.io.compress.SnappyCodec; import org.apache.hadoop.io.compress.zlib.ZlibFactory; @@ -54,6 +55,9 @@ public void testNativeCodeLoaded() { if (NativeCodeLoader.buildSupportsSnappy()) { assertFalse(SnappyCodec.getLibraryName().isEmpty()); } + if (NativeCodeLoader.buildSupportsOpenssl()) { + assertFalse(OpensslCipher.getLibraryName().isEmpty()); + } assertFalse(Lz4Codec.getLibraryName().isEmpty()); LOG.info("TestNativeCodeLoader: libhadoop.so is loaded."); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index f6f68e433ce..7c057bb8bc2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -10,6 +10,9 @@ fs-encryption (Unreleased) IMPROVEMENTS + HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL + (hitliuyi via cmccabe) + HDFS-6387. 
HDFS CLI admin tool for creating & deleting an encryption zone. (clamb) diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index dcb3875a12c..e6dc75d3ca0 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -41,6 +41,8 @@ UNDEF false false + false + false @@ -349,6 +351,10 @@ cd "${snappy.lib}" $$TAR *snappy* | (cd $${TARGET_DIR}/; $$UNTAR) fi + if [ "${bundle.openssl}" = "true" ] ; then + cd "${openssl.lib}" + $$TAR *crypto* | (cd $${TARGET_DIR}/; $$UNTAR) + fi fi BIN_DIR="${BUILD_DIR}/bin" if [ -d $${BIN_DIR} ] ; then @@ -362,6 +368,12 @@ $$TAR *snappy* | (cd $${TARGET_BIN_DIR}/; $$UNTAR) fi fi + if [ "${bundle.openssl.in.bin}" = "true" ] ; then + if [ "${bundle.openssl}" = "true" ] ; then + cd "${openssl.lib}" + $$TAR *crypto* | (cd $${TARGET_BIN_DIR}/; $$UNTAR) + fi + fi fi diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 472febf87a2..ed20742c327 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1031,6 +1031,7 @@ file:/dev/urandom true + true @@ -1041,6 +1042,7 @@ ${env.PATH};${hadoop.common.build.dir}/bin;${snappy.lib} + ${env.PATH};${hadoop.common.build.dir}/bin;${openssl.lib} From 03c858dad46e440028c58a716630160338db4b14 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 3 Jul 2014 23:51:50 +0000 Subject: [PATCH 023/354] HADOOP-10693: add files left out of previous checkin (cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1607769 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/crypto/AesCtrCryptoCodec.java | 71 ++++ .../hadoop/crypto/JceAesCtrCryptoCodec.java | 159 ++++++++ .../crypto/OpensslAesCtrCryptoCodec.java | 130 ++++++ .../apache/hadoop/crypto/OpensslCipher.java | 213 ++++++++++ .../org/apache/hadoop/crypto/OpensslCipher.c | 382 ++++++++++++++++++ .../hadoop/crypto/org_apache_hadoop_crypto.h | 61 +++ ...toStreamsWithOpensslAesCtrCryptoCodec.java | 35 ++ .../hadoop/crypto/TestOpensslCipher.java | 120 ++++++ 8 files changed, 1171 insertions(+) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/org_apache_hadoop_crypto.h create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java new file mode 100644 index 00000000000..b469fddaf2a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import com.google.common.base.Preconditions; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public abstract class AesCtrCryptoCodec extends CryptoCodec { + + protected static final CipherSuite SUITE = CipherSuite.AES_CTR_NOPADDING; + + /** + * For AES, the algorithm block is fixed size of 128 bits. + * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard + */ + private static final int AES_BLOCK_SIZE = SUITE.getAlgorithmBlockSize(); + private static final int CTR_OFFSET = 8; + + @Override + public CipherSuite getCipherSuite() { + return SUITE; + } + + /** + * The IV is produced by adding the initial IV to the counter. IV length + * should be the same as {@link #AES_BLOCK_SIZE} + */ + @Override + public void calculateIV(byte[] initIV, long counter, byte[] IV) { + Preconditions.checkArgument(initIV.length == AES_BLOCK_SIZE); + Preconditions.checkArgument(IV.length == AES_BLOCK_SIZE); + + System.arraycopy(initIV, 0, IV, 0, CTR_OFFSET); + long l = (initIV[CTR_OFFSET + 0] << 56) + + ((initIV[CTR_OFFSET + 1] & 0xFF) << 48) + + ((initIV[CTR_OFFSET + 2] & 0xFF) << 40) + + ((initIV[CTR_OFFSET + 3] & 0xFF) << 32) + + ((initIV[CTR_OFFSET + 4] & 0xFF) << 24) + + ((initIV[CTR_OFFSET + 5] & 0xFF) << 16) + + ((initIV[CTR_OFFSET + 6] & 0xFF) << 8) + + (initIV[CTR_OFFSET + 7] & 0xFF); + l += counter; + IV[CTR_OFFSET + 0] = (byte) (l >>> 56); + IV[CTR_OFFSET + 1] = (byte) (l >>> 48); + IV[CTR_OFFSET + 2] = (byte) (l >>> 40); + IV[CTR_OFFSET + 3] = (byte) (l >>> 32); + IV[CTR_OFFSET + 4] = (byte) (l >>> 24); + IV[CTR_OFFSET + 5] = (byte) (l >>> 16); + IV[CTR_OFFSET + 6] = (byte) (l >>> 8); + IV[CTR_OFFSET + 7] = (byte) (l); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java new file mode 100644 index 00000000000..2482586720c --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
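To make the counter arithmetic in AesCtrCryptoCodec#calculateIV above easier to follow, here is a compact, illustrative-only sketch of the intended computation (the class and method names are invented for this example and are not part of the patch): the low 8 bytes of the 16-byte IV are read as a big-endian counter, the block counter is added, and the sum is written back while the high 8 bytes are copied unchanged.

import java.nio.ByteBuffer;

final class CtrIvSketch {
  // Illustrative only: intended IV-from-counter computation for AES-CTR.
  static byte[] ivForCounter(byte[] initIV, long counter) {
    byte[] iv = new byte[16];
    System.arraycopy(initIV, 0, iv, 0, 8);               // high 8 bytes: copied as-is
    long base = ByteBuffer.wrap(initIV, 8, 8).getLong(); // low 8 bytes: big-endian counter
    ByteBuffer.wrap(iv, 8, 8).putLong(base + counter);   // write base + block counter back
    return iv;
  }
}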
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.GeneralSecurityException; +import java.security.SecureRandom; + +import javax.crypto.Cipher; +import javax.crypto.spec.IvParameterSpec; +import javax.crypto.spec.SecretKeySpec; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; + +import com.google.common.base.Preconditions; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT; + +/** + * Implement the AES-CTR crypto codec using JCE provider. + */ +@InterfaceAudience.Private +public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec { + private Configuration conf; + private String provider; + private SecureRandom random; + + public JceAesCtrCryptoCodec() { + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + provider = conf.get(HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY); + final String secureRandomAlg = conf.get( + HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY, + HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT); + try { + random = (provider != null) ? + SecureRandom.getInstance(secureRandomAlg, provider) : + SecureRandom.getInstance(secureRandomAlg); + } catch (GeneralSecurityException e) { + throw new IllegalArgumentException(e); + } + } + + @Override + public Encryptor createEncryptor() throws GeneralSecurityException { + return new JceAesCtrCipher(Cipher.ENCRYPT_MODE, provider); + } + + @Override + public Decryptor createDecryptor() throws GeneralSecurityException { + return new JceAesCtrCipher(Cipher.DECRYPT_MODE, provider); + } + + @Override + public void generateSecureRandom(byte[] bytes) { + random.nextBytes(bytes); + } + + private static class JceAesCtrCipher implements Encryptor, Decryptor { + private final Cipher cipher; + private final int mode; + private boolean contextReset = false; + + public JceAesCtrCipher(int mode, String provider) + throws GeneralSecurityException { + this.mode = mode; + if (provider == null || provider.isEmpty()) { + cipher = Cipher.getInstance(SUITE.getName()); + } else { + cipher = Cipher.getInstance(SUITE.getName(), provider); + } + } + + @Override + public void init(byte[] key, byte[] iv) throws IOException { + Preconditions.checkNotNull(key); + Preconditions.checkNotNull(iv); + contextReset = false; + try { + cipher.init(mode, new SecretKeySpec(key, "AES"), + new IvParameterSpec(iv)); + } catch (Exception e) { + throw new IOException(e); + } + } + + /** + * AES-CTR will consume all of the input data. It requires enough space in + * the destination buffer to encrypt entire input buffer. 
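As a stand-alone illustration of the sizing rule described above (plain JCE, not the Hadoop API; the class and method names below are hypothetical): with AES/CTR/NoPadding the ciphertext is exactly as long as the plaintext, so an output buffer of the input length always suffices.

import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

final class JceCtrSizingSketch {
  // Illustrative only: CTR mode produces output of exactly the input length.
  static byte[] encrypt(byte[] key, byte[] iv, byte[] plain) throws Exception {
    Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
    cipher.init(Cipher.ENCRYPT_MODE,
        new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
    byte[] out = new byte[plain.length];          // no expansion and no padding in CTR
    cipher.doFinal(plain, 0, plain.length, out);  // fills exactly plain.length bytes
    return out;
  }
}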
+ */ + @Override + public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + process(inBuffer, outBuffer); + } + + /** + * AES-CTR will consume all of the input data. It requires enough space in + * the destination buffer to decrypt entire input buffer. + */ + @Override + public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + process(inBuffer, outBuffer); + } + + private void process(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + try { + int inputSize = inBuffer.remaining(); + // Cipher#update will maintain crypto context. + int n = cipher.update(inBuffer, outBuffer); + if (n < inputSize) { + /** + * Typically code will not get here. Cipher#update will consume all + * input data and put result in outBuffer. + * Cipher#doFinal will reset the crypto context. + */ + contextReset = true; + cipher.doFinal(inBuffer, outBuffer); + } + } catch (Exception e) { + throw new IOException(e); + } + } + + @Override + public boolean isContextReset() { + return contextReset; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java new file mode 100644 index 00000000000..669271fa9ef --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java @@ -0,0 +1,130 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.GeneralSecurityException; +import java.security.SecureRandom; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; + +import com.google.common.base.Preconditions; + +/** + * Implement the AES-CTR crypto codec using JNI into OpenSSL. 
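For context, a hedged sketch of how a caller would select this codec through configuration; it mirrors the setup used by TestCryptoStreamsWithOpensslAesCtrCryptoCodec later in this patch series, and the wrapper class name is invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY;

final class CodecSelectionSketch {
  // Mirrors the test setup later in this patch series; illustrative only.
  static CryptoCodec opensslBackedCodec() throws Exception {
    Configuration conf = new Configuration();
    conf.set(HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY,
        OpensslAesCtrCryptoCodec.class.getName());
    return CryptoCodec.getInstance(conf);
  }
}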
+ */ +@InterfaceAudience.Private +public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec { + private Configuration conf; + private SecureRandom random = new SecureRandom(); + + public OpensslAesCtrCryptoCodec() { + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public Encryptor createEncryptor() throws GeneralSecurityException { + return new OpensslAesCtrCipher(OpensslCipher.ENCRYPT_MODE); + } + + @Override + public Decryptor createDecryptor() throws GeneralSecurityException { + return new OpensslAesCtrCipher(OpensslCipher.DECRYPT_MODE); + } + + @Override + public void generateSecureRandom(byte[] bytes) { + random.nextBytes(bytes); + } + + private static class OpensslAesCtrCipher implements Encryptor, Decryptor { + private final OpensslCipher cipher; + private final int mode; + private boolean contextReset = false; + + public OpensslAesCtrCipher(int mode) throws GeneralSecurityException { + this.mode = mode; + cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, + OpensslCipher.PADDING_NOPADDING); + } + + @Override + public void init(byte[] key, byte[] iv) throws IOException { + Preconditions.checkNotNull(key); + Preconditions.checkNotNull(iv); + contextReset = false; + cipher.init(mode, key, iv); + } + + /** + * AES-CTR will consume all of the input data. It requires enough space in + * the destination buffer to encrypt entire input buffer. + */ + @Override + public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + process(inBuffer, outBuffer); + } + + /** + * AES-CTR will consume all of the input data. It requires enough space in + * the destination buffer to decrypt entire input buffer. + */ + @Override + public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + process(inBuffer, outBuffer); + } + + private void process(ByteBuffer inBuffer, ByteBuffer outBuffer) + throws IOException { + try { + int inputSize = inBuffer.remaining(); + // OpensslCipher#update will maintain crypto context. + int n = cipher.update(inBuffer, outBuffer); + if (n < inputSize) { + /** + * Typically code will not get here. OpensslCipher#update will + * consume all input data and put result in outBuffer. + * OpensslCipher#doFinal will reset the crypto context. + */ + contextReset = true; + cipher.doFinal(outBuffer); + } + } catch (Exception e) { + throw new IOException(e); + } + } + + @Override + public boolean isContextReset() { + return contextReset; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java new file mode 100644 index 00000000000..c0a4e9bdb45 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java @@ -0,0 +1,213 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import java.nio.ByteBuffer; +import java.security.NoSuchAlgorithmException; + +import javax.crypto.BadPaddingException; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.ShortBufferException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.NativeCodeLoader; + +import com.google.common.base.Preconditions; + +/** + * OpenSSL cipher using JNI. + * Currently only AES-CTR is supported. It's flexible to add + * other crypto algorithms/modes. + */ +@InterfaceAudience.Private +public final class OpensslCipher { + private static final Log LOG = + LogFactory.getLog(OpensslCipher.class.getName()); + public static final int ENCRYPT_MODE = 1; + public static final int DECRYPT_MODE = 0; + + /** Currently only support AES/CTR/NoPadding. */ + public static final int AES_CTR = 0; + public static final int PADDING_NOPADDING = 0; + + private long context = 0; + private final int algorithm; + private final int padding; + + private static boolean nativeCipherLoaded = false; + static { + if (NativeCodeLoader.isNativeCodeLoaded() && + NativeCodeLoader.buildSupportsOpenssl()) { + try { + initIDs(); + nativeCipherLoaded = true; + } catch (Throwable t) { + LOG.error("Failed to load OpenSSL Cipher.", t); + } + } + } + + public static boolean isNativeCodeLoaded() { + return nativeCipherLoaded; + } + + private OpensslCipher(long context, int algorithm, int padding) { + this.context = context; + this.algorithm = algorithm; + this.padding = padding; + } + + /** + * Return an OpensslCipher object that implements the specified + * algorithm. + * + * @param algorithm currently only supports {@link #AES_CTR} + * @param padding currently only supports {@link #PADDING_NOPADDING} + * @return OpensslCipher an OpensslCipher object + * @throws NoSuchAlgorithmException + * @throws NoSuchPaddingException + */ + public static final OpensslCipher getInstance(int algorithm, + int padding) throws NoSuchAlgorithmException, NoSuchPaddingException { + long context = initContext(algorithm, padding); + return new OpensslCipher(context, algorithm, padding); + } + + /** + * Initialize this cipher with a key and IV. + * + * @param mode {@link #ENCRYPT_MODE} or {@link #DECRYPT_MODE} + * @param key crypto key + * @param iv crypto iv + */ + public void init(int mode, byte[] key, byte[] iv) { + context = init(context, mode, algorithm, padding, key, iv); + } + + /** + * Continues a multiple-part encryption or decryption operation. The data + * is encrypted or decrypted, depending on how this cipher was initialized. + *
+ * + * All input.remaining() bytes starting at + * input.position() are processed. The result is stored in + * the output buffer. + *
+ * + * Upon return, the input buffer's position will be equal to its limit; + * its limit will not have changed. The output buffer's position will have + * advanced by n, when n is the value returned by this method; the output + * buffer's limit will not have changed. + *
+ * + * If output.remaining() bytes are insufficient to hold the + * result, a ShortBufferException is thrown. + * + * @param input the input ByteBuffer + * @param output the output ByteBuffer + * @return int number of bytes stored in output + * @throws ShortBufferException if there is insufficient space in the + * output buffer + */ + public int update(ByteBuffer input, ByteBuffer output) + throws ShortBufferException { + checkState(); + Preconditions.checkArgument(input.isDirect() && output.isDirect(), + "Direct buffers are required."); + int len = update(context, input, input.position(), input.remaining(), + output, output.position(), output.remaining()); + input.position(input.limit()); + output.position(output.position() + len); + return len; + } + + /** + * Finishes a multiple-part operation. The data is encrypted or decrypted, + * depending on how this cipher was initialized. + *
+ * + * The result is stored in the output buffer. Upon return, the output buffer's + * position will have advanced by n, where n is the value returned by this + * method; the output buffer's limit will not have changed. + *
+ * + * If output.remaining() bytes are insufficient to hold the result, + * a ShortBufferException is thrown. + *
+ * + * Upon finishing, this method resets this cipher object to the state it was + * in when previously initialized. That is, the object is available to encrypt + * or decrypt more data. + *
+ * + * If any exception is thrown, this cipher object need to be reset before it + * can be used again. + * + * @param output the output ByteBuffer + * @return int number of bytes stored in output + * @throws ShortBufferException + * @throws IllegalBlockSizeException + * @throws BadPaddingException + */ + public int doFinal(ByteBuffer output) throws ShortBufferException, + IllegalBlockSizeException, BadPaddingException { + checkState(); + Preconditions.checkArgument(output.isDirect(), "Direct buffer is required."); + int len = doFinal(context, output, output.position(), output.remaining()); + output.position(output.position() + len); + return len; + } + + /** Forcibly clean the context. */ + public void clean() { + if (context != 0) { + clean(context); + context = 0; + } + } + + /** Check whether context is initialized. */ + private void checkState() { + Preconditions.checkState(context != 0); + } + + @Override + protected void finalize() throws Throwable { + clean(); + } + + private native static void initIDs(); + + private native static long initContext(int alg, int padding); + + private native long init(long context, int mode, int alg, int padding, + byte[] key, byte[] iv); + + private native int update(long context, ByteBuffer input, int inputOffset, + int inputLength, ByteBuffer output, int outputOffset, int maxOutputLength); + + private native int doFinal(long context, ByteBuffer output, int offset, + int maxOutputLength); + + private native void clean(long context); + + public native static String getLibraryName(); +} diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c new file mode 100644 index 00000000000..5cb5bba9aee --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c @@ -0,0 +1,382 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
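For reference, a brief usage sketch of the OpensslCipher Java API defined above (assumes the native library is loaded; it mirrors TestOpensslCipher later in this patch, and the wrapper class name is invented): update() and doFinal() require direct ByteBuffers, and clean() releases the native context.

import java.nio.ByteBuffer;

import org.apache.hadoop.crypto.OpensslCipher;

final class OpensslCipherSketch {
  // Illustrative only: direct buffers are mandatory for update()/doFinal().
  static int encryptDirect(byte[] key, byte[] iv, ByteBuffer in, ByteBuffer out)
      throws Exception {
    OpensslCipher cipher = OpensslCipher.getInstance(
        OpensslCipher.AES_CTR, OpensslCipher.PADDING_NOPADDING);
    cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);  // 16/32-byte key, 16-byte iv
    int produced = cipher.update(in, out);             // in and out must be direct
    produced += cipher.doFinal(out);
    cipher.clean();                                    // release the native EVP context
    return produced;
  }
}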
+ */ + +#include "org_apache_hadoop_crypto.h" + +#include +#include +#include + +#include "org_apache_hadoop_crypto_OpensslCipher.h" + +#ifdef UNIX +static EVP_CIPHER_CTX * (*dlsym_EVP_CIPHER_CTX_new)(void); +static void (*dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *); +static int (*dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *); +static void (*dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *); +static int (*dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int); +static int (*dlsym_EVP_CipherInit_ex)(EVP_CIPHER_CTX *, const EVP_CIPHER *, \ + ENGINE *, const unsigned char *, const unsigned char *, int); +static int (*dlsym_EVP_CipherUpdate)(EVP_CIPHER_CTX *, unsigned char *, \ + int *, const unsigned char *, int); +static int (*dlsym_EVP_CipherFinal_ex)(EVP_CIPHER_CTX *, unsigned char *, int *); +static EVP_CIPHER * (*dlsym_EVP_aes_256_ctr)(void); +static EVP_CIPHER * (*dlsym_EVP_aes_128_ctr)(void); +static void *openssl; +#endif + +#ifdef WINDOWS +typedef EVP_CIPHER_CTX * (__cdecl *__dlsym_EVP_CIPHER_CTX_new)(void); +typedef void (__cdecl *__dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *); +typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *); +typedef void (__cdecl *__dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *); +typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int); +typedef int (__cdecl *__dlsym_EVP_CipherInit_ex)(EVP_CIPHER_CTX *, \ + const EVP_CIPHER *, ENGINE *, const unsigned char *, \ + const unsigned char *, int); +typedef int (__cdecl *__dlsym_EVP_CipherUpdate)(EVP_CIPHER_CTX *, \ + unsigned char *, int *, const unsigned char *, int); +typedef int (__cdecl *__dlsym_EVP_CipherFinal_ex)(EVP_CIPHER_CTX *, \ + unsigned char *, int *); +typedef EVP_CIPHER * (__cdecl *__dlsym_EVP_aes_256_ctr)(void); +typedef EVP_CIPHER * (__cdecl *__dlsym_EVP_aes_128_ctr)(void); +static __dlsym_EVP_CIPHER_CTX_new dlsym_EVP_CIPHER_CTX_new; +static __dlsym_EVP_CIPHER_CTX_free dlsym_EVP_CIPHER_CTX_free; +static __dlsym_EVP_CIPHER_CTX_cleanup dlsym_EVP_CIPHER_CTX_cleanup; +static __dlsym_EVP_CIPHER_CTX_init dlsym_EVP_CIPHER_CTX_init; +static __dlsym_EVP_CIPHER_CTX_set_padding dlsym_EVP_CIPHER_CTX_set_padding; +static __dlsym_EVP_CipherInit_ex dlsym_EVP_CipherInit_ex; +static __dlsym_EVP_CipherUpdate dlsym_EVP_CipherUpdate; +static __dlsym_EVP_CipherFinal_ex dlsym_EVP_CipherFinal_ex; +static __dlsym_EVP_aes_256_ctr dlsym_EVP_aes_256_ctr; +static __dlsym_EVP_aes_128_ctr dlsym_EVP_aes_128_ctr; +static HMODULE openssl; +#endif + +static void loadAesCtr(JNIEnv *env) +{ +#ifdef UNIX + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_aes_256_ctr, env, openssl, "EVP_aes_256_ctr"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_aes_128_ctr, env, openssl, "EVP_aes_128_ctr"); +#endif + +#ifdef WINDOWS + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_aes_256_ctr, dlsym_EVP_aes_256_ctr, \ + env, openssl, "EVP_aes_256_ctr"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_aes_128_ctr, dlsym_EVP_aes_128_ctr, \ + env, openssl, "EVP_aes_128_ctr"); +#endif +} + +JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs + (JNIEnv *env, jclass clazz) +{ + char msg[1000]; +#ifdef UNIX + openssl = dlopen(HADOOP_OPENSSL_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); +#endif + +#ifdef WINDOWS + openssl = LoadLibrary(HADOOP_OPENSSL_LIBRARY); +#endif + + if (!openssl) { + snprintf(msg, sizeof(msg), "Cannot load %s (%s)!", HADOOP_OPENSSL_LIBRARY, \ + dlerror()); + THROW(env, "java/lang/UnsatisfiedLinkError", msg); + return; + } + +#ifdef UNIX + dlerror(); // Clear any existing error + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_new, env, openssl, \ + 
"EVP_CIPHER_CTX_new"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_free, env, openssl, \ + "EVP_CIPHER_CTX_free"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_cleanup, env, openssl, \ + "EVP_CIPHER_CTX_cleanup"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_init, env, openssl, \ + "EVP_CIPHER_CTX_init"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_set_padding, env, openssl, \ + "EVP_CIPHER_CTX_set_padding"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherInit_ex, env, openssl, \ + "EVP_CipherInit_ex"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherUpdate, env, openssl, \ + "EVP_CipherUpdate"); + LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherFinal_ex, env, openssl, \ + "EVP_CipherFinal_ex"); +#endif + +#ifdef WINDOWS + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_new, dlsym_EVP_CIPHER_CTX_new, \ + env, openssl, "EVP_CIPHER_CTX_new"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_free, dlsym_EVP_CIPHER_CTX_free, \ + env, openssl, "EVP_CIPHER_CTX_free"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_cleanup, \ + dlsym_EVP_CIPHER_CTX_cleanup, env, + openssl, "EVP_CIPHER_CTX_cleanup"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_init, dlsym_EVP_CIPHER_CTX_init, \ + env, openssl, "EVP_CIPHER_CTX_init"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_set_padding, \ + dlsym_EVP_CIPHER_CTX_set_padding, env, \ + openssl, "EVP_CIPHER_CTX_set_padding"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherInit_ex, dlsym_EVP_CipherInit_ex, \ + env, openssl, "EVP_CipherInit_ex"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherUpdate, dlsym_EVP_CipherUpdate, \ + env, openssl, "EVP_CipherUpdate"); + LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherFinal_ex, dlsym_EVP_CipherFinal_ex, \ + env, openssl, "EVP_CipherFinal_ex"); +#endif + + loadAesCtr(env); + jthrowable jthr = (*env)->ExceptionOccurred(env); + if (jthr) { + (*env)->DeleteLocalRef(env, jthr); + THROW(env, "java/lang/UnsatisfiedLinkError", \ + "Cannot find AES-CTR support, is your version of Openssl new enough?"); + return; + } +} + +JNIEXPORT jlong JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initContext + (JNIEnv *env, jclass clazz, jint alg, jint padding) +{ + if (alg != AES_CTR) { + THROW(env, "java/security/NoSuchAlgorithmException", NULL); + return (jlong)0; + } + if (padding != NOPADDING) { + THROW(env, "javax/crypto/NoSuchPaddingException", NULL); + return (jlong)0; + } + + if (dlsym_EVP_aes_256_ctr == NULL || dlsym_EVP_aes_128_ctr == NULL) { + THROW(env, "java/security/NoSuchAlgorithmException", \ + "Doesn't support AES CTR."); + return (jlong)0; + } + + // Create and initialize a EVP_CIPHER_CTX + EVP_CIPHER_CTX *context = dlsym_EVP_CIPHER_CTX_new(); + if (!context) { + THROW(env, "java/lang/OutOfMemoryError", NULL); + return (jlong)0; + } + + return JLONG(context); +} + +// Only supports AES-CTR currently +static EVP_CIPHER * getEvpCipher(int alg, int keyLen) +{ + EVP_CIPHER *cipher = NULL; + if (alg == AES_CTR) { + if (keyLen == KEY_LENGTH_256) { + cipher = dlsym_EVP_aes_256_ctr(); + } else if (keyLen == KEY_LENGTH_128) { + cipher = dlsym_EVP_aes_128_ctr(); + } + } + return cipher; +} + +JNIEXPORT jlong JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_init + (JNIEnv *env, jobject object, jlong ctx, jint mode, jint alg, jint padding, + jbyteArray key, jbyteArray iv) +{ + int jKeyLen = (*env)->GetArrayLength(env, key); + int jIvLen = (*env)->GetArrayLength(env, iv); + if (jKeyLen != KEY_LENGTH_128 && jKeyLen != KEY_LENGTH_256) { + THROW(env, "java/lang/IllegalArgumentException", "Invalid key length."); + return (jlong)0; + } + if (jIvLen != IV_LENGTH) { + THROW(env, 
"java/lang/IllegalArgumentException", "Invalid iv length."); + return (jlong)0; + } + + EVP_CIPHER_CTX *context = CONTEXT(ctx); + if (context == 0) { + // Create and initialize a EVP_CIPHER_CTX + context = dlsym_EVP_CIPHER_CTX_new(); + if (!context) { + THROW(env, "java/lang/OutOfMemoryError", NULL); + return (jlong)0; + } + } + + jbyte *jKey = (*env)->GetByteArrayElements(env, key, NULL); + if (jKey == NULL) { + THROW(env, "java/lang/InternalError", "Cannot get bytes array for key."); + return (jlong)0; + } + jbyte *jIv = (*env)->GetByteArrayElements(env, iv, NULL); + if (jIv == NULL) { + (*env)->ReleaseByteArrayElements(env, key, jKey, 0); + THROW(env, "java/lang/InternalError", "Cannot get bytes array for iv."); + return (jlong)0; + } + + int rc = dlsym_EVP_CipherInit_ex(context, getEvpCipher(alg, jKeyLen), \ + NULL, (unsigned char *)jKey, (unsigned char *)jIv, mode == ENCRYPT_MODE); + (*env)->ReleaseByteArrayElements(env, key, jKey, 0); + (*env)->ReleaseByteArrayElements(env, iv, jIv, 0); + if (rc == 0) { + dlsym_EVP_CIPHER_CTX_cleanup(context); + THROW(env, "java/lang/InternalError", "Error in EVP_CipherInit_ex."); + return (jlong)0; + } + + if (padding == NOPADDING) { + dlsym_EVP_CIPHER_CTX_set_padding(context, 0); + } + + return JLONG(context); +} + +// https://www.openssl.org/docs/crypto/EVP_EncryptInit.html +static int check_update_max_output_len(EVP_CIPHER_CTX *context, int input_len, + int max_output_len) +{ + if (context->flags & EVP_CIPH_NO_PADDING) { + if (max_output_len >= input_len) { + return 1; + } + return 0; + } else { + int b = context->cipher->block_size; + if (context->encrypt) { + if (max_output_len >= input_len + b - 1) { + return 1; + } + } else { + if (max_output_len >= input_len + b) { + return 1; + } + } + + return 0; + } +} + +JNIEXPORT jint JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_update + (JNIEnv *env, jobject object, jlong ctx, jobject input, jint input_offset, + jint input_len, jobject output, jint output_offset, jint max_output_len) +{ + EVP_CIPHER_CTX *context = CONTEXT(ctx); + if (!check_update_max_output_len(context, input_len, max_output_len)) { + THROW(env, "javax/crypto/ShortBufferException", \ + "Output buffer is not sufficient."); + return 0; + } + unsigned char *input_bytes = (*env)->GetDirectBufferAddress(env, input); + unsigned char *output_bytes = (*env)->GetDirectBufferAddress(env, output); + if (input_bytes == NULL || output_bytes == NULL) { + THROW(env, "java/lang/InternalError", "Cannot get buffer address."); + return 0; + } + input_bytes = input_bytes + input_offset; + output_bytes = output_bytes + output_offset; + + int output_len = 0; + if (!dlsym_EVP_CipherUpdate(context, output_bytes, &output_len, \ + input_bytes, input_len)) { + dlsym_EVP_CIPHER_CTX_cleanup(context); + THROW(env, "java/lang/InternalError", "Error in EVP_CipherUpdate."); + return 0; + } + return output_len; +} + +// https://www.openssl.org/docs/crypto/EVP_EncryptInit.html +static int check_doFinal_max_output_len(EVP_CIPHER_CTX *context, + int max_output_len) +{ + if (context->flags & EVP_CIPH_NO_PADDING) { + return 1; + } else { + int b = context->cipher->block_size; + if (max_output_len >= b) { + return 1; + } + + return 0; + } +} + +JNIEXPORT jint JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_doFinal + (JNIEnv *env, jobject object, jlong ctx, jobject output, jint offset, + jint max_output_len) +{ + EVP_CIPHER_CTX *context = CONTEXT(ctx); + if (!check_doFinal_max_output_len(context, max_output_len)) { + THROW(env, "javax/crypto/ShortBufferException", 
\ + "Output buffer is not sufficient."); + return 0; + } + unsigned char *output_bytes = (*env)->GetDirectBufferAddress(env, output); + if (output_bytes == NULL) { + THROW(env, "java/lang/InternalError", "Cannot get buffer address."); + return 0; + } + output_bytes = output_bytes + offset; + + int output_len = 0; + if (!dlsym_EVP_CipherFinal_ex(context, output_bytes, &output_len)) { + dlsym_EVP_CIPHER_CTX_cleanup(context); + THROW(env, "java/lang/InternalError", "Error in EVP_CipherFinal_ex."); + return 0; + } + return output_len; +} + +JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_clean + (JNIEnv *env, jobject object, jlong ctx) +{ + EVP_CIPHER_CTX *context = CONTEXT(ctx); + if (context) { + dlsym_EVP_CIPHER_CTX_free(context); + } +} + +JNIEXPORT jstring JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_getLibraryName + (JNIEnv *env, jclass clazz) +{ +#ifdef UNIX + if (dlsym_EVP_CIPHER_CTX_init) { + Dl_info dl_info; + if(dladdr( + dlsym_EVP_CIPHER_CTX_init, + &dl_info)) { + return (*env)->NewStringUTF(env, dl_info.dli_fname); + } + } + + return (*env)->NewStringUTF(env, HADOOP_OPENSSL_LIBRARY); +#endif + +#ifdef WINDOWS + LPWSTR filename = NULL; + GetLibraryName(dlsym_EVP_CIPHER_CTX_init, &filename); + if (filename != NULL) { + return (*env)->NewString(env, filename, (jsize) wcslen(filename)); + } else { + return (*env)->NewStringUTF(env, "Unavailable"); + } +#endif +} diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/org_apache_hadoop_crypto.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/org_apache_hadoop_crypto.h new file mode 100644 index 00000000000..0afab021dac --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/org_apache_hadoop_crypto.h @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ORG_APACHE_HADOOP_CRYPTO_H +#define ORG_APACHE_HADOOP_CRYPTO_H + +#include "org_apache_hadoop.h" + +#ifdef UNIX +#include +#include "config.h" +#endif + +#ifdef WINDOWS +#include "winutils.h" +#endif + +#include +#include +#include + +/** + * A helper macro to convert the java 'context-handle' + * to a EVP_CIPHER_CTX pointer. + */ +#define CONTEXT(context) ((EVP_CIPHER_CTX*)((ptrdiff_t)(context))) + +/** + * A helper macro to convert the EVP_CIPHER_CTX pointer to the + * java 'context-handle'. + */ +#define JLONG(context) ((jlong)((ptrdiff_t)(context))) + +#define KEY_LENGTH_128 16 +#define KEY_LENGTH_256 32 +#define IV_LENGTH 16 + +#define ENCRYPT_MODE 1 +#define DECRYPT_MODE 0 + +/** Currently only support AES/CTR/NoPadding. 
*/ +#define AES_CTR 0 +#define NOPADDING 0 +#define PKCSPADDING 1 + +#endif //ORG_APACHE_HADOOP_CRYPTO_H \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java new file mode 100644 index 00000000000..8150d57ee66 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto; + +import org.apache.hadoop.conf.Configuration; +import org.junit.BeforeClass; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY; + +public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec + extends TestCryptoStreams { + + @BeforeClass + public static void init() throws Exception { + Configuration conf = new Configuration(); + conf.set(HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY, + OpensslAesCtrCryptoCodec.class.getName()); + codec = CryptoCodec.getInstance(conf); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java new file mode 100644 index 00000000000..739e53fdc8f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.crypto; + +import java.nio.ByteBuffer; +import java.security.NoSuchAlgorithmException; + +import javax.crypto.NoSuchPaddingException; +import javax.crypto.ShortBufferException; + +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Assert; +import org.junit.Test; + +public class TestOpensslCipher { + private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16}; + private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}; + + @Test(timeout=120000) + public void testGetInstance() throws Exception { + if (!OpensslCipher.isNativeCodeLoaded()) { + return; + } + OpensslCipher cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, + OpensslCipher.PADDING_NOPADDING); + Assert.assertTrue(cipher != null); + + try { + cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR + 100, + OpensslCipher.PADDING_NOPADDING); + Assert.fail("Should specify correct algorithm."); + } catch (NoSuchAlgorithmException e) { + // Expect NoSuchAlgorithmException + } + + try { + cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, + OpensslCipher.PADDING_NOPADDING + 100); + Assert.fail("Should specify correct padding."); + } catch (NoSuchPaddingException e) { + // Expect NoSuchPaddingException + } + } + + @Test(timeout=120000) + public void testUpdateArguments() throws Exception { + if (!OpensslCipher.isNativeCodeLoaded()) { + return; + } + OpensslCipher cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, + OpensslCipher.PADDING_NOPADDING); + Assert.assertTrue(cipher != null); + + cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv); + + // Require direct buffers + ByteBuffer input = ByteBuffer.allocate(1024); + ByteBuffer output = ByteBuffer.allocate(1024); + + try { + cipher.update(input, output); + Assert.fail("Input and output buffer should be direct buffer."); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains( + "Direct buffers are required", e); + } + + // Output buffer length should be sufficient to store output data + input = ByteBuffer.allocateDirect(1024); + output = ByteBuffer.allocateDirect(1000); + try { + cipher.update(input, output); + Assert.fail("Output buffer length should be sufficient " + + "to store output data"); + } catch (ShortBufferException e) { + GenericTestUtils.assertExceptionContains( + "Output buffer is not sufficient", e); + } + } + + @Test(timeout=120000) + public void testDoFinalArguments() throws Exception { + if (!OpensslCipher.isNativeCodeLoaded()) { + return; + } + OpensslCipher cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, + OpensslCipher.PADDING_NOPADDING); + Assert.assertTrue(cipher != null); + + cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv); + + // Require direct buffer + ByteBuffer output = ByteBuffer.allocate(1024); + + try { + cipher.doFinal(output); + Assert.fail("Output buffer should be direct buffer."); + } catch (IllegalArgumentException e) { + GenericTestUtils.assertExceptionContains( + "Direct buffer is required", e); + } + } +} From c38665282884122d3c82b6f68376cce036aee748 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Fri, 4 Jul 2014 00:24:28 +0000 Subject: [PATCH 024/354] HDFS-6516. 
List of Encryption Zones should be based on inodes (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1607770 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../hdfs/server/namenode/FSDirectory.java | 148 ++++++++++++++++-- .../hdfs/server/namenode/FSNamesystem.java | 45 +----- 3 files changed, 142 insertions(+), 53 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 7c057bb8bc2..e1bab08693e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -35,6 +35,8 @@ fs-encryption (Unreleased) HDFS-6625. Remove the Delete Encryption Zone function (clamb) + HDFS-6516. List of Encryption Zones should be based on inodes (clamb) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 391e6a733dc..53994f91af9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -27,8 +27,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.ListIterator; +import java.util.Map; import java.util.concurrent.locks.ReentrantReadWriteLock; import com.google.protobuf.InvalidProtocolBufferException; @@ -124,6 +126,9 @@ private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) { public final static String DOT_INODES_STRING = ".inodes"; public final static byte[] DOT_INODES = DFSUtil.string2Bytes(DOT_INODES_STRING); + private final XAttr KEYID_XATTR = + XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null); + INodeDirectory rootDir; FSImage fsImage; private final FSNamesystem namesystem; @@ -136,6 +141,35 @@ private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) { private long yieldCount = 0; // keep track of lock yield count. private final int inodeXAttrsLimit; //inode xattrs max limit + /* + * EncryptionZoneInt is the internal representation of an encryption + * zone. The external representation of an EZ is embodied in an + * EncryptionZone and contains the EZ's pathname. 
+ */ + private class EncryptionZoneInt { + private final String keyId; + private final long inodeId; + + EncryptionZoneInt(String keyId, long inodeId) { + this.keyId = keyId; + this.inodeId = inodeId; + } + + String getKeyId() { + return keyId; + } + + long getINodeId() { + return inodeId; + } + + String getFullPathName() { + return getInode(inodeId).getFullPathName(); + } + } + + private final Map encryptionZones; + // lock to protect the directory and BlockMap private final ReentrantReadWriteLock dirLock; @@ -221,6 +255,7 @@ public int getWriteHoldCount() { + " times"); nameCache = new NameCache(threshold); namesystem = ns; + encryptionZones = new HashMap(); } private FSNamesystem getFSNamesystem() { @@ -545,7 +580,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) return false; } - checkEncryptionZoneMoveValidity(src, dst); + checkEncryptionZoneMoveValidity(srcIIP, dstIIP, src); // Ensure dst has quota to accommodate rename verifyFsLimitsForRename(srcIIP, dstIIP); verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes()); @@ -750,7 +785,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, throw new IOException(error); } - checkEncryptionZoneMoveValidity(src, dst); + checkEncryptionZoneMoveValidity(srcIIP, dstIIP, src); final INode dstInode = dstIIP.getLastINode(); List snapshottableDirs = new ArrayList(); @@ -974,14 +1009,36 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, throw new IOException("rename from " + src + " to " + dst + " failed."); } - private void checkEncryptionZoneMoveValidity(String src, String dst) + boolean isInAnEZ(INodesInPath iip) + throws UnresolvedLinkException, SnapshotAccessControlException { + readLock(); + try { + return (getEncryptionZoneForPath(iip) != null); + } finally { + readUnlock(); + } + } + + private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) { + Preconditions.checkNotNull(iip); + final INode[] inodes = iip.getINodes(); + for (int i = inodes.length -1; i >= 0; i--) { + final INode inode = inodes[i]; + if (inode != null) { + final EncryptionZoneInt ezi = encryptionZones.get(inode.getId()); + if (ezi != null) { + return ezi; + } + } + } + return null; + } + + private void checkEncryptionZoneMoveValidity(INodesInPath srcIIP, + INodesInPath dstIIP, String src) throws IOException { - final EncryptionZone srcEZ = - getFSNamesystem().getEncryptionZoneForPath(src); - final EncryptionZone dstEZ = - getFSNamesystem().getEncryptionZoneForPath(dst); - final boolean srcInEZ = srcEZ != null; - final boolean dstInEZ = dstEZ != null; + final boolean srcInEZ = (getEncryptionZoneForPath(srcIIP) != null); + final boolean dstInEZ = (getEncryptionZoneForPath(dstIIP) != null); if (srcInEZ) { if (!dstInEZ) { throw new IOException(src + " can't be moved from an encryption zone."); @@ -993,12 +1050,18 @@ private void checkEncryptionZoneMoveValidity(String src, String dst) } if (srcInEZ || dstInEZ) { - if (!srcEZ.getPath().equals(dstEZ.getPath())) { + final EncryptionZoneInt srcEZI = getEncryptionZoneForPath(srcIIP); + final EncryptionZoneInt dstEZI = getEncryptionZoneForPath(dstIIP); + Preconditions.checkArgument(srcEZI != null, "couldn't find src EZ?"); + Preconditions.checkArgument(dstEZI != null, "couldn't find dst EZ?"); + if (srcEZI != dstEZI) { + final String srcEZPath = srcEZI.getFullPathName(); + final String dstEZPath = dstEZI.getFullPathName(); final StringBuilder sb = new StringBuilder(src); sb.append(" can't be moved from encryption zone "); - sb.append(srcEZ.getPath()); + 
sb.append(srcEZPath); sb.append(" to encryption zone "); - sb.append(dstEZ.getPath()); + sb.append(dstEZPath); sb.append("."); throw new IOException(sb.toString()); } @@ -2167,6 +2230,18 @@ public INodeMap getINodeMap() { public final void addToInodeMap(INode inode) { if (inode instanceof INodeWithAdditionalFields) { inodeMap.put(inode); + final XAttrFeature xaf = inode.getXAttrFeature(); + if (xaf != null) { + final List xattrs = xaf.getXAttrs(); + for (XAttr xattr : xattrs) { + final String xaName = XAttrHelper.getPrefixName(xattr); + if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { + encryptionZones.put(inode.getId(), + new EncryptionZoneInt(new String(xattr.getValue()), + inode.getId())); + } + } + } } } @@ -2178,6 +2253,7 @@ public final void removeFromInodeMap(List inodes) { for (INode inode : inodes) { if (inode != null && inode instanceof INodeWithAdditionalFields) { inodeMap.remove(inode); + encryptionZones.remove(inode.getId()); } } } @@ -2682,6 +2758,8 @@ List filterINodeXAttrs(final List existingXAttrs, for (ListIterator it = toFilter.listIterator(); it.hasNext() ;) { XAttr filter = it.next(); + Preconditions.checkArgument(!KEYID_XATTR.equalsIgnoreValue(filter), + "The encryption zone xattr should never be deleted."); if (a.equalsIgnoreValue(filter)) { add = false; it.remove(); @@ -2705,17 +2783,42 @@ XAttr createEncryptionZone(String src, String keyId) throw new IOException( "Attempt to create an encryption zone for a non-empty directory."); } + + final INodesInPath srcIIP = getINodesInPath4Write(src, false); + final EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP); + if (ezi != null) { + throw new IOException("Directory " + src + + " is already in an encryption zone. (" + ezi.getFullPathName() + ")"); + } + final XAttr keyIdXAttr = XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyId.getBytes()); - List xattrs = Lists.newArrayListWithCapacity(1); + final List xattrs = Lists.newArrayListWithCapacity(1); xattrs.add(keyIdXAttr); - unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); + final INode inode = unprotectedSetXAttrs(src, xattrs, + EnumSet.of(XAttrSetFlag.CREATE)); + encryptionZones.put(inode.getId(), + new EncryptionZoneInt(keyId, inode.getId())); return keyIdXAttr; } finally { writeUnlock(); } } + List listEncryptionZones() throws IOException { + readLock(); + try { + final List ret = + Lists.newArrayListWithExpectedSize(encryptionZones.size()); + for (EncryptionZoneInt ezi : encryptionZones.values()) { + ret.add(new EncryptionZone(ezi.getFullPathName(), ezi.getKeyId())); + } + return ret; + } finally { + readUnlock(); + } + } + /** * Set the FileEncryptionInfo for an INode. */ @@ -2782,7 +2885,7 @@ void setXAttrs(final String src, final List xAttrs, } } - void unprotectedSetXAttrs(final String src, final List xAttrs, + INode unprotectedSetXAttrs(final String src, final List xAttrs, final EnumSet flag) throws QuotaExceededException, IOException { assert hasWriteLock(); @@ -2791,7 +2894,22 @@ void unprotectedSetXAttrs(final String src, final List xAttrs, int snapshotId = iip.getLatestSnapshotId(); List existingXAttrs = XAttrStorage.readINodeXAttrs(inode); List newXAttrs = setINodeXAttrs(existingXAttrs, xAttrs, flag); + + /* + * If we're adding the encryption zone xattr, then add src to the list + * of encryption zones. 
+ */ + for (XAttr xattr : newXAttrs) { + final String xaName = XAttrHelper.getPrefixName(xattr); + if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { + final EncryptionZoneInt ez = + new EncryptionZoneInt(new String(xattr.getValue()), inode.getId()); + encryptionZones.put(inode.getId(), ez); + } + } + XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId); + return inode; } List setINodeXAttrs(final List existingXAttrs, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 5a7cb12304c..efa153a5edb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -270,7 +270,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Charsets; -import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; @@ -528,7 +527,6 @@ private void logAuditEvent(boolean succeeded, private KeyProvider provider = null; private KeyProvider.Options providerOptions = null; - private final Map encryptionZones; private final CryptoCodec codec; private volatile boolean imageLoaded = false; @@ -855,7 +853,6 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { auditLoggers.get(0) instanceof DefaultAuditLogger; this.retryCache = ignoreRetryCache ? null : initRetryCache(conf); this.nnConf = new NNConf(conf); - this.encryptionZones = new HashMap(); } catch(IOException e) { LOG.error(getClass().getSimpleName() + " initialization failed.", e); close(); @@ -2308,11 +2305,12 @@ private void verifyParentDir(String src) throws FileNotFoundException, * @return chosen CipherSuite, or null if file is not in an EncryptionZone * @throws IOException */ - private CipherSuite chooseCipherSuite(String src, List - cipherSuites) throws UnknownCipherSuiteException { - EncryptionZone zone = getEncryptionZoneForPath(src); + private CipherSuite chooseCipherSuite(INodesInPath srcIIP, List + cipherSuites) + throws UnknownCipherSuiteException, UnresolvedLinkException, + SnapshotAccessControlException { // Not in an EZ - if (zone == null) { + if (!dir.isInAnEZ(srcIIP)) { return null; } CipherSuite chosen = null; @@ -2469,7 +2467,7 @@ private void startFileInternal(FSPermissionChecker pc, String src, } FileEncryptionInfo feInfo = null; - CipherSuite suite = chooseCipherSuite(src, cipherSuites); + CipherSuite suite = chooseCipherSuite(iip, cipherSuites); if (suite != null) { Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN), "Chose an UNKNOWN CipherSuite!"); @@ -3651,11 +3649,6 @@ private boolean deleteInternal(String src, boolean recursive, FsAction.ALL, true, false); } - final EncryptionZone ez = getEncryptionZoneForPath(src); - if (ez != null) { - encryptionZones.remove(src); - } - long mtime = now(); // Unlink the target directory from directory tree long filesRemoved = dir.delete(src, collectedBlocks, removedINodes, @@ -8365,17 +8358,10 @@ private void createEncryptionZoneInt(final String srcArg, String keyId, checkNameNodeSafeMode("Cannot create encryption zone on " + src); src = FSDirectory.resolvePath(src, pathComponents, dir); - EncryptionZone ez = getEncryptionZoneForPath(src); - if (ez != null) { - throw new 
IOException("Directory " + src + - " is already in an encryption zone. (" + ez.getPath() + ")"); - } - final XAttr keyIdXAttr = dir.createEncryptionZone(src, keyId); List xAttrs = Lists.newArrayListWithCapacity(1); xAttrs.add(keyIdXAttr); getEditLog().logSetXAttrs(src, xAttrs, logRetryCache); - encryptionZones.put(src, new EncryptionZone(src, keyId)); resultingStat = getAuditFileInfo(src, false); } finally { writeUnlock(); @@ -8400,7 +8386,6 @@ private String createNewKey(String src) } List listEncryptionZones() throws IOException { - boolean success = false; checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); @@ -8408,8 +8393,7 @@ List listEncryptionZones() throws IOException { try { checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); - final List ret = - Lists.newArrayList(encryptionZones.values()); + final List ret = dir.listEncryptionZones(); success = true; return ret; } finally { @@ -8418,21 +8402,6 @@ List listEncryptionZones() throws IOException { } } - /** Lookup the encryption zone of a path. */ - EncryptionZone getEncryptionZoneForPath(String src) { - assert hasReadLock(); - final String[] components = INode.getPathNames(src); - for (int i = components.length; i > 0; i--) { - final List l = Arrays.asList(Arrays.copyOfRange(components, 0, i)); - String p = Joiner.on(Path.SEPARATOR).join(l); - final EncryptionZone ret = encryptionZones.get(p); - if (ret != null) { - return ret; - } - } - return null; - } - /** * Set xattr for a file or directory. * From 83702b070709a2d9ccdc233f96d5e4cd442dfe9b Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Mon, 7 Jul 2014 09:51:49 +0000 Subject: [PATCH 025/354] HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1608389 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 ++ .../hdfs/server/namenode/FSDirectory.java | 19 ++++++++++--------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index e1bab08693e..75eace4d865 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -37,6 +37,8 @@ fs-encryption (Unreleased) HDFS-6516. List of Encryption Zones should be based on inodes (clamb) + HDFS-6629. 
Not able to create symlinks after HDFS-6516 (umamaheswararao) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 53994f91af9..d8bf08d6120 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -2230,15 +2230,16 @@ public INodeMap getINodeMap() { public final void addToInodeMap(INode inode) { if (inode instanceof INodeWithAdditionalFields) { inodeMap.put(inode); - final XAttrFeature xaf = inode.getXAttrFeature(); - if (xaf != null) { - final List xattrs = xaf.getXAttrs(); - for (XAttr xattr : xattrs) { - final String xaName = XAttrHelper.getPrefixName(xattr); - if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { - encryptionZones.put(inode.getId(), - new EncryptionZoneInt(new String(xattr.getValue()), - inode.getId())); + if (!inode.isSymlink()) { + final XAttrFeature xaf = inode.getXAttrFeature(); + if (xaf != null) { + final List xattrs = xaf.getXAttrs(); + for (XAttr xattr : xattrs) { + final String xaName = XAttrHelper.getPrefixName(xattr); + if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { + encryptionZones.put(inode.getId(), new EncryptionZoneInt( + new String(xattr.getValue()), inode.getId())); + } } } } From ceed0f6c46736323bd1b077125ff4e217c649208 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 8 Jul 2014 04:34:04 +0000 Subject: [PATCH 026/354] HDFS-6635. Refactor encryption zone functionality into new EncryptionZoneManager class. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1608657 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 + .../namenode/EncryptionZoneManager.java | 180 ++++++++++++++++++ .../hdfs/server/namenode/FSDirectory.java | 132 ++----------- 3 files changed, 197 insertions(+), 118 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 75eace4d865..e6db71fde45 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -39,6 +39,9 @@ fs-encryption (Unreleased) HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao) + HDFS-6635. Refactor encryption zone functionality into new + EncryptionZoneManager class. 
(wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java new file mode 100644 index 00000000000..a43273dfe34 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -0,0 +1,180 @@ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.hdfs.XAttrHelper; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; + + +import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants + .CRYPTO_XATTR_ENCRYPTION_ZONE; + +/** + * Manages the list of encryption zones in the filesystem. Relies on the + * FSDirectory lock for synchronization. + */ +public class EncryptionZoneManager { + + /** + * EncryptionZoneInt is the internal representation of an encryption zone. The + * external representation of an EZ is embodied in an EncryptionZone and + * contains the EZ's pathname. + */ + private class EncryptionZoneInt { + private final String keyId; + private final long inodeId; + + EncryptionZoneInt(long inodeId, String keyId) { + this.keyId = keyId; + this.inodeId = inodeId; + } + + String getKeyId() { + return keyId; + } + + long getINodeId() { + return inodeId; + } + + String getFullPathName() { + return dir.getInode(inodeId).getFullPathName(); + } + } + + private final Map encryptionZones; + + private final FSDirectory dir; + + /** + * Construct a new EncryptionZoneManager. + * + * @param dir Enclosing FSDirectory + */ + public EncryptionZoneManager(FSDirectory dir) { + this.dir = dir; + encryptionZones = new HashMap(); + } + + /** + * Add a new encryption zone. + * + * @param inodeId of the encryption zone + * @param keyId encryption zone key id + */ + void addEncryptionZone(Long inodeId, String keyId) { + final EncryptionZoneInt ez = new EncryptionZoneInt(inodeId, keyId); + encryptionZones.put(inodeId, ez); + } + + void removeEncryptionZone(Long inodeId) { + encryptionZones.remove(inodeId); + } + + /** + * Returns true if an IIP is within an encryption zone. + */ + boolean isInAnEZ(INodesInPath iip) + throws UnresolvedLinkException, SnapshotAccessControlException { + return (getEncryptionZoneForPath(iip) != null); + } + + private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) { + Preconditions.checkNotNull(iip); + final INode[] inodes = iip.getINodes(); + for (int i = inodes.length - 1; i >= 0; i--) { + final INode inode = inodes[i]; + if (inode != null) { + final EncryptionZoneInt ezi = encryptionZones.get(inode.getId()); + if (ezi != null) { + return ezi; + } + } + } + return null; + } + + /** + * Throws an exception if the provided inode cannot be renamed into the + * destination because of differing encryption zones. 
+ * + * @param srcIIP source IIP + * @param dstIIP destination IIP + * @param src source path, used for debugging + * @throws IOException if the src cannot be renamed to the dst + */ + void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src) + throws IOException { + final boolean srcInEZ = (getEncryptionZoneForPath(srcIIP) != null); + final boolean dstInEZ = (getEncryptionZoneForPath(dstIIP) != null); + if (srcInEZ) { + if (!dstInEZ) { + throw new IOException(src + " can't be moved from an encryption zone."); + } + } else { + if (dstInEZ) { + throw new IOException(src + " can't be moved into an encryption zone."); + } + } + + if (srcInEZ || dstInEZ) { + final EncryptionZoneInt srcEZI = getEncryptionZoneForPath(srcIIP); + final EncryptionZoneInt dstEZI = getEncryptionZoneForPath(dstIIP); + Preconditions.checkArgument(srcEZI != null, "couldn't find src EZ?"); + Preconditions.checkArgument(dstEZI != null, "couldn't find dst EZ?"); + if (srcEZI != dstEZI) { + final String srcEZPath = srcEZI.getFullPathName(); + final String dstEZPath = dstEZI.getFullPathName(); + final StringBuilder sb = new StringBuilder(src); + sb.append(" can't be moved from encryption zone "); + sb.append(srcEZPath); + sb.append(" to encryption zone "); + sb.append(dstEZPath); + sb.append("."); + throw new IOException(sb.toString()); + } + } + } + + XAttr createEncryptionZone(String src, String keyId) throws IOException { + if (dir.isNonEmptyDirectory(src)) { + throw new IOException( + "Attempt to create an encryption zone for a non-empty directory."); + } + + final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false); + final EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP); + if (ezi != null) { + throw new IOException("Directory " + src + + " is already in an encryption zone. (" + ezi.getFullPathName() + ")"); + } + + final XAttr keyIdXAttr = + XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyId.getBytes()); + final List xattrs = Lists.newArrayListWithCapacity(1); + xattrs.add(keyIdXAttr); + final INode inode = + dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); + addEncryptionZone(inode.getId(), keyId); + return keyIdXAttr; + } + + List listEncryptionZones() throws IOException { + final List ret = + Lists.newArrayListWithExpectedSize(encryptionZones.size()); + for (EncryptionZoneInt ezi : encryptionZones.values()) { + ret.add(new EncryptionZone(ezi.getFullPathName(), ezi.getKeyId())); + } + return ret; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index ec015d94f13..81dcf9ef95b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -27,10 +27,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; -import java.util.HashMap; import java.util.List; import java.util.ListIterator; -import java.util.Map; import java.util.concurrent.locks.ReentrantReadWriteLock; import com.google.protobuf.InvalidProtocolBufferException; @@ -139,35 +137,6 @@ private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) { private long yieldCount = 0; // keep track of lock yield count. 
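Stepping back for a moment to the EncryptionZoneManager.checkMoveValidity method introduced above: its three error cases reduce to a single rule, namely that a path can only be renamed within the encryption zone it already belongs to. The sketch below illustrates that rule only; it is not code from the patch, EzMoveRuleSketch and checkMove are made-up names, and the zone arguments stand in for whatever getEncryptionZoneForPath resolves by walking the INodesInPath from leaf to root.

import java.io.IOException;

class EzMoveRuleSketch {
  // Z stands in for the manager's internal zone handle (EncryptionZoneInt above);
  // null means the path is not inside any encryption zone.
  static <Z> void checkMove(Z srcZone, Z dstZone, String src) throws IOException {
    if (srcZone == null && dstZone == null) {
      return; // neither endpoint is in an encryption zone: always allowed
    }
    if (srcZone == null || dstZone == null || srcZone != dstZone) {
      // Moving into a zone, out of a zone, or between two different zones all fail;
      // the identity comparison mirrors checkMoveValidity's srcEZI != dstEZI check.
      throw new IOException(src + " can't be moved across encryption zone boundaries.");
    }
  }
}

Renames that stay inside one zone (or stay entirely outside all zones) pass; everything else throws, which is why FSDirectory calls this check for both the logRetryCache and the snapshot-aware rename paths.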
private final int inodeXAttrsLimit; //inode xattrs max limit - /* - * EncryptionZoneInt is the internal representation of an encryption - * zone. The external representation of an EZ is embodied in an - * EncryptionZone and contains the EZ's pathname. - */ - private class EncryptionZoneInt { - private final String keyId; - private final long inodeId; - - EncryptionZoneInt(String keyId, long inodeId) { - this.keyId = keyId; - this.inodeId = inodeId; - } - - String getKeyId() { - return keyId; - } - - long getINodeId() { - return inodeId; - } - - String getFullPathName() { - return getInode(inodeId).getFullPathName(); - } - } - - private final Map encryptionZones; - // lock to protect the directory and BlockMap private final ReentrantReadWriteLock dirLock; @@ -204,6 +173,8 @@ public int getWriteHoldCount() { return this.dirLock.getWriteHoldCount(); } + final EncryptionZoneManager ezManager; + /** * Caches frequently used file names used in {@link INode} to reuse * byte[] objects and reduce heap usage. @@ -252,7 +223,8 @@ public int getWriteHoldCount() { + " times"); nameCache = new NameCache(threshold); namesystem = ns; - encryptionZones = new HashMap(); + + ezManager = new EncryptionZoneManager(this); } private FSNamesystem getFSNamesystem() { @@ -550,7 +522,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) return false; } - checkEncryptionZoneMoveValidity(srcIIP, dstIIP, src); + ezManager.checkMoveValidity(srcIIP, dstIIP, src); // Ensure dst has quota to accommodate rename verifyFsLimitsForRename(srcIIP, dstIIP); verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes()); @@ -629,7 +601,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, throw new IOException(error); } - checkEncryptionZoneMoveValidity(srcIIP, dstIIP, src); + ezManager.checkMoveValidity(srcIIP, dstIIP, src); final INode dstInode = dstIIP.getLastINode(); List snapshottableDirs = new ArrayList(); @@ -937,61 +909,12 @@ boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException, SnapshotAccessControlException { readLock(); try { - return (getEncryptionZoneForPath(iip) != null); + return ezManager.isInAnEZ(iip); } finally { readUnlock(); } } - private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) { - Preconditions.checkNotNull(iip); - final INode[] inodes = iip.getINodes(); - for (int i = inodes.length -1; i >= 0; i--) { - final INode inode = inodes[i]; - if (inode != null) { - final EncryptionZoneInt ezi = encryptionZones.get(inode.getId()); - if (ezi != null) { - return ezi; - } - } - } - return null; - } - - private void checkEncryptionZoneMoveValidity(INodesInPath srcIIP, - INodesInPath dstIIP, String src) - throws IOException { - final boolean srcInEZ = (getEncryptionZoneForPath(srcIIP) != null); - final boolean dstInEZ = (getEncryptionZoneForPath(dstIIP) != null); - if (srcInEZ) { - if (!dstInEZ) { - throw new IOException(src + " can't be moved from an encryption zone."); - } - } else { - if (dstInEZ) { - throw new IOException(src + " can't be moved into an encryption zone."); - } - } - - if (srcInEZ || dstInEZ) { - final EncryptionZoneInt srcEZI = getEncryptionZoneForPath(srcIIP); - final EncryptionZoneInt dstEZI = getEncryptionZoneForPath(dstIIP); - Preconditions.checkArgument(srcEZI != null, "couldn't find src EZ?"); - Preconditions.checkArgument(dstEZI != null, "couldn't find dst EZ?"); - if (srcEZI != dstEZI) { - final String srcEZPath = srcEZI.getFullPathName(); - final String dstEZPath = dstEZI.getFullPathName(); - final StringBuilder sb = new 
StringBuilder(src); - sb.append(" can't be moved from encryption zone "); - sb.append(srcEZPath); - sb.append(" to encryption zone "); - sb.append(dstEZPath); - sb.append("."); - throw new IOException(sb.toString()); - } - } - } - /** * Set file replication * @@ -2157,8 +2080,8 @@ public final void addToInodeMap(INode inode) { for (XAttr xattr : xattrs) { final String xaName = XAttrHelper.getPrefixName(xattr); if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { - encryptionZones.put(inode.getId(), new EncryptionZoneInt( - new String(xattr.getValue()), inode.getId())); + ezManager.addEncryptionZone(inode.getId(), + new String(xattr.getValue())); } } } @@ -2174,7 +2097,7 @@ public final void removeFromInodeMap(List inodes) { for (INode inode : inodes) { if (inode != null && inode instanceof INodeWithAdditionalFields) { inodeMap.remove(inode); - encryptionZones.remove(inode.getId()); + ezManager.removeEncryptionZone(inode.getId()); } } } @@ -2700,27 +2623,7 @@ XAttr createEncryptionZone(String src, String keyId) throws IOException { writeLock(); try { - if (isNonEmptyDirectory(src)) { - throw new IOException( - "Attempt to create an encryption zone for a non-empty directory."); - } - - final INodesInPath srcIIP = getINodesInPath4Write(src, false); - final EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP); - if (ezi != null) { - throw new IOException("Directory " + src + - " is already in an encryption zone. (" + ezi.getFullPathName() + ")"); - } - - final XAttr keyIdXAttr = - XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyId.getBytes()); - final List xattrs = Lists.newArrayListWithCapacity(1); - xattrs.add(keyIdXAttr); - final INode inode = unprotectedSetXAttrs(src, xattrs, - EnumSet.of(XAttrSetFlag.CREATE)); - encryptionZones.put(inode.getId(), - new EncryptionZoneInt(keyId, inode.getId())); - return keyIdXAttr; + return ezManager.createEncryptionZone(src, keyId); } finally { writeUnlock(); } @@ -2729,12 +2632,7 @@ XAttr createEncryptionZone(String src, String keyId) List listEncryptionZones() throws IOException { readLock(); try { - final List ret = - Lists.newArrayListWithExpectedSize(encryptionZones.size()); - for (EncryptionZoneInt ezi : encryptionZones.values()) { - ret.add(new EncryptionZone(ezi.getFullPathName(), ezi.getKeyId())); - } - return ret; + return ezManager.listEncryptionZones(); } finally { readUnlock(); } @@ -2823,9 +2721,7 @@ INode unprotectedSetXAttrs(final String src, final List xAttrs, for (XAttr xattr : newXAttrs) { final String xaName = XAttrHelper.getPrefixName(xattr); if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { - final EncryptionZoneInt ez = - new EncryptionZoneInt(new String(xattr.getValue()), inode.getId()); - encryptionZones.put(inode.getId(), ez); + ezManager.addEncryptionZone(inode.getId(), new String(xattr.getValue())); } } @@ -3084,7 +2980,7 @@ private INode getINode4Write(String src, boolean resolveLink) * @throws UnresolvedLinkException if symlink can't be resolved * @throws SnapshotAccessControlException if path is in RO snapshot */ - private INodesInPath getINodesInPath4Write(String src, boolean resolveLink) + INodesInPath getINodesInPath4Write(String src, boolean resolveLink) throws UnresolvedLinkException, SnapshotAccessControlException { final byte[][] components = INode.getPathComponents(src); INodesInPath inodesInPath = INodesInPath.resolve(rootDir, components, From 7aa07912dcf48e363555f6dbb0727cf466259334 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 8 Jul 2014 04:34:52 +0000 Subject: [PATCH 027/354] Move something 
down in CHANGES-fs-encryption.txt git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1608658 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index e6db71fde45..dfc8055c6f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -6,8 +6,6 @@ fs-encryption (Unreleased) NEW FEATURES - HDFS-6388. HDFS integration with KeyProvider. (clamb) - IMPROVEMENTS HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL @@ -18,6 +16,8 @@ fs-encryption (Unreleased) HDFS-6386. HDFS Encryption Zones (clamb) + HDFS-6388. HDFS integration with KeyProvider. (clamb) + HDFS-6473. Protocol and API for Encryption Zones (clamb) HDFS-6392. Wire crypto streams for encrypted files in From d90671137e005e05b3f41ee1f66387dee95b609a Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Thu, 10 Jul 2014 06:27:52 +0000 Subject: [PATCH 028/354] HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name format. (yliu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1609403 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 6 ++ .../crypto/OpensslAesCtrCryptoCodec.java | 3 +- .../apache/hadoop/crypto/OpensslCipher.java | 101 +++++++++++++++--- .../hadoop/crypto/TestOpensslCipher.java | 15 +-- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 - 5 files changed, 97 insertions(+), 31 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index 8983f8e73cb..2ea4420bce2 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -28,6 +28,12 @@ fs-encryption (Unreleased) HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[]. (wang via yliu) + HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL. + (Yi Liu via cmccabe) + + HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name + format. 
(Yi Liu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java index 669271fa9ef..ee11f50683b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java @@ -70,8 +70,7 @@ private static class OpensslAesCtrCipher implements Encryptor, Decryptor { public OpensslAesCtrCipher(int mode) throws GeneralSecurityException { this.mode = mode; - cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, - OpensslCipher.PADDING_NOPADDING); + cipher = OpensslCipher.getInstance(SUITE.getName()); } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java index c0a4e9bdb45..652a8b4c324 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java @@ -19,6 +19,7 @@ import java.nio.ByteBuffer; import java.security.NoSuchAlgorithmException; +import java.util.StringTokenizer; import javax.crypto.BadPaddingException; import javax.crypto.IllegalBlockSizeException; @@ -45,11 +46,34 @@ public final class OpensslCipher { public static final int DECRYPT_MODE = 0; /** Currently only support AES/CTR/NoPadding. */ - public static final int AES_CTR = 0; - public static final int PADDING_NOPADDING = 0; + private static enum AlgMode { + AES_CTR; + + static int get(String algorithm, String mode) + throws NoSuchAlgorithmException { + try { + return AlgMode.valueOf(algorithm + "_" + mode).ordinal(); + } catch (Exception e) { + throw new NoSuchAlgorithmException("Doesn't support algorithm: " + + algorithm + " and mode: " + mode); + } + } + } + + private static enum Padding { + NoPadding; + + static int get(String padding) throws NoSuchPaddingException { + try { + return Padding.valueOf(padding).ordinal(); + } catch (Exception e) { + throw new NoSuchPaddingException("Doesn't support padding: " + padding); + } + } + } private long context = 0; - private final int algorithm; + private final int alg; private final int padding; private static boolean nativeCipherLoaded = false; @@ -69,26 +93,71 @@ public static boolean isNativeCodeLoaded() { return nativeCipherLoaded; } - private OpensslCipher(long context, int algorithm, int padding) { + private OpensslCipher(long context, int alg, int padding) { this.context = context; - this.algorithm = algorithm; + this.alg = alg; this.padding = padding; } /** * Return an OpensslCipher object that implements the specified - * algorithm. + * transformation. * - * @param algorithm currently only supports {@link #AES_CTR} - * @param padding currently only supports {@link #PADDING_NOPADDING} - * @return OpensslCipher an OpensslCipher object - * @throws NoSuchAlgorithmException - * @throws NoSuchPaddingException + * @param transformation the name of the transformation, e.g., + * AES/CTR/NoPadding. + * @return OpensslCipher an OpensslCipher object + * @throws NoSuchAlgorithmException if transformation is null, + * empty, in an invalid format, or if Openssl doesn't implement the + * specified algorithm. 
+ * @throws NoSuchPaddingException if transformation contains + * a padding scheme that is not available. */ - public static final OpensslCipher getInstance(int algorithm, - int padding) throws NoSuchAlgorithmException, NoSuchPaddingException { - long context = initContext(algorithm, padding); - return new OpensslCipher(context, algorithm, padding); + public static final OpensslCipher getInstance(String transformation) + throws NoSuchAlgorithmException, NoSuchPaddingException { + Transform transform = tokenizeTransformation(transformation); + int algMode = AlgMode.get(transform.alg, transform.mode); + int padding = Padding.get(transform.padding); + long context = initContext(algMode, padding); + return new OpensslCipher(context, algMode, padding); + } + + /** Nested class for algorithm, mode and padding. */ + private static class Transform { + final String alg; + final String mode; + final String padding; + + public Transform(String alg, String mode, String padding) { + this.alg = alg; + this.mode = mode; + this.padding = padding; + } + } + + private static Transform tokenizeTransformation(String transformation) + throws NoSuchAlgorithmException { + if (transformation == null) { + throw new NoSuchAlgorithmException("No transformation given."); + } + + /* + * Array containing the components of a Cipher transformation: + * + * index 0: algorithm (e.g., AES) + * index 1: mode (e.g., CTR) + * index 2: padding (e.g., NoPadding) + */ + String[] parts = new String[3]; + int count = 0; + StringTokenizer parser = new StringTokenizer(transformation, "/"); + while (parser.hasMoreTokens() && count < 3) { + parts[count++] = parser.nextToken().trim(); + } + if (count != 3 || parser.hasMoreTokens()) { + throw new NoSuchAlgorithmException("Invalid transformation format: " + + transformation); + } + return new Transform(parts[0], parts[1], parts[2]); } /** @@ -99,7 +168,7 @@ public static final OpensslCipher getInstance(int algorithm, * @param iv crypto iv */ public void init(int mode, byte[] key, byte[] iv) { - context = init(context, mode, algorithm, padding, key, iv); + context = init(context, mode, alg, padding, key, iv); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java index 739e53fdc8f..b3a894a164f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java @@ -38,21 +38,18 @@ public void testGetInstance() throws Exception { if (!OpensslCipher.isNativeCodeLoaded()) { return; } - OpensslCipher cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, - OpensslCipher.PADDING_NOPADDING); + OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding"); Assert.assertTrue(cipher != null); try { - cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR + 100, - OpensslCipher.PADDING_NOPADDING); + cipher = OpensslCipher.getInstance("AES2/CTR/NoPadding"); Assert.fail("Should specify correct algorithm."); } catch (NoSuchAlgorithmException e) { // Expect NoSuchAlgorithmException } try { - cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, - OpensslCipher.PADDING_NOPADDING + 100); + cipher = OpensslCipher.getInstance("AES/CTR/NoPadding2"); Assert.fail("Should specify correct padding."); } catch (NoSuchPaddingException e) { // Expect NoSuchPaddingException @@ -64,8 +61,7 @@ public 
void testUpdateArguments() throws Exception { if (!OpensslCipher.isNativeCodeLoaded()) { return; } - OpensslCipher cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, - OpensslCipher.PADDING_NOPADDING); + OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding"); Assert.assertTrue(cipher != null); cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv); @@ -100,8 +96,7 @@ public void testDoFinalArguments() throws Exception { if (!OpensslCipher.isNativeCodeLoaded()) { return; } - OpensslCipher cipher = OpensslCipher.getInstance(OpensslCipher.AES_CTR, - OpensslCipher.PADDING_NOPADDING); + OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding"); Assert.assertTrue(cipher != null); cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index dfc8055c6f1..4c2a60ef4b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -8,9 +8,6 @@ fs-encryption (Unreleased) IMPROVEMENTS - HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL - (hitliuyi via cmccabe) - HDFS-6387. HDFS CLI admin tool for creating & deleting an encryption zone. (clamb) From c30872a4ee694c8866fff1bc22b8a1afa88f3137 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Thu, 10 Jul 2014 22:11:01 +0000 Subject: [PATCH 029/354] Temporary removing files incorrect case in SVN that break OSX git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1609581 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java | 0 .../main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java | 0 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AESCTRCryptoCodec.java deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JCEAESCTRCryptoCodec.java deleted file mode 100644 index e69de29bb2d..00000000000 From 95986dd2fb4527c43fa4c088c61fb7b4bd794d23 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Fri, 11 Jul 2014 00:43:03 +0000 Subject: [PATCH 030/354] MAPREDUCE-5890. Support for encrypting Intermediate data and spills in local filesystem. 
(asuresh via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1609597 13f79535-47bb-0310-9956-ffa450edef68 --- .../CHANGES-fs-encryption.txt | 15 ++ .../org/apache/hadoop/mapred/BackupStore.java | 6 +- .../java/org/apache/hadoop/mapred/IFile.java | 21 +- .../org/apache/hadoop/mapred/MapTask.java | 29 +- .../java/org/apache/hadoop/mapred/Merger.java | 15 +- .../apache/hadoop/mapreduce/CryptoUtils.java | 199 ++++++++++++++ .../apache/hadoop/mapreduce/JobSubmitter.java | 9 +- .../apache/hadoop/mapreduce/MRJobConfig.java | 14 + .../hadoop/mapreduce/task/reduce/Fetcher.java | 12 +- .../mapreduce/task/reduce/LocalFetcher.java | 4 + .../task/reduce/MergeManagerImpl.java | 32 ++- .../task/reduce/OnDiskMapOutput.java | 3 +- .../mapreduce/task/reduce/TestMerger.java | 148 +++++++--- .../org/apache/hadoop/mapred/TestIFile.java | 11 +- .../TestMRIntermediateDataEncryption.java | 254 ++++++++++++++++++ .../apache/hadoop/mapred/TestReduceTask.java | 2 +- .../mapred/pipes/TestPipeApplication.java | 12 +- 17 files changed, 692 insertions(+), 94 deletions(-) create mode 100644 hadoop-mapreduce-project/CHANGES-fs-encryption.txt create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java diff --git a/hadoop-mapreduce-project/CHANGES-fs-encryption.txt b/hadoop-mapreduce-project/CHANGES-fs-encryption.txt new file mode 100644 index 00000000000..83fdc1ef804 --- /dev/null +++ b/hadoop-mapreduce-project/CHANGES-fs-encryption.txt @@ -0,0 +1,15 @@ +Hadoop MapReduce Change Log + +fs-encryption (Unreleased) + + INCOMPATIBLE CHANGES + + NEW FEATURES + + MAPREDUCE-5890. Support for encrypting Intermediate + data and spills in local filesystem. 
(asuresh via tucu) + + IMPROVEMENTS + + BUG FIXES + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java index cfcf0f2c6c7..be7fe181f90 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java @@ -31,6 +31,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.Path; @@ -43,6 +44,7 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.mapreduce.CryptoUtils; /** * BackupStore is an utility class that is used to support @@ -572,7 +574,9 @@ private Writer createSpillFile() throws IOException { file = lDirAlloc.getLocalPathForWrite(tmp.toUri().getPath(), -1, conf); - return new Writer(conf, fs, file); + FSDataOutputStream out = fs.create(file); + out = CryptoUtils.wrapIfNecessary(conf, out); + return new Writer(conf, out, null, null, null, null, true); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java index a410c975578..30ebd6b8ca3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java @@ -90,13 +90,11 @@ public static class Writer { DataOutputBuffer buffer = new DataOutputBuffer(); - public Writer(Configuration conf, FileSystem fs, Path file, - Class keyClass, Class valueClass, - CompressionCodec codec, - Counters.Counter writesCounter) throws IOException { - this(conf, fs.create(file), keyClass, valueClass, codec, - writesCounter); - ownOutputStream = true; + public Writer(Configuration conf, FSDataOutputStream out, + Class keyClass, Class valueClass, + CompressionCodec codec, Counters.Counter writesCounter) + throws IOException { + this(conf, out, keyClass, valueClass, codec, writesCounter, false); } protected Writer(Counters.Counter writesCounter) { @@ -105,7 +103,8 @@ protected Writer(Counters.Counter writesCounter) { public Writer(Configuration conf, FSDataOutputStream out, Class keyClass, Class valueClass, - CompressionCodec codec, Counters.Counter writesCounter) + CompressionCodec codec, Counters.Counter writesCounter, + boolean ownOutputStream) throws IOException { this.writtenRecordsCounter = writesCounter; this.checksumOut = new IFileOutputStream(out); @@ -137,11 +136,7 @@ public Writer(Configuration conf, FSDataOutputStream out, this.valueSerializer = serializationFactory.getSerializer(valueClass); this.valueSerializer.open(buffer); } - } - - public Writer(Configuration conf, FileSystem fs, Path file) - 
throws IOException { - this(conf, fs, file, null, null, null, null); + this.ownOutputStream = ownOutputStream; } public void close() throws IOException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index 84fdd92cc5d..b533ebe8e47 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -66,6 +66,7 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter; import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex; import org.apache.hadoop.mapreduce.task.MapContextImpl; +import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.util.IndexedSortable; import org.apache.hadoop.util.IndexedSorter; import org.apache.hadoop.util.Progress; @@ -1580,7 +1581,8 @@ private void sortAndSpill() throws IOException, ClassNotFoundException, IFile.Writer writer = null; try { long segmentStart = out.getPos(); - writer = new Writer(job, out, keyClass, valClass, codec, + FSDataOutputStream partitionOut = CryptoUtils.wrapIfNecessary(job, out); + writer = new Writer(job, partitionOut, keyClass, valClass, codec, spilledRecordsCounter); if (combinerRunner == null) { // spill directly @@ -1617,8 +1619,8 @@ private void sortAndSpill() throws IOException, ClassNotFoundException, // record offsets rec.startOffset = segmentStart; - rec.rawLength = writer.getRawLength(); - rec.partLength = writer.getCompressedLength(); + rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job); + rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job); spillRec.putIndex(rec, i); writer = null; @@ -1668,7 +1670,8 @@ private void spillSingleRecord(final K key, final V value, try { long segmentStart = out.getPos(); // Create a new codec, don't care! 
- writer = new IFile.Writer(job, out, keyClass, valClass, codec, + FSDataOutputStream partitionOut = CryptoUtils.wrapIfNecessary(job, out); + writer = new IFile.Writer(job, partitionOut, keyClass, valClass, codec, spilledRecordsCounter); if (i == partition) { @@ -1682,8 +1685,8 @@ private void spillSingleRecord(final K key, final V value, // record offsets rec.startOffset = segmentStart; - rec.rawLength = writer.getRawLength(); - rec.partLength = writer.getCompressedLength(); + rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job); + rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job); spillRec.putIndex(rec, i); writer = null; @@ -1825,12 +1828,13 @@ private void mergeParts() throws IOException, InterruptedException, try { for (int i = 0; i < partitions; i++) { long segmentStart = finalOut.getPos(); + FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut); Writer writer = - new Writer(job, finalOut, keyClass, valClass, codec, null); + new Writer(job, finalPartitionOut, keyClass, valClass, codec, null); writer.close(); rec.startOffset = segmentStart; - rec.rawLength = writer.getRawLength(); - rec.partLength = writer.getCompressedLength(); + rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job); + rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job); sr.putIndex(rec, i); } sr.writeToFile(finalIndexFile, job); @@ -1879,8 +1883,9 @@ private void mergeParts() throws IOException, InterruptedException, //write merged output to disk long segmentStart = finalOut.getPos(); + FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut); Writer writer = - new Writer(job, finalOut, keyClass, valClass, codec, + new Writer(job, finalPartitionOut, keyClass, valClass, codec, spilledRecordsCounter); if (combinerRunner == null || numSpills < minSpillsForCombine) { Merger.writeFile(kvIter, writer, reporter, job); @@ -1896,8 +1901,8 @@ private void mergeParts() throws IOException, InterruptedException, // record offsets rec.startOffset = segmentStart; - rec.rawLength = writer.getRawLength(); - rec.partLength = writer.getCompressedLength(); + rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job); + rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job); spillRec.putIndex(rec, parts); } spillRec.writeToFile(finalIndexFile, job); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java index 9493871138d..92855169c82 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.ChecksumFileSystem; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.Path; @@ -40,6 +41,7 @@ import org.apache.hadoop.mapred.IFile.Writer; import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.mapreduce.CryptoUtils; import 
org.apache.hadoop.util.PriorityQueue; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Progressable; @@ -298,8 +300,12 @@ public Segment(Reader reader, boolean preserve, void init(Counters.Counter readsCounter) throws IOException { if (reader == null) { FSDataInputStream in = fs.open(file); + in.seek(segmentOffset); - reader = new Reader(conf, in, segmentLength, codec, readsCounter); + in = CryptoUtils.wrapIfNecessary(conf, in); + reader = new Reader(conf, in, + segmentLength - CryptoUtils.cryptoPadding(conf), + codec, readsCounter); } if (mapOutputsCounter != null) { @@ -714,9 +720,10 @@ RawKeyValueIterator merge(Class keyClass, Class valueClass, tmpFilename.toString(), approxOutputSize, conf); - Writer writer = - new Writer(conf, fs, outputFile, keyClass, valueClass, codec, - writesCounter); + FSDataOutputStream out = fs.create(outputFile); + out = CryptoUtils.wrapIfNecessary(conf, out); + Writer writer = new Writer(conf, out, keyClass, valueClass, + codec, writesCounter, true); writeFile(this, writer, reporter, conf); writer.close(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java new file mode 100644 index 00000000000..7d8a4962c65 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java @@ -0,0 +1,199 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; + +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoCodec; +import org.apache.hadoop.crypto.CryptoInputStream; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream; +import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.security.TokenCache; +import org.apache.hadoop.security.UserGroupInformation; + +import com.google.common.io.LimitInputStream; + +/** + * This class provides utilities to make it easier to work with Cryptographic + * Streams. 
Specifically for dealing with encrypting intermediate data such + * MapReduce spill files. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class CryptoUtils { + + private static final Log LOG = LogFactory.getLog(CryptoUtils.class); + + public static boolean isShuffleEncrypted(Configuration conf) { + return conf.getBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, + MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA); + } + + /** + * This method creates and initializes an IV (Initialization Vector) + * + * @param conf + * @return byte[] + * @throws IOException + */ + public static byte[] createIV(Configuration conf) throws IOException { + CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf); + if (isShuffleEncrypted(conf)) { + byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()]; + cryptoCodec.generateSecureRandom(iv); + return iv; + } else { + return null; + } + } + + public static int cryptoPadding(Configuration conf) { + // Sizeof(IV) + long(start-offset) + return isShuffleEncrypted(conf) ? CryptoCodec.getInstance(conf) + .getCipherSuite().getAlgorithmBlockSize() + 8 : 0; + } + + private static byte[] getEncryptionKey() throws IOException { + return TokenCache.getShuffleSecretKey(UserGroupInformation.getCurrentUser() + .getCredentials()); + } + + private static int getBufferSize(Configuration conf) { + return conf.getInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB, + MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB) * 1024; + } + + /** + * Wraps a given FSDataOutputStream with a CryptoOutputStream. The size of the + * data buffer required for the stream is specified by the + * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration + * variable. + * + * @param conf + * @param out + * @return FSDataOutputStream + * @throws IOException + */ + public static FSDataOutputStream wrapIfNecessary(Configuration conf, + FSDataOutputStream out) throws IOException { + if (isShuffleEncrypted(conf)) { + out.write(ByteBuffer.allocate(8).putLong(out.getPos()).array()); + byte[] iv = createIV(conf); + out.write(iv); + if (LOG.isDebugEnabled()) { + LOG.debug("IV written to Stream [" + + Base64.encodeBase64URLSafeString(iv) + "]"); + } + return new CryptoFSDataOutputStream(out, CryptoCodec.getInstance(conf), + getBufferSize(conf), getEncryptionKey(), iv); + } else { + return out; + } + } + + /** + * Wraps a given InputStream with a CryptoInputStream. The size of the data + * buffer required for the stream is specified by the + * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration + * variable. + * + * If the value of 'length' is > -1, The InputStream is additionally wrapped + * in a LimitInputStream. CryptoStreams are late buffering in nature. This + * means they will always try to read ahead if they can. The LimitInputStream + * will ensure that the CryptoStream does not read past the provided length + * from the given Input Stream. 
+ * + * @param conf + * @param in + * @param length + * @return InputStream + * @throws IOException + */ + public static InputStream wrapIfNecessary(Configuration conf, InputStream in, + long length) throws IOException { + if (isShuffleEncrypted(conf)) { + int bufferSize = getBufferSize(conf); + if (length > -1) { + in = new LimitInputStream(in, length); + } + byte[] offsetArray = new byte[8]; + IOUtils.readFully(in, offsetArray, 0, 8); + long offset = ByteBuffer.wrap(offsetArray).getLong(); + CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf); + byte[] iv = + new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()]; + IOUtils.readFully(in, iv, 0, + cryptoCodec.getCipherSuite().getAlgorithmBlockSize()); + if (LOG.isDebugEnabled()) { + LOG.debug("IV read from [" + + Base64.encodeBase64URLSafeString(iv) + "]"); + } + return new CryptoInputStream(in, cryptoCodec, bufferSize, + getEncryptionKey(), iv, offset + cryptoPadding(conf)); + } else { + return in; + } + } + + /** + * Wraps a given FSDataInputStream with a CryptoInputStream. The size of the + * data buffer required for the stream is specified by the + * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration + * variable. + * + * @param conf + * @param in + * @return FSDataInputStream + * @throws IOException + */ + public static FSDataInputStream wrapIfNecessary(Configuration conf, + FSDataInputStream in) throws IOException { + if (isShuffleEncrypted(conf)) { + CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf); + int bufferSize = getBufferSize(conf); + // Not going to be used... but still has to be read... + // Since the O/P stream always writes it.. + IOUtils.readFully(in, new byte[8], 0, 8); + byte[] iv = + new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()]; + IOUtils.readFully(in, iv, 0, + cryptoCodec.getCipherSuite().getAlgorithmBlockSize()); + if (LOG.isDebugEnabled()) { + LOG.debug("IV read from Stream [" + + Base64.encodeBase64URLSafeString(iv) + "]"); + } + return new CryptoFSDataInputStream(in, cryptoCodec, bufferSize, + getEncryptionKey(), iv); + } else { + return in; + } + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index 94e71257498..0734e7f2953 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -291,7 +291,7 @@ private void copyJar(Path originalJarPath, Path submitJarFile, /** * configure the jobconf of the user with the command line options of * -libjars, -files, -archives. - * @param conf + * @param job * @throws IOException */ private void copyAndConfigureFiles(Job job, Path jobSubmitDir) @@ -376,8 +376,13 @@ JobStatus submitJobInternal(Job job, Cluster cluster) if (TokenCache.getShuffleSecretKey(job.getCredentials()) == null) { KeyGenerator keyGen; try { + + int keyLen = CryptoUtils.isShuffleEncrypted(conf) + ? 
conf.getInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS, + MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS) + : SHUFFLE_KEY_LENGTH; keyGen = KeyGenerator.getInstance(SHUFFLE_KEYGEN_ALGORITHM); - keyGen.init(SHUFFLE_KEY_LENGTH); + keyGen.init(keyLen); } catch (NoSuchAlgorithmException e) { throw new IOException("Error generating shuffle secret key", e); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java index 4795af78d2a..e4ded57a49c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java @@ -762,4 +762,18 @@ public interface MRJobConfig { public static final String TASK_PREEMPTION = "mapreduce.job.preemption"; + + public static final String MR_ENCRYPTED_INTERMEDIATE_DATA = + "mapreduce.job.encrypted-intermediate-data"; + public static final boolean DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA = false; + + public static final String MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS = + "mapreduce.job.encrypted-intermediate-data-key-size-bits"; + public static final int DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS = + 128; + + public static final String MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB = + "mapreduce.job.encrypted-intermediate-data.buffer.kb"; + public static final int DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB = + 128; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java index 00d4764e665..20db9dc7e5e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java @@ -19,6 +19,7 @@ import java.io.DataInputStream; import java.io.IOException; +import java.io.InputStream; import java.net.ConnectException; import java.net.HttpURLConnection; import java.net.MalformedURLException; @@ -43,6 +44,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.security.SecureShuffleUtils; +import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.security.ssl.SSLFactory; import com.google.common.annotations.VisibleForTesting; @@ -65,6 +67,7 @@ private static enum ShuffleErrors{IO_ERROR, WRONG_LENGTH, BAD_ID, WRONG_MAP, CONNECTION, WRONG_REDUCE} private final static String SHUFFLE_ERR_GRP_NAME = "Shuffle Errors"; + private final JobConf jobConf; private final Counters.Counter connectionErrs; private final Counters.Counter ioErrs; private final Counters.Counter wrongLengthErrs; @@ -104,6 +107,7 @@ public Fetcher(JobConf job, TaskAttemptID reduceId, Reporter reporter, ShuffleClientMetrics metrics, ExceptionReporter exceptionReporter, SecretKey shuffleKey, int id) { + this.jobConf = job; this.reporter = reporter; 
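A note on the cryptoPadding(job) terms that keep appearing in this patch (added to the recorded rawLength/partLength in MapTask and Merger above, and subtracted from the shuffle header lengths in the Fetcher just below): when intermediate-data encryption is enabled, CryptoUtils.wrapIfNecessary writes an 8-byte plaintext start offset followed by one IV ahead of the AES/CTR ciphertext, so every encrypted segment is 24 bytes longer on disk than its plaintext IFile contents. The sketch below is illustrative only (CryptoPaddingSketch is a made-up name); it uses the same calls CryptoUtils itself uses above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;

class CryptoPaddingSketch {
  // Layout written by CryptoUtils.wrapIfNecessary(conf, out):
  //   [ 8-byte start offset ][ IV (cipher block size, 16 for AES/CTR) ][ ciphertext ... ]
  static int cryptoPadding(Configuration conf) {
    boolean encrypted = conf.getBoolean(
        "mapreduce.job.encrypted-intermediate-data", false); // MRJobConfig key added above
    if (!encrypted) {
      return 0; // no header is written, so index lengths need no adjustment
    }
    int ivLen = CryptoCodec.getInstance(conf)
        .getCipherSuite().getAlgorithmBlockSize(); // 16 for AES/CTR
    return 8 + ivLen;                              // 8 + 16 = 24 bytes per encrypted segment
  }
}

Keeping the header inside the recorded segment length is what lets the reduce side strip it again: the fetcher bounds the stream to the reported compressedLength, consumes the offset and IV, and then subtracts the same 24 bytes before its sanity checks.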
this.scheduler = scheduler; this.merger = merger; @@ -394,7 +398,11 @@ private TaskAttemptID[] copyMapOutput(MapHost host, return remaining.toArray(new TaskAttemptID[remaining.size()]); } - + InputStream is = input; + is = CryptoUtils.wrapIfNecessary(jobConf, is, compressedLength); + compressedLength -= CryptoUtils.cryptoPadding(jobConf); + decompressedLength -= CryptoUtils.cryptoPadding(jobConf); + // Do some basic sanity verification if (!verifySanity(compressedLength, decompressedLength, forReduce, remaining, mapId)) { @@ -431,7 +439,7 @@ private TaskAttemptID[] copyMapOutput(MapHost host, LOG.info("fetcher#" + id + " about to shuffle output of map " + mapOutput.getMapId() + " decomp: " + decompressedLength + " len: " + compressedLength + " to " + mapOutput.getDescription()); - mapOutput.shuffle(host, input, compressedLength, decompressedLength, + mapOutput.shuffle(host, is, compressedLength, decompressedLength, metrics, reporter); } catch (java.lang.InternalError e) { LOG.warn("Failed to shuffle for fetcher#"+id, e); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java index 52796524da5..98256c2d65b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java @@ -36,6 +36,7 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.SpillRecord; import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.mapreduce.CryptoUtils; /** * LocalFetcher is used by LocalJobRunner to perform a local filesystem @@ -145,6 +146,9 @@ private boolean copyMapOutput(TaskAttemptID mapTaskId) throws IOException { // now read the file, seek to the appropriate section, and send it. 
FileSystem localFs = FileSystem.getLocal(job).getRaw(); FSDataInputStream inStream = localFs.open(mapOutputFileName); + + inStream = CryptoUtils.wrapIfNecessary(job, inStream); + try { inStream.seek(ir.startOffset); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java index a821e4d1b8a..1fa1da0f704 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.ChecksumFileSystem; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.fs.LocalFileSystem; @@ -54,6 +55,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskID; +import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.ReflectionUtils; @@ -227,6 +229,10 @@ protected MergeThread, K,V> createInMemoryMerger() { return new InMemoryMerger(this); } + protected MergeThread createOnDiskMerger() { + return new OnDiskMerger(this); + } + TaskAttemptID getReduceId() { return reduceId; } @@ -452,11 +458,10 @@ public void merge(List> inputs) throws IOException { mergeOutputSize).suffix( Task.MERGED_OUTPUT_PREFIX); - Writer writer = - new Writer(jobConf, rfs, outputPath, - (Class) jobConf.getMapOutputKeyClass(), - (Class) jobConf.getMapOutputValueClass(), - codec, null); + FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath)); + Writer writer = new Writer(jobConf, out, + (Class) jobConf.getMapOutputKeyClass(), + (Class) jobConf.getMapOutputValueClass(), codec, null, true); RawKeyValueIterator rIter = null; CompressAwarePath compressAwarePath; @@ -536,11 +541,12 @@ public void merge(List inputs) throws IOException { Path outputPath = localDirAllocator.getLocalPathForWrite(inputs.get(0).toString(), approxOutputSize, jobConf).suffix(Task.MERGED_OUTPUT_PREFIX); - Writer writer = - new Writer(jobConf, rfs, outputPath, - (Class) jobConf.getMapOutputKeyClass(), - (Class) jobConf.getMapOutputValueClass(), - codec, null); + + FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath)); + Writer writer = new Writer(jobConf, out, + (Class) jobConf.getMapOutputKeyClass(), + (Class) jobConf.getMapOutputValueClass(), codec, null, true); + RawKeyValueIterator iter = null; CompressAwarePath compressAwarePath; Path tmpDir = new Path(reduceId.toString()); @@ -716,8 +722,10 @@ private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs, keyClass, valueClass, memDiskSegments, numMemDiskSegments, tmpDir, comparator, reporter, spilledRecordsCounter, null, mergePhase); - Writer writer = new Writer(job, fs, outputPath, - keyClass, valueClass, codec, null); + + FSDataOutputStream out = 
CryptoUtils.wrapIfNecessary(job, fs.create(outputPath)); + Writer writer = new Writer(job, out, keyClass, valueClass, + codec, null, true); try { Merger.writeFile(rIter, writer, reporter, job); writer.close(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java index 59bb04a9dea..6e0e92bd4df 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java @@ -37,6 +37,7 @@ import org.apache.hadoop.mapred.MapOutputFile; import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath; import com.google.common.annotations.VisibleForTesting; @@ -75,7 +76,7 @@ public OnDiskMapOutput(TaskAttemptID mapId, TaskAttemptID reduceId, this.merger = merger; this.outputPath = outputPath; tmpOutputPath = getTempPath(outputPath, fetcher); - disk = fs.create(tmpOutputPath); + disk = CryptoUtils.wrapIfNecessary(conf, fs.create(tmpOutputPath)); } @VisibleForTesting diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java index 1aea5004a58..c5ab420b81f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java @@ -24,14 +24,16 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.TreeMap; +import org.apache.hadoop.fs.FSDataInputStream; import org.junit.Assert; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -51,10 +53,16 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.MRConfig; +import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskID; import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.mapreduce.security.TokenCache; +import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl; +import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Progressable; import org.junit.After; @@ -63,40 +71,48 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; 
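Before the TestMerger changes below, one piece of plumbing is worth spelling out: CryptoUtils.getEncryptionKey() (added earlier in this patch) resolves the key from the shuffle secret stored in the current user's credentials, so encrypted spills and merges only work once that secret has been seeded. The following is a minimal setup sketch, not code from the patch; EncryptedIntermediateDataSetupSketch is a made-up name and the 16-byte all-zero key is a placeholder. It mirrors what the new testEncryptedMerger does.

import java.io.IOException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

class EncryptedIntermediateDataSetupSketch {
  static JobConf configure() throws IOException {
    JobConf job = new JobConf();
    // Turn on encryption of spills and intermediate data (keys added to MRJobConfig above);
    // the two sizes shown are the documented defaults.
    job.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
    job.setInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS, 128);
    job.setInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB, 128);

    // CryptoUtils.getEncryptionKey() reads the shuffle secret from the current
    // UGI's credentials, so seed it the same way the test below does.
    Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
    TokenCache.setShuffleSecretKey(new byte[16], creds); // placeholder key material
    UserGroupInformation.getCurrentUser().addCredentials(creds);
    return job;
  }
}

In a real job submission this seeding is unnecessary, since JobSubmitter (patched above) generates the shuffle key itself, sizing it from MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS when encryption is enabled.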
+import com.google.common.collect.Lists; + public class TestMerger { private Configuration conf; private JobConf jobConf; private FileSystem fs; - + @Before public void setup() throws IOException { conf = new Configuration(); jobConf = new JobConf(); fs = FileSystem.getLocal(conf); } - - @After - public void cleanup() throws IOException { - fs.delete(new Path(jobConf.getLocalDirs()[0]), true); - } - + + @Test - public void testInMemoryMerger() throws Throwable { + public void testEncryptedMerger() throws Throwable { + jobConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); + conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); + Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials(); + TokenCache.setShuffleSecretKey(new byte[16], credentials); + UserGroupInformation.getCurrentUser().addCredentials(credentials); + testInMemoryAndOnDiskMerger(); + } + + @Test + public void testInMemoryAndOnDiskMerger() throws Throwable { JobID jobId = new JobID("a", 0); - TaskAttemptID reduceId = new TaskAttemptID( + TaskAttemptID reduceId1 = new TaskAttemptID( new TaskID(jobId, TaskType.REDUCE, 0), 0); TaskAttemptID mapId1 = new TaskAttemptID( new TaskID(jobId, TaskType.MAP, 1), 0); TaskAttemptID mapId2 = new TaskAttemptID( new TaskID(jobId, TaskType.MAP, 2), 0); - + LocalDirAllocator lda = new LocalDirAllocator(MRConfig.LOCAL_DIR); - + MergeManagerImpl mergeManager = new MergeManagerImpl( - reduceId, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null, + reduceId1, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null, null, null, new Progress(), new MROutputFiles()); - + // write map outputs Map map1 = new TreeMap(); map1.put("apple", "disgusting"); @@ -113,32 +129,88 @@ public void testInMemoryMerger() throws Throwable { mapOutputBytes1.length); System.arraycopy(mapOutputBytes2, 0, mapOutput2.getMemory(), 0, mapOutputBytes2.length); - + // create merger and run merge MergeThread, Text, Text> inMemoryMerger = mergeManager.createInMemoryMerger(); - List> mapOutputs = + List> mapOutputs1 = new ArrayList>(); - mapOutputs.add(mapOutput1); - mapOutputs.add(mapOutput2); - - inMemoryMerger.merge(mapOutputs); - + mapOutputs1.add(mapOutput1); + mapOutputs1.add(mapOutput2); + + inMemoryMerger.merge(mapOutputs1); + Assert.assertEquals(1, mergeManager.onDiskMapOutputs.size()); - Path outPath = mergeManager.onDiskMapOutputs.iterator().next(); - + + TaskAttemptID reduceId2 = new TaskAttemptID( + new TaskID(jobId, TaskType.REDUCE, 3), 0); + TaskAttemptID mapId3 = new TaskAttemptID( + new TaskID(jobId, TaskType.MAP, 4), 0); + TaskAttemptID mapId4 = new TaskAttemptID( + new TaskID(jobId, TaskType.MAP, 5), 0); + // write map outputs + Map map3 = new TreeMap(); + map3.put("apple", "awesome"); + map3.put("carrot", "amazing"); + Map map4 = new TreeMap(); + map4.put("banana", "bla"); + byte[] mapOutputBytes3 = writeMapOutput(conf, map3); + byte[] mapOutputBytes4 = writeMapOutput(conf, map4); + InMemoryMapOutput mapOutput3 = new InMemoryMapOutput( + conf, mapId3, mergeManager, mapOutputBytes3.length, null, true); + InMemoryMapOutput mapOutput4 = new InMemoryMapOutput( + conf, mapId4, mergeManager, mapOutputBytes4.length, null, true); + System.arraycopy(mapOutputBytes3, 0, mapOutput3.getMemory(), 0, + mapOutputBytes3.length); + System.arraycopy(mapOutputBytes4, 0, mapOutput4.getMemory(), 0, + mapOutputBytes4.length); + +// // create merger and run merge + MergeThread, Text, Text> inMemoryMerger2 = + mergeManager.createInMemoryMerger(); + List> mapOutputs2 = 
+ new ArrayList>(); + mapOutputs2.add(mapOutput3); + mapOutputs2.add(mapOutput4); + + inMemoryMerger2.merge(mapOutputs2); + + Assert.assertEquals(2, mergeManager.onDiskMapOutputs.size()); + + List paths = new ArrayList(); + Iterator iterator = mergeManager.onDiskMapOutputs.iterator(); List keys = new ArrayList(); List values = new ArrayList(); - readOnDiskMapOutput(conf, fs, outPath, keys, values); - Assert.assertEquals(keys, Arrays.asList("apple", "banana", "carrot")); - Assert.assertEquals(values, Arrays.asList("disgusting", "pretty good", "delicious")); + while (iterator.hasNext()) { + CompressAwarePath next = iterator.next(); + readOnDiskMapOutput(conf, fs, next, keys, values); + paths.add(next); + } + Assert.assertEquals(keys, Arrays.asList("apple", "banana", "carrot", "apple", "banana", "carrot")); + Assert.assertEquals(values, Arrays.asList("awesome", "bla", "amazing", "disgusting", "pretty good", "delicious")); + mergeManager.close(); + + mergeManager = new MergeManagerImpl( + reduceId2, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null, + null, null, new Progress(), new MROutputFiles()); + + MergeThread onDiskMerger = mergeManager.createOnDiskMerger(); + onDiskMerger.merge(paths); + + Assert.assertEquals(1, mergeManager.onDiskMapOutputs.size()); + + keys = new ArrayList(); + values = new ArrayList(); + readOnDiskMapOutput(conf, fs, mergeManager.onDiskMapOutputs.iterator().next(), keys, values); + Assert.assertEquals(keys, Arrays.asList("apple", "apple", "banana", "banana", "carrot", "carrot")); + Assert.assertEquals(values, Arrays.asList("awesome", "disgusting", "pretty good", "bla", "amazing", "delicious")); mergeManager.close(); Assert.assertEquals(0, mergeManager.inMemoryMapOutputs.size()); Assert.assertEquals(0, mergeManager.inMemoryMergedMapOutputs.size()); Assert.assertEquals(0, mergeManager.onDiskMapOutputs.size()); } - + private byte[] writeMapOutput(Configuration conf, Map keysToValues) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); @@ -152,11 +224,13 @@ private byte[] writeMapOutput(Configuration conf, Map keysToValu writer.close(); return baos.toByteArray(); } - + private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path, List keys, List values) throws IOException { - IFile.Reader reader = new IFile.Reader(conf, fs, - path, null, null); + FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path)); + + IFile.Reader reader = new IFile.Reader(conf, in, + fs.getFileStatus(path).getLen(), null, null); DataInputBuffer keyBuff = new DataInputBuffer(); DataInputBuffer valueBuff = new DataInputBuffer(); Text key = new Text(); @@ -169,17 +243,17 @@ private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path, values.add(value.toString()); } } - + @Test public void testCompressed() throws IOException { testMergeShouldReturnProperProgress(getCompressedSegments()); - } - +} + @Test public void testUncompressed() throws IOException { testMergeShouldReturnProperProgress(getUncompressedSegments()); } - + @SuppressWarnings( { "deprecation", "unchecked" }) public void testMergeShouldReturnProperProgress( List> segments) throws IOException { @@ -212,7 +286,7 @@ private List> getUncompressedSegments() throws IOException { } return segments; } - + private List> getCompressedSegments() throws IOException { List> segments = new ArrayList>(); for (int i = 1; i < 1; i++) { @@ -220,7 +294,7 @@ private List> getCompressedSegments() throws IOException { } return segments; } - + private Segment 
getUncompressedSegment(int i) throws IOException { return new Segment(getReader(i), false); } @@ -258,7 +332,7 @@ public Boolean answer(InvocationOnMock invocation) { } }; } - + private Answer getValueAnswer(final String segmentName) { return new Answer() { int i = 0; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java index e3c7253afcb..a314fc1f578 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java @@ -18,6 +18,8 @@ package org.apache.hadoop.mapred; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; @@ -42,7 +44,7 @@ public void testIFileWriterWithCodec() throws Exception { DefaultCodec codec = new GzipCodec(); codec.setConf(conf); IFile.Writer writer = - new IFile.Writer(conf, rfs, path, Text.class, Text.class, + new IFile.Writer(conf, rfs.create(path), Text.class, Text.class, codec, null); writer.close(); } @@ -56,12 +58,15 @@ public void testIFileReaderWithCodec() throws Exception { Path path = new Path(new Path("build/test.ifile"), "data"); DefaultCodec codec = new GzipCodec(); codec.setConf(conf); + FSDataOutputStream out = rfs.create(path); IFile.Writer writer = - new IFile.Writer(conf, rfs, path, Text.class, Text.class, + new IFile.Writer(conf, out, Text.class, Text.class, codec, null); writer.close(); + FSDataInputStream in = rfs.open(path); IFile.Reader reader = - new IFile.Reader(conf, rfs, path, codec, null); + new IFile.Reader(conf, in, rfs.getFileStatus(path).getLen(), + codec, null); reader.close(); // test check sum diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java new file mode 100644 index 00000000000..ebc32adb9d9 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java @@ -0,0 +1,254 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
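
The TestIFile and TestMerger hunks above all follow the same pattern: the caller opens the raw FSDataOutputStream/FSDataInputStream itself, passes it through CryptoUtils.wrapIfNecessary(), and hands the (possibly) wrapped stream to the stream-based IFile constructors. A minimal sketch of that round trip, using only the constructors and helpers visible in these hunks (illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.IFile;
    import org.apache.hadoop.mapreduce.CryptoUtils;

    public class IFileCryptoRoundTrip {
      static void roundTrip(Configuration conf, FileSystem fs, Path path)
          throws Exception {
        // Write: wrap the raw stream first; wrapIfNecessary() is a no-op unless
        // MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA is enabled in conf.
        FSDataOutputStream out =
            CryptoUtils.wrapIfNecessary(conf, fs.create(path));
        IFile.Writer<Text, Text> writer = new IFile.Writer<Text, Text>(
            conf, out, Text.class, Text.class, null, null);
        writer.append(new Text("key"), new Text("value"));
        writer.close();

        // Read: wrap the raw input stream and pass the on-disk length
        // explicitly, since the stream-based Reader can no longer derive it
        // from a Path.
        FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path));
        IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(
            conf, in, fs.getFileStatus(path).getLen(), null, null);
        reader.close();
      }
    }
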
+ */ +package org.apache.hadoop.mapred; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapreduce.MRJobConfig; +import org.junit.Test; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Writer; + +import static org.junit.Assert.*; + +@SuppressWarnings(value={"unchecked", "deprecation"}) +/** + * This test tests the support for a merge operation in Hadoop. The input files + * are already sorted on the key. This test implements an external + * MapOutputCollector implementation that just copies the records to different + * partitions while maintaining the sort order in each partition. The Hadoop + * framework's merge on the reduce side will merge the partitions created to + * generate the final output which is sorted on the key. + */ +public class TestMRIntermediateDataEncryption { + // Where MR job's input will reside. + private static final Path INPUT_DIR = new Path("/test/input"); + // Where output goes. + private static final Path OUTPUT = new Path("/test/output"); + + @Test + public void testSingleReducer() throws Exception { + doEncryptionTest(3, 1, 2); + } + + @Test + public void testMultipleMapsPerNode() throws Exception { + doEncryptionTest(8, 1, 2); + } + + @Test + public void testMultipleReducers() throws Exception { + doEncryptionTest(2, 4, 2); + } + + public void doEncryptionTest(int numMappers, int numReducers, int numNodes) throws Exception { + doEncryptionTest(numMappers, numReducers, numNodes, 1000); + } + + public void doEncryptionTest(int numMappers, int numReducers, int numNodes, int numLines) throws Exception { + MiniDFSCluster dfsCluster = null; + MiniMRClientCluster mrCluster = null; + FileSystem fileSystem = null; + try { + Configuration conf = new Configuration(); + // Start the mini-MR and mini-DFS clusters + + dfsCluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(numNodes).build(); + fileSystem = dfsCluster.getFileSystem(); + mrCluster = MiniMRClientClusterFactory.create(this.getClass(), + numNodes, conf); + // Generate input. + createInput(fileSystem, numMappers, numLines); + // Run the test. + runMergeTest(new JobConf(mrCluster.getConfig()), fileSystem, numMappers, numReducers, numLines); + } finally { + if (dfsCluster != null) { + dfsCluster.shutdown(); + } + if (mrCluster != null) { + mrCluster.stop(); + } + } + } + + private void createInput(FileSystem fs, int numMappers, int numLines) throws Exception { + fs.delete(INPUT_DIR, true); + for (int i = 0; i < numMappers; i++) { + OutputStream os = fs.create(new Path(INPUT_DIR, "input_" + i + ".txt")); + Writer writer = new OutputStreamWriter(os); + for (int j = 0; j < numLines; j++) { + // Create sorted key, value pairs. 
+ int k = j + 1; + String formattedNumber = String.format("%09d", k); + writer.write(formattedNumber + " " + formattedNumber + "\n"); + } + writer.close(); + } + } + + private void runMergeTest(JobConf job, FileSystem fileSystem, int numMappers, int numReducers, int numLines) + throws Exception { + fileSystem.delete(OUTPUT, true); + job.setJobName("Test"); + JobClient client = new JobClient(job); + RunningJob submittedJob = null; + FileInputFormat.setInputPaths(job, INPUT_DIR); + FileOutputFormat.setOutputPath(job, OUTPUT); + job.set("mapreduce.output.textoutputformat.separator", " "); + job.setInputFormat(TextInputFormat.class); + job.setMapOutputKeyClass(Text.class); + job.setMapOutputValueClass(Text.class); + job.setOutputKeyClass(Text.class); + job.setOutputValueClass(Text.class); + job.setMapperClass(MyMapper.class); + job.setPartitionerClass(MyPartitioner.class); + job.setOutputFormat(TextOutputFormat.class); + job.setNumReduceTasks(numReducers); + + job.setInt("mapreduce.map.maxattempts", 1); + job.setInt("mapreduce.reduce.maxattempts", 1); + job.setInt("mapred.test.num_lines", numLines); + job.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true); + try { + submittedJob = client.submitJob(job); + try { + if (! client.monitorAndPrintJob(job, submittedJob)) { + throw new IOException("Job failed!"); + } + } catch(InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } catch(IOException ioe) { + System.err.println("Job failed with: " + ioe); + } finally { + verifyOutput(submittedJob, fileSystem, numMappers, numLines); + } + } + + private void verifyOutput(RunningJob submittedJob, FileSystem fileSystem, int numMappers, int numLines) + throws Exception { + FSDataInputStream dis = null; + long numValidRecords = 0; + long numInvalidRecords = 0; + String prevKeyValue = "000000000"; + Path[] fileList = + FileUtil.stat2Paths(fileSystem.listStatus(OUTPUT, + new Utils.OutputFileUtils.OutputFilesFilter())); + for (Path outFile : fileList) { + try { + dis = fileSystem.open(outFile); + String record; + while((record = dis.readLine()) != null) { + // Split the line into key and value. + int blankPos = record.indexOf(" "); + String keyString = record.substring(0, blankPos); + String valueString = record.substring(blankPos+1); + // Check for sorted output and correctness of record. + if (keyString.compareTo(prevKeyValue) >= 0 + && keyString.equals(valueString)) { + prevKeyValue = keyString; + numValidRecords++; + } else { + numInvalidRecords++; + } + } + } finally { + if (dis != null) { + dis.close(); + dis = null; + } + } + } + // Make sure we got all input records in the output in sorted order. + assertEquals((long)(numMappers * numLines), numValidRecords); + // Make sure there is no extraneous invalid record. + assertEquals(0, numInvalidRecords); + } + + /** + * A mapper implementation that assumes that key text contains valid integers + * in displayable form. 
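
Everything runMergeTest() above configures on the JobConf is ordinary job setup except the one new flag. A hypothetical client job that wants its spill and shuffle data encrypted needs only something like the following sketch (in the test above the shuffle secret is set manually via TokenCache because there is no real job submission):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class EncryptedIntermediateDataExample {
      public static JobConf newEncryptedJobConf() {
        JobConf job = new JobConf();
        // Opt in to encryption of intermediate (spill/shuffle) data; the
        // streams are then wrapped by CryptoUtils.wrapIfNecessary() as shown
        // in the hunks above.
        job.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
        return job;
      }
    }
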
+ */ + public static class MyMapper extends MapReduceBase + implements Mapper { + private Text keyText; + private Text valueText; + + public MyMapper() { + keyText = new Text(); + valueText = new Text(); + } + + @Override + public void map(LongWritable key, Text value, + OutputCollector output, + Reporter reporter) throws IOException { + String record = value.toString(); + int blankPos = record.indexOf(" "); + keyText.set(record.substring(0, blankPos)); + valueText.set(record.substring(blankPos+1)); + output.collect(keyText, valueText); + } + + public void close() throws IOException { + } + } + + /** + * Partitioner implementation to make sure that output is in total sorted + * order. We basically route key ranges to different reducers such that + * key values monotonically increase with the partition number. For example, + * in this test, the keys are numbers from 1 to 1000 in the form "000000001" + * to "000001000" in each input file. The keys "000000001" to "000000250" are + * routed to partition 0, "000000251" to "000000500" are routed to partition 1 + * and so on since we have 4 reducers. + */ + static class MyPartitioner implements Partitioner { + + private JobConf job; + + public MyPartitioner() { + } + + public void configure(JobConf job) { + this.job = job; + } + + public int getPartition(Text key, Text value, int numPartitions) { + int keyValue = 0; + try { + keyValue = Integer.parseInt(key.toString()); + } catch(NumberFormatException nfe) { + keyValue = 0; + } + int partitionNumber = (numPartitions*(Math.max(0, keyValue-1)))/job.getInt("mapred.test.num_lines", 10000); + return partitionNumber; + } + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java index d3a084449a1..43fd94871a2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java @@ -80,7 +80,7 @@ public void runValueIterator(Path tmpDir, Pair[] vals, FileSystem rfs = ((LocalFileSystem)localFs).getRaw(); Path path = new Path(tmpDir, "data.in"); IFile.Writer writer = - new IFile.Writer(conf, rfs, path, Text.class, Text.class, + new IFile.Writer(conf, rfs.create(path), Text.class, Text.class, codec, null); for(Pair p: vals) { writer.append(new Text(p.key), new Text(p.value)); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java index 69994f36bd2..f447ebcc7c9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java @@ -95,9 +95,9 @@ public void testRunner() throws Exception { new Counters.Counter(), new Progress()); FileSystem fs = new RawLocalFileSystem(); fs.setConf(conf); - Writer wr = new Writer(conf, fs, 
- new Path(workSpace + File.separator + "outfile"), IntWritable.class, - Text.class, null, null); + Writer wr = new Writer(conf, fs.create( + new Path(workSpace + File.separator + "outfile")), IntWritable.class, + Text.class, null, null, true); output.setWriter(wr); // stub for client File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationRunnableStub"); @@ -177,9 +177,9 @@ public void testApplication() throws Throwable { new Progress()); FileSystem fs = new RawLocalFileSystem(); fs.setConf(conf); - Writer wr = new Writer(conf, fs, - new Path(workSpace.getAbsolutePath() + File.separator + "outfile"), - IntWritable.class, Text.class, null, null); + Writer wr = new Writer(conf, fs.create( + new Path(workSpace.getAbsolutePath() + File.separator + "outfile")), + IntWritable.class, Text.class, null, null, true); output.setWriter(wr); conf.set(Submitter.PRESERVE_COMMANDFILE, "true"); From d79f27b429410daa6770a51867d7ecea728dff89 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 11 Jul 2014 20:54:47 +0000 Subject: [PATCH 031/354] HDFS-6474. Namenode needs to get the actual keys and iv from the KeyProvider. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1609833 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/fs/FileEncryptionInfo.java | 38 +- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 + .../hadoop/hdfs/protocolPB/PBHelper.java | 4 +- .../namenode/EncryptionZoneManager.java | 416 +++++++++++++++--- .../hdfs/server/namenode/FSDirectory.java | 59 ++- .../hdfs/server/namenode/FSNamesystem.java | 192 ++++++-- .../namenode/RetryStartFileException.java | 21 + .../hadoop-hdfs/src/main/proto/hdfs.proto | 1 + .../src/main/resources/hdfs-default.xml | 11 + .../hadoop/hdfs/TestEncryptionZonesAPI.java | 93 +++- 11 files changed, 699 insertions(+), 141 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java index 77f4cdfe70d..f960233fb78 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java @@ -32,20 +32,33 @@ public class FileEncryptionInfo { private final CipherSuite cipherSuite; - private final byte[] key; + private final byte[] edek; private final byte[] iv; + private final String ezKeyVersionName; - public FileEncryptionInfo(CipherSuite suite, byte[] key, byte[] iv) { + /** + * Create a FileEncryptionInfo. + * + * @param suite CipherSuite used to encrypt the file + * @param edek encrypted data encryption key (EDEK) of the file + * @param iv initialization vector (IV) used to encrypt the file + * @param ezKeyVersionName name of the KeyVersion used to encrypt the + * encrypted data encryption key. 
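
To make the new constructor contract concrete, here is a small hypothetical example of building a FileEncryptionInfo with the fourth ezKeyVersionName argument (a sketch against the signature shown in this hunk; the key-version name is made up):

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.fs.FileEncryptionInfo;

    public class FileEncryptionInfoExample {
      public static FileEncryptionInfo example() {
        CipherSuite suite = CipherSuite.AES_CTR_NOPADDING;
        // Both the EDEK and the IV must match the suite's block size
        // (16 bytes for AES/CTR/NoPadding), per the checkArgument calls above.
        byte[] edek = new byte[suite.getAlgorithmBlockSize()];
        byte[] iv = new byte[suite.getAlgorithmBlockSize()];
        FileEncryptionInfo feInfo =
            new FileEncryptionInfo(suite, edek, iv, "myEzKey@0");
        assert "myEzKey@0".equals(feInfo.getEzKeyVersionName());
        return feInfo;
      }
    }
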
+ */ + public FileEncryptionInfo(final CipherSuite suite, final byte[] edek, + final byte[] iv, final String ezKeyVersionName) { checkNotNull(suite); - checkNotNull(key); + checkNotNull(edek); checkNotNull(iv); - checkArgument(key.length == suite.getAlgorithmBlockSize(), + checkNotNull(ezKeyVersionName); + checkArgument(edek.length == suite.getAlgorithmBlockSize(), "Unexpected key length"); checkArgument(iv.length == suite.getAlgorithmBlockSize(), "Unexpected IV length"); this.cipherSuite = suite; - this.key = key; + this.edek = edek; this.iv = iv; + this.ezKeyVersionName = ezKeyVersionName; } /** @@ -57,25 +70,32 @@ public CipherSuite getCipherSuite() { } /** - * @return encrypted data encryption key for the file + * @return encrypted data encryption key (EDEK) for the file */ public byte[] getEncryptedDataEncryptionKey() { - return key; + return edek; } /** - * @return initialization vector for the cipher used to encrypt the file + * @return initialization vector (IV) for the cipher used to encrypt the file */ public byte[] getIV() { return iv; } + /** + * @return name of the encryption zone KeyVersion used to encrypt the + * encrypted data encryption key (EDEK). + */ + public String getEzKeyVersionName() { return ezKeyVersionName; } + @Override public String toString() { StringBuilder builder = new StringBuilder("{"); builder.append("cipherSuite: " + cipherSuite); - builder.append(", key: " + Hex.encodeHexString(key)); + builder.append(", edek: " + Hex.encodeHexString(edek)); builder.append(", iv: " + Hex.encodeHexString(iv)); + builder.append(", ezKeyVersionName: " + ezKeyVersionName); builder.append("}"); return builder.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 4c2a60ef4b1..e75b065130c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -39,6 +39,9 @@ fs-encryption (Unreleased) HDFS-6635. Refactor encryption zone functionality into new EncryptionZoneManager class. (wang) + HDFS-6474. Namenode needs to get the actual keys and iv from the + KeyProvider. (wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index a43106943bb..fc2e4053447 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -561,6 +561,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false; public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm"; public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class"; + public static final String DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_KEY = "dfs.namenode.key.version.refresh.interval.ms"; + public static final int DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_DEFAULT = 5*60*1000; // Journal-node related configs. These are read on the JN side. 
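
The two DFSConfigKeys constants added just above (key name plus a five-minute default) are what the EncryptionZoneManager monitor introduced later in this patch consumes. A hypothetical override for a test cluster that wants faster key-version refresh might look like this (sketch only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class KeyVersionRefreshConfigExample {
      public static Configuration withFastRefresh() {
        Configuration conf = new Configuration();
        // Poll the KeyProvider for rolled encryption zone keys every 10 seconds
        // instead of the default 5 minutes (300000 ms).
        conf.setInt(
            DFSConfigKeys.DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_KEY,
            10 * 1000);
        return conf;
      }
    }
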
public static final String DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index cd6a0e576ad..4f62b42ddaa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -2335,6 +2335,7 @@ public static HdfsProtos.FileEncryptionInfoProto convert( .setSuite(convert(info.getCipherSuite())) .setKey(getByteString(info.getEncryptedDataEncryptionKey())) .setIv(getByteString(info.getIV())) + .setEzKeyVersionName(info.getEzKeyVersionName()) .build(); } @@ -2346,7 +2347,8 @@ public static FileEncryptionInfo convert( CipherSuite suite = convert(proto.getSuite()); byte[] key = proto.getKey().toByteArray(); byte[] iv = proto.getIv().toByteArray(); - return new FileEncryptionInfo(suite, key, iv); + String ezKeyVersionName = proto.getEzKeyVersionName(); + return new FileEncryptionInfo(suite, key, iv, ezKeyVersionName); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index a43273dfe34..faba3a929c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -3,28 +3,50 @@ import java.io.IOException; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants .CRYPTO_XATTR_ENCRYPTION_ZONE; /** - * Manages the list of encryption zones in the filesystem. Relies on the - * FSDirectory lock for synchronization. + * Manages the list of encryption zones in the filesystem. + *
+ * The EncryptionZoneManager has its own lock, but relies on the FSDirectory + * lock being held for many operations. The FSDirectory lock should not be + * taken if the manager lock is already held. */ public class EncryptionZoneManager { + public static Logger LOG = LoggerFactory.getLogger(EncryptionZoneManager + .class); + /** * EncryptionZoneInt is the internal representation of an encryption zone. The * external representation of an EZ is embodied in an EncryptionZone and @@ -34,9 +56,30 @@ private class EncryptionZoneInt { private final String keyId; private final long inodeId; + private final HashSet keyVersions; + private KeyVersion latestVersion; + EncryptionZoneInt(long inodeId, String keyId) { this.keyId = keyId; this.inodeId = inodeId; + keyVersions = Sets.newHashSet(); + latestVersion = null; + } + + KeyVersion getLatestKeyVersion() { + return latestVersion; + } + + void addKeyVersion(KeyVersion version) { + Preconditions.checkNotNull(version); + if (!keyVersions.contains(version)) { + LOG.debug("Key {} has new key version {}", keyId, version); + keyVersions.add(version); + } + // Always set the latestVersion to not get stuck on an old version in + // racy situations. Should eventually converge thanks to the + // monitor. + latestVersion = version; } String getKeyId() { @@ -47,49 +90,265 @@ long getINodeId() { return inodeId; } - String getFullPathName() { - return dir.getInode(inodeId).getFullPathName(); - } + } + + /** + * Protects the encryptionZones map and its contents. + */ + private final ReentrantReadWriteLock lock; + + private void readLock() { + lock.readLock().lock(); + } + + private void readUnlock() { + lock.readLock().unlock(); + } + + private void writeLock() { + lock.writeLock().lock(); + } + + private void writeUnlock() { + lock.writeLock().unlock(); + } + + public boolean hasWriteLock() { + return lock.isWriteLockedByCurrentThread(); + } + + public boolean hasReadLock() { + return lock.getReadHoldCount() > 0 || hasWriteLock(); } private final Map encryptionZones; - private final FSDirectory dir; + private final ScheduledExecutorService monitor; + private final KeyProvider provider; /** * Construct a new EncryptionZoneManager. * * @param dir Enclosing FSDirectory */ - public EncryptionZoneManager(FSDirectory dir) { + public EncryptionZoneManager(FSDirectory dir, Configuration conf, + KeyProvider provider) { + this.dir = dir; + this.provider = provider; + lock = new ReentrantReadWriteLock(); encryptionZones = new HashMap(); + + monitor = Executors.newScheduledThreadPool(1, + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat(EncryptionZoneMonitor.class.getSimpleName() + "-%d") + .build()); + final int refreshMs = conf.getInt( + DFSConfigKeys.DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_KEY, + DFSConfigKeys.DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_DEFAULT + ); + Preconditions.checkArgument(refreshMs >= 0, "%s cannot be negative", + DFSConfigKeys.DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_KEY); + monitor.scheduleAtFixedRate(new EncryptionZoneMonitor(), 0, refreshMs, + TimeUnit.MILLISECONDS); + } + + /** + * Periodically wakes up to fetch the latest version of each encryption + * zone key. 
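
The monitor wiring in the constructor above (a single daemon thread created with Guava's ThreadFactoryBuilder and driven by scheduleAtFixedRate) is a reusable pattern; a stripped-down, hypothetical version of just that wiring:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    public class PeriodicRefreshSketch {
      public static ScheduledExecutorService start(Runnable refreshTask,
          long refreshMs) {
        // One daemon thread: the refresher must never keep the process alive.
        ScheduledExecutorService monitor = Executors.newScheduledThreadPool(1,
            new ThreadFactoryBuilder()
                .setDaemon(true)
                .setNameFormat("KeyVersionRefresher-%d")
                .build());
        // First run immediately, then at the configured interval.
        monitor.scheduleAtFixedRate(refreshTask, 0, refreshMs,
            TimeUnit.MILLISECONDS);
        return monitor;
      }
    }
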
+ */ + private class EncryptionZoneMonitor implements Runnable { + @Override + public void run() { + LOG.debug("Monitor waking up to refresh encryption zone key versions"); + HashMap toFetch = Maps.newHashMap(); + HashMap toUpdate = + Maps.newHashMap(); + // Determine the keyIds to fetch + readLock(); + try { + for (EncryptionZoneInt ezi : encryptionZones.values()) { + toFetch.put(ezi.getINodeId(), ezi.getKeyId()); + } + } finally { + readUnlock(); + } + LOG.trace("Found {} keys to check", toFetch.size()); + // Fetch the key versions while not holding the lock + for (Map.Entry entry : toFetch.entrySet()) { + try { + KeyVersion version = provider.getCurrentKey(entry.getValue()); + toUpdate.put(entry.getKey(), version); + } catch (IOException e) { + LOG.warn("Error while getting the current key for {} {}", + entry.getValue(), e); + } + } + LOG.trace("Fetched {} key versions from KeyProvider", toUpdate.size()); + // Update the key versions for each encryption zone + writeLock(); + try { + for (Map.Entry entry : toUpdate.entrySet()) { + EncryptionZoneInt ezi = encryptionZones.get(entry.getKey()); + // zone might have been removed in the intervening time + if (ezi == null) { + continue; + } + ezi.addKeyVersion(entry.getValue()); + } + } finally { + writeUnlock(); + } + } + } + + /** + * Forces the EncryptionZoneMonitor to run, waiting until completion. + */ + @VisibleForTesting + public void kickMonitor() throws Exception { + Future future = monitor.submit(new EncryptionZoneMonitor()); + future.get(); + } + + /** + * Immediately fetches the latest KeyVersion for an encryption zone, + * also updating the encryption zone. + * + * @param iip of the encryption zone + * @return latest KeyVersion + * @throws IOException on KeyProvider error + */ + KeyVersion updateLatestKeyVersion(INodesInPath iip) throws IOException { + EncryptionZoneInt ezi; + readLock(); + try { + ezi = getEncryptionZoneForPath(iip); + } finally { + readUnlock(); + } + if (ezi == null) { + throw new IOException("Cannot update KeyVersion since iip is not within" + + " an encryption zone"); + } + + // Do not hold the lock while doing KeyProvider operations + KeyVersion version = provider.getCurrentKey(ezi.getKeyId()); + + writeLock(); + try { + ezi.addKeyVersion(version); + return version; + } finally { + writeUnlock(); + } } /** * Add a new encryption zone. + *
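
Both the monitor's run() method above and updateLatestKeyVersion() follow the same discipline: copy what is needed under the read lock, talk to the KeyProvider with no lock held, then retake the write lock and apply the results while tolerating zones removed in the meantime. A compact, generic sketch of that shape (hypothetical types, not the patch code):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class SnapshotFetchApply<K, V> {
      public interface Fetcher<K, V> { V fetch(K key) throws IOException; }

      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final Map<K, V> state = new HashMap<K, V>();

      public void refresh(Fetcher<K, V> fetcher) {
        // 1. Snapshot the entries under the read lock.
        Map<K, V> snapshot;
        lock.readLock().lock();
        try {
          snapshot = new HashMap<K, V>(state);
        } finally {
          lock.readLock().unlock();
        }
        // 2. Slow I/O (e.g. KeyProvider calls) with no lock held.
        Map<K, V> fetched = new HashMap<K, V>();
        for (K key : snapshot.keySet()) {
          try {
            fetched.put(key, fetcher.fetch(key));
          } catch (IOException e) {
            // Skip this entry; the next scheduled run will retry it.
          }
        }
        // 3. Apply under the write lock, skipping entries removed meanwhile.
        lock.writeLock().lock();
        try {
          for (Map.Entry<K, V> e : fetched.entrySet()) {
            if (state.containsKey(e.getKey())) {
              state.put(e.getKey(), e.getValue());
            }
          }
        } finally {
          lock.writeLock().unlock();
        }
      }
    }
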
+ * Called while holding the FSDirectory lock. * * @param inodeId of the encryption zone * @param keyId encryption zone key id */ void addEncryptionZone(Long inodeId, String keyId) { + assert dir.hasWriteLock(); final EncryptionZoneInt ez = new EncryptionZoneInt(inodeId, keyId); - encryptionZones.put(inodeId, ez); + writeLock(); + try { + encryptionZones.put(inodeId, ez); + } finally { + writeUnlock(); + } } + /** + * Remove an encryption zone. + *
+ * Called while holding the FSDirectory lock. + */ void removeEncryptionZone(Long inodeId) { - encryptionZones.remove(inodeId); + assert dir.hasWriteLock(); + writeLock(); + try { + encryptionZones.remove(inodeId); + } finally { + writeUnlock(); + } } /** * Returns true if an IIP is within an encryption zone. + *
+ * Called while holding the FSDirectory lock. */ boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException, SnapshotAccessControlException { - return (getEncryptionZoneForPath(iip) != null); + assert dir.hasReadLock(); + readLock(); + try { + return (getEncryptionZoneForPath(iip) != null); + } finally { + readUnlock(); + } } + /** + * Returns the path of the EncryptionZoneInt. + *
+ * Called while holding the FSDirectory lock. + */ + private String getFullPathName(EncryptionZoneInt ezi) { + assert dir.hasReadLock(); + return dir.getInode(ezi.getINodeId()).getFullPathName(); + } + + KeyVersion getLatestKeyVersion(final INodesInPath iip) { + readLock(); + try { + EncryptionZoneInt ezi = getEncryptionZoneForPath(iip); + if (ezi == null) { + return null; + } + return ezi.getLatestKeyVersion(); + } finally { + readUnlock(); + } + } + + /** + * @return true if the provided keyVersionName is the name of a + * valid KeyVersion for the encryption zone of iip, + * and iip is within an encryption zone. + */ + boolean isValidKeyVersion(final INodesInPath iip, String keyVersionName) { + readLock(); + try { + EncryptionZoneInt ezi = getEncryptionZoneForPath(iip); + if (ezi == null) { + return false; + } + for (KeyVersion ezVersion : ezi.keyVersions) { + if (keyVersionName.equals(ezVersion.getVersionName())) { + return true; + } + } + return false; + } finally { + readUnlock(); + } + } + + /** + * Looks up the EncryptionZoneInt for a path within an encryption zone. + * Returns null if path is not within an EZ. + *
+ * Must be called while holding the manager lock. + */ private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) { + assert hasReadLock(); Preconditions.checkNotNull(iip); final INode[] inodes = iip.getINodes(); for (int i = inodes.length - 1; i >= 0; i--) { @@ -105,8 +364,10 @@ private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) { } /** - * Throws an exception if the provided inode cannot be renamed into the + * Throws an exception if the provided path cannot be renamed into the * destination because of differing encryption zones. + *
+ * Called while holding the FSDirectory lock. * * @param srcIIP source IIP * @param dstIIP destination IIP @@ -115,66 +376,101 @@ private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) { */ void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src) throws IOException { - final boolean srcInEZ = (getEncryptionZoneForPath(srcIIP) != null); - final boolean dstInEZ = (getEncryptionZoneForPath(dstIIP) != null); - if (srcInEZ) { - if (!dstInEZ) { - throw new IOException(src + " can't be moved from an encryption zone."); - } - } else { - if (dstInEZ) { - throw new IOException(src + " can't be moved into an encryption zone."); - } - } - - if (srcInEZ || dstInEZ) { + assert dir.hasReadLock(); + readLock(); + try { final EncryptionZoneInt srcEZI = getEncryptionZoneForPath(srcIIP); final EncryptionZoneInt dstEZI = getEncryptionZoneForPath(dstIIP); - Preconditions.checkArgument(srcEZI != null, "couldn't find src EZ?"); - Preconditions.checkArgument(dstEZI != null, "couldn't find dst EZ?"); - if (srcEZI != dstEZI) { - final String srcEZPath = srcEZI.getFullPathName(); - final String dstEZPath = dstEZI.getFullPathName(); - final StringBuilder sb = new StringBuilder(src); - sb.append(" can't be moved from encryption zone "); - sb.append(srcEZPath); - sb.append(" to encryption zone "); - sb.append(dstEZPath); - sb.append("."); - throw new IOException(sb.toString()); + final boolean srcInEZ = (srcEZI != null); + final boolean dstInEZ = (dstEZI != null); + if (srcInEZ) { + if (!dstInEZ) { + throw new IOException( + src + " can't be moved from an encryption zone."); + } + } else { + if (dstInEZ) { + throw new IOException( + src + " can't be moved into an encryption zone."); + } } + + if (srcInEZ || dstInEZ) { + Preconditions.checkState(srcEZI != null, "couldn't find src EZ?"); + Preconditions.checkState(dstEZI != null, "couldn't find dst EZ?"); + if (srcEZI != dstEZI) { + final String srcEZPath = getFullPathName(srcEZI); + final String dstEZPath = getFullPathName(dstEZI); + final StringBuilder sb = new StringBuilder(src); + sb.append(" can't be moved from encryption zone "); + sb.append(srcEZPath); + sb.append(" to encryption zone "); + sb.append(dstEZPath); + sb.append("."); + throw new IOException(sb.toString()); + } + } + } finally { + readUnlock(); } } - XAttr createEncryptionZone(String src, String keyId) throws IOException { - if (dir.isNonEmptyDirectory(src)) { - throw new IOException( - "Attempt to create an encryption zone for a non-empty directory."); - } + /** + * Create a new encryption zone. + *
+ * Called while holding the FSDirectory lock. + */ + XAttr createEncryptionZone(String src, String keyId, KeyVersion keyVersion) + throws IOException { + assert dir.hasWriteLock(); + writeLock(); + try { + if (dir.isNonEmptyDirectory(src)) { + throw new IOException( + "Attempt to create an encryption zone for a non-empty directory."); + } - final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false); - final EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP); - if (ezi != null) { - throw new IOException("Directory " + src + - " is already in an encryption zone. (" + ezi.getFullPathName() + ")"); - } + final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false); + EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP); + if (ezi != null) { + throw new IOException("Directory " + src + " is already in an " + + "encryption zone. (" + getFullPathName(ezi) + ")"); + } - final XAttr keyIdXAttr = - XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyId.getBytes()); - final List xattrs = Lists.newArrayListWithCapacity(1); - xattrs.add(keyIdXAttr); - final INode inode = - dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); - addEncryptionZone(inode.getId(), keyId); - return keyIdXAttr; + final XAttr keyIdXAttr = XAttrHelper + .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyId.getBytes()); + + final List xattrs = Lists.newArrayListWithCapacity(1); + xattrs.add(keyIdXAttr); + // updating the xattr will call addEncryptionZone, + // done this way to handle edit log loading + dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); + // Re-get the new encryption zone add the latest key version + ezi = getEncryptionZoneForPath(srcIIP); + ezi.addKeyVersion(keyVersion); + return keyIdXAttr; + } finally { + writeUnlock(); + } } + /** + * Return the current list of encryption zones. + *
+ * Called while holding the FSDirectory lock. + */ List listEncryptionZones() throws IOException { - final List ret = - Lists.newArrayListWithExpectedSize(encryptionZones.size()); - for (EncryptionZoneInt ezi : encryptionZones.values()) { - ret.add(new EncryptionZone(ezi.getFullPathName(), ezi.getKeyId())); + assert dir.hasReadLock(); + readLock(); + try { + final List ret = + Lists.newArrayListWithExpectedSize(encryptionZones.size()); + for (EncryptionZoneInt ezi : encryptionZones.values()) { + ret.add(new EncryptionZone(getFullPathName(ezi), ezi.getKeyId())); + } + return ret; + } finally { + readUnlock(); } - return ret; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 81dcf9ef95b..5c070d50971 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO; import static org.apache.hadoop.util.Time.now; @@ -35,6 +36,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileEncryptionInfo; @@ -162,7 +164,7 @@ boolean hasWriteLock() { } boolean hasReadLock() { - return this.dirLock.getReadHoldCount() > 0; + return this.dirLock.getReadHoldCount() > 0 || hasWriteLock(); } public int getReadHoldCount() { @@ -173,7 +175,8 @@ public int getWriteHoldCount() { return this.dirLock.getWriteHoldCount(); } - final EncryptionZoneManager ezManager; + @VisibleForTesting + public final EncryptionZoneManager ezManager; /** * Caches frequently used file names used in {@link INode} to reuse @@ -224,7 +227,7 @@ public int getWriteHoldCount() { nameCache = new NameCache(threshold); namesystem = ns; - ezManager = new EncryptionZoneManager(this); + ezManager = new EncryptionZoneManager(this, conf, ns.getProvider()); } private FSNamesystem getFSNamesystem() { @@ -905,16 +908,6 @@ void updateQuotasInSourceTree() throws QuotaExceededException { } } - boolean isInAnEZ(INodesInPath iip) - throws UnresolvedLinkException, SnapshotAccessControlException { - readLock(); - try { - return ezManager.isInAnEZ(iip); - } finally { - readUnlock(); - } - } - /** * Set file replication * @@ -2618,12 +2611,46 @@ List filterINodeXAttrs(final List existingXAttrs, return newXAttrs; } - - XAttr createEncryptionZone(String src, String keyId) + + boolean isInAnEZ(INodesInPath iip) + throws UnresolvedLinkException, SnapshotAccessControlException { + readLock(); + try { + return ezManager.isInAnEZ(iip); + } finally { + readUnlock(); + } + } + + KeyVersion getLatestKeyVersion(INodesInPath iip) { + readLock(); + try { + return ezManager.getLatestKeyVersion(iip); + } finally { + readUnlock(); + } + } + + KeyVersion updateLatestKeyVersion(INodesInPath iip) throws + IOException { + // No 
locking, this operation does not involve any FSDirectory operations + return ezManager.updateLatestKeyVersion(iip); + } + + boolean isValidKeyVersion(INodesInPath iip, String keyVersionName) { + readLock(); + try { + return ezManager.isValidKeyVersion(iip, keyVersionName); + } finally { + readUnlock(); + } + } + + XAttr createEncryptionZone(String src, String keyId, KeyVersion keyVersion) throws IOException { writeLock(); try { - return ezManager.createEncryptionZone(src, keyId); + return ezManager.createEncryptionZone(src, keyId, keyVersion); } finally { writeUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6d0ca573a34..1e6fbd777bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; @@ -100,6 +101,7 @@ import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.URI; +import java.security.GeneralSecurityException; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; @@ -133,6 +135,7 @@ import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.CacheFlag; @@ -533,7 +536,7 @@ private void logAuditEvent(boolean succeeded, private final NNConf nnConf; - private KeyProvider provider = null; + private KeyProviderCryptoExtension provider = null; private KeyProvider.Options providerOptions = null; private final CryptoCodec codec; @@ -929,7 +932,8 @@ private void initializeKeyProvider(final Configuration conf) { LOG.error(err); throw new RuntimeException(err); } - provider = providers.get(0); + provider = KeyProviderCryptoExtension + .createKeyProviderCryptoExtension(providers.get(0)); if (provider.isTransient()) { final String err = "A KeyProvider was found but it is a transient provider."; @@ -2310,7 +2314,7 @@ private void verifyParentDir(String src) throws FileNotFoundException, * CipherSuite from the list provided by the client. Since the client may * be newer, need to handle unknown CipherSuites. * - * @param src path of the file + * @param srcIIP path of the file * @param cipherSuites client-provided list of supported CipherSuites, * in desired order. * @return chosen CipherSuite, or null if file is not in an EncryptionZone @@ -2349,6 +2353,62 @@ private CipherSuite chooseCipherSuite(INodesInPath srcIIP, List return chosen; } + /** + * Create a new FileEncryptionInfo for a path. Also chooses an + * appropriate CipherSuite to use from the list provided by the + * client. 
+ * + * @param src Target path + * @param pathComponents Target path split up into path components + * @param cipherSuites List of CipherSuites provided by the client + * @return a new FileEncryptionInfo, or null if path is not within an + * encryption + * zone. + * @throws IOException + */ + private FileEncryptionInfo newFileEncryptionInfo(String src, + byte[][] pathComponents, List cipherSuites) + throws IOException { + INodesInPath iip = null; + CipherSuite suite = null; + KeyVersion latestEZKeyVersion = null; + readLock(); + try { + src = FSDirectory.resolvePath(src, pathComponents, dir); + iip = dir.getINodesInPath4Write(src); + // Nothing to do if the path is not within an EZ + if (!dir.isInAnEZ(iip)) { + return null; + } + suite = chooseCipherSuite(iip, cipherSuites); + if (suite != null) { + Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN), + "Chose an UNKNOWN CipherSuite!"); + } + latestEZKeyVersion = dir.getLatestKeyVersion(iip); + } finally { + readUnlock(); + } + + // If the latest key version is null, need to fetch it and update + if (latestEZKeyVersion == null) { + latestEZKeyVersion = dir.updateLatestKeyVersion(iip); + } + Preconditions.checkState(latestEZKeyVersion != null); + + // Generate the EDEK while not holding the lock + KeyProviderCryptoExtension.EncryptedKeyVersion edek = null; + try { + edek = provider.generateEncryptedKey(latestEZKeyVersion); + } catch (GeneralSecurityException e) { + throw new IOException(e); + } + Preconditions.checkNotNull(edek); + + return new FileEncryptionInfo(suite, edek.getEncryptedKey().getMaterial(), + edek.getIv(), edek.getKeyVersionName()); + } + /** * Create a new file entry in the namespace. * @@ -2426,26 +2486,62 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, boolean overwrite = flag.contains(CreateFlag.OVERWRITE); waitForLoadingFSImage(); - writeLock(); + + /* + * We want to avoid holding any locks while creating a new + * FileEncryptionInfo, since this can be very slow. Since the path can + * flip flop between being in an encryption zone and not in the meantime, + * we need to recheck the preconditions and generate a new + * FileEncryptionInfo in some circumstances. + * + * A special RetryStartFileException is used to indicate that we should + * retry creation of a FileEncryptionInfo. + */ try { - checkOperation(OperationCategory.WRITE); - checkNameNodeSafeMode("Cannot create file" + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); - startFileInternal(pc, src, permissions, holder, clientMachine, create, - overwrite, createParent, replication, blockSize, cipherSuites, - logRetryCache); - stat = dir.getFileInfo(src, false); - } catch (StandbyException se) { - skipSync = true; - throw se; + boolean shouldContinue = true; + int iters = 0; + while (shouldContinue) { + skipSync = false; + if (iters >= 10) { + throw new IOException("Too many retries because of encryption zone " + + "operations, something might be broken!"); + } + shouldContinue = false; + iters++; + // Optimistically generate a FileEncryptionInfo for this path. 
+ FileEncryptionInfo feInfo = + newFileEncryptionInfo(src, pathComponents, cipherSuites); + + // Try to create the file with this feInfo + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + checkNameNodeSafeMode("Cannot create file" + src); + src = FSDirectory.resolvePath(src, pathComponents, dir); + startFileInternal(pc, src, permissions, holder, clientMachine, create, + overwrite, createParent, replication, blockSize, feInfo, + logRetryCache); + stat = dir.getFileInfo(src, false); + } catch (StandbyException se) { + skipSync = true; + throw se; + } catch (RetryStartFileException e) { + shouldContinue = true; + if (LOG.isTraceEnabled()) { + LOG.trace("Preconditions failed, retrying creation of " + + "FileEncryptionInfo", e); + } + } finally { + writeUnlock(); + } + } } finally { - writeUnlock(); // There might be transactions logged while trying to recover the lease. // They need to be sync'ed even when an exception was thrown. if (!skipSync) { getEditLog().logSync(); } - } + } logAuditEvent(true, "create", src, null, stat); return stat; @@ -2463,11 +2559,11 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, private void startFileInternal(FSPermissionChecker pc, String src, PermissionStatus permissions, String holder, String clientMachine, boolean create, boolean overwrite, boolean createParent, - short replication, long blockSize, List cipherSuites, + short replication, long blockSize, FileEncryptionInfo feInfo, boolean logRetryEntry) throws FileAlreadyExistsException, AccessControlException, UnresolvedLinkException, FileNotFoundException, - ParentNotDirectoryException, IOException { + ParentNotDirectoryException, RetryStartFileException, IOException { assert hasWriteLock(); // Verify that the destination does not exist as a directory already. final INodesInPath iip = dir.getINodesInPath4Write(src); @@ -2477,22 +2573,21 @@ private void startFileInternal(FSPermissionChecker pc, String src, " already exists as a directory"); } - FileEncryptionInfo feInfo = null; - CipherSuite suite = chooseCipherSuite(iip, cipherSuites); - if (suite != null) { - Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN), - "Chose an UNKNOWN CipherSuite!"); - // TODO: fill in actual key/iv in HDFS-6474 - // For now, populate with dummy data - byte[] key = new byte[suite.getAlgorithmBlockSize()]; - for (int i = 0; i < key.length; i++) { - key[i] = (byte)i; + if (!dir.isInAnEZ(iip)) { + // If the path is not in an EZ, we don't need an feInfo. + // Null it out in case one was already generated. + feInfo = null; + } else { + // The path is now within an EZ, but no feInfo. Retry. + if (feInfo == null) { + throw new RetryStartFileException(); } - byte[] iv = new byte[suite.getAlgorithmBlockSize()]; - for (int i = 0; i < iv.length; i++) { - iv[i] = (byte)(3+i*2); + // It's in an EZ and we have a provided feInfo. Make sure the + // keyVersion of the encryption key used matches one of the keyVersions of + // the key of the encryption zone. 
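
The comment block and loop above describe an optimistic, bounded retry: build the expensive FileEncryptionInfo with no lock held, then recheck the encryption-zone preconditions under the namesystem lock and retry if they changed. A generic sketch of that control flow (hypothetical interfaces, not the FSNamesystem code):

    import java.io.IOException;

    public class OptimisticRetrySketch {
      public interface Attempt<T> {
        // Expensive preparation done with no locks held (e.g. generate an EDEK).
        T prepareUnlocked() throws IOException;
        // Commit under the lock; return false if preconditions changed and the
        // prepared value must be regenerated.
        boolean commitLocked(T prepared) throws IOException;
      }

      public static <T> void run(Attempt<T> attempt, int maxIters)
          throws IOException {
        for (int iters = 0; iters < maxIters; iters++) {
          T prepared = attempt.prepareUnlocked();
          if (attempt.commitLocked(prepared)) {
            return; // preconditions still held, commit succeeded
          }
          // Preconditions flipped (zone created/removed, key rolled): retry.
        }
        throw new IOException("Too many retries because of encryption zone "
            + "operations, something might be broken!");
      }
    }
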
+ if (!dir.isValidKeyVersion(iip, feInfo.getEzKeyVersionName())) { + throw new RetryStartFileException(); } - feInfo = new FileEncryptionInfo(suite, key, iv); } final INodeFile myFile = INodeFile.valueOf(inode, src, true); @@ -8319,12 +8414,14 @@ void createEncryptionZone(final String src, String keyIdArg) String keyId = keyIdArg; boolean success = false; try { + KeyVersion keyVersion; if (keyId == null || keyId.isEmpty()) { - keyId = createNewKey(src); + keyId = UUID.randomUUID().toString(); + keyVersion = createNewKey(keyId, src); createdKey = true; } else { - if (provider.getCurrentKey(keyId) == null) { - + keyVersion = provider.getCurrentKey(keyId); + if (keyVersion == null) { /* * It would be nice if we threw something more specific than * IOException when the key is not found, but the KeyProvider API @@ -8336,7 +8433,7 @@ void createEncryptionZone(final String src, String keyIdArg) throw new IOException("Key " + keyId + " doesn't exist."); } } - createEncryptionZoneInt(src, keyId, cacheEntry != null); + createEncryptionZoneInt(src, keyId, keyVersion, cacheEntry != null); success = true; } catch (AccessControlException e) { logAuditEvent(false, "createEncryptionZone", src); @@ -8351,7 +8448,8 @@ void createEncryptionZone(final String src, String keyIdArg) } private void createEncryptionZoneInt(final String srcArg, String keyId, - final boolean logRetryCache) throws IOException { + final KeyVersion keyVersion, final boolean logRetryCache) throws + IOException { String src = srcArg; HdfsFileStatus resultingStat = null; checkSuperuserPrivilege(); @@ -8365,7 +8463,7 @@ private void createEncryptionZoneInt(final String srcArg, String keyId, checkNameNodeSafeMode("Cannot create encryption zone on " + src); src = FSDirectory.resolvePath(src, pathComponents, dir); - final XAttr keyIdXAttr = dir.createEncryptionZone(src, keyId); + final XAttr keyIdXAttr = dir.createEncryptionZone(src, keyId, keyVersion); List xAttrs = Lists.newArrayListWithCapacity(1); xAttrs.add(keyIdXAttr); getEditLog().logSetXAttrs(src, xAttrs, logRetryCache); @@ -8377,19 +8475,29 @@ private void createEncryptionZoneInt(final String srcArg, String keyId, logAuditEvent(true, "createEncryptionZone", src, null, resultingStat); } - private String createNewKey(String src) + /** + * Create a new key on the KeyProvider for an encryption zone. + * + * @param keyId id of the key + * @param src path of the encryption zone. 
+ * @return KeyVersion of the created key + * @throws IOException + */ + private KeyVersion createNewKey(String keyId, String src) throws IOException { - final String keyId = UUID.randomUUID().toString(); + Preconditions.checkNotNull(keyId); + Preconditions.checkNotNull(src); // TODO pass in hdfs://HOST:PORT (HDFS-6490) providerOptions.setDescription(src); providerOptions.setBitLength(codec.getCipherSuite() .getAlgorithmBlockSize()*8); + KeyVersion version = null; try { - provider.createKey(keyId, providerOptions); + version = provider.createKey(keyId, providerOptions); } catch (NoSuchAlgorithmException e) { throw new IOException(e); } - return keyId; + return version; } List listEncryptionZones() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java new file mode 100644 index 00000000000..a5758a7e0e4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +public class RetryStartFileException extends Exception { +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index b9f17e4bc20..da53be1006e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -184,6 +184,7 @@ message FileEncryptionInfoProto { required CipherSuite suite = 1; required bytes key = 2; required bytes iv = 3; + required string ezKeyVersionName = 4; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 1a1f22aaf17..222bf92cd58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2008,4 +2008,15 @@ + + dfs.namenode.key.version.refresh.interval.ms + 300000 + How frequently the namenode will attempt to fetch the latest + key version of encryption zone keys from the configured KeyProvider, in + milliseconds. New key versions are created when a key is rolled. This + setting thus controls the window of staleness where an old key version + is used after a key is rolled. 
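+ For example, with the default of 300000 (five minutes), a file created
+ shortly after a key roll may still be encrypted under the previous key
+ version for up to five minutes.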
+ + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java index 85f7ff57013..a75e46e4bd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java @@ -21,17 +21,20 @@ import java.io.IOException; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; +import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.UUID; +import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderFactory; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -39,16 +42,20 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; -import com.google.common.base.Preconditions; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.fail; public class TestEncryptionZonesAPI { @@ -71,6 +78,7 @@ public void setUpCluster() throws IOException { JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks"); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); fs = (DistributedFileSystem) createFileSystem(conf); + Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); } protected FileSystem createFileSystem(Configuration conf) throws IOException { @@ -382,21 +390,80 @@ public void testCipherSuiteNegotiation() throws Exception { fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING); DFSTestUtil.createFile(fs, new Path(zone, "success3"), 4096, (short) 1, 0xFEED); + // Check KeyProvider state + // Flushing the KP on the NN, since it caches, and init a test one + cluster.getNamesystem().getProvider().flush(); + KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0); + List keys = provider.getKeys(); + assertEquals("Expected NN to have created one key per zone", 1, + keys.size()); + List allVersions = Lists.newArrayList(); + for (String key : keys) { + List versions = provider.getKeyVersions(key); + assertEquals("Should only have one key version per key", 1, + versions.size()); + allVersions.addAll(versions); + } // Check that the specified CipherSuite was correctly saved on the NN for (int i=2; i<=3; i++) { - LocatedBlocks blocks = - fs.getClient().getLocatedBlocks(zone.toString() + "/success2", 0); - FileEncryptionInfo feInfo = blocks.getFileEncryptionInfo(); + FileEncryptionInfo feInfo = + getFileEncryptionInfo(new 
Path(zone.toString() + + "/success" + i)); assertEquals(feInfo.getCipherSuite(), CipherSuite.AES_CTR_NOPADDING); - // TODO: validate against actual key/iv in HDFS-6474 - byte[] key = feInfo.getEncryptedDataEncryptionKey(); - for (int j = 0; j < key.length; j++) { - assertEquals("Unexpected key byte", (byte)j, key[j]); - } - byte[] iv = feInfo.getIV(); - for (int j = 0; j < iv.length; j++) { - assertEquals("Unexpected IV byte", (byte)(3+j*2), iv[j]); - } } } + + private void validateFiles(Path p1, Path p2, int len) throws Exception { + FSDataInputStream in1 = fs.open(p1); + FSDataInputStream in2 = fs.open(p2); + for (int i=0; i zones = dfsAdmin.listEncryptionZones(); + assertEquals("Expected 1 EZ", 1, zones.size()); + String keyId = zones.get(0).getKeyId(); + cluster.getNamesystem().getProvider().rollNewVersion(keyId); + cluster.getNamesystem().getFSDirectory().ezManager.kickMonitor(); + // Read them back in and compare byte-by-byte + validateFiles(baseFile, encFile1, len); + // Write a new enc file and validate + final Path encFile2 = new Path(zone, "myfile2"); + DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED); + // FEInfos should be different + FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1); + FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2); + assertFalse("EDEKs should be different", Arrays.equals( + feInfo1.getEncryptedDataEncryptionKey(), + feInfo2.getEncryptedDataEncryptionKey())); + assertNotEquals("Key was rolled, versions should be different", + feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName()); + // Contents still equal + validateFiles(encFile1, encFile2, len); + } } From 905c90b066503c966ff78e3d1add57f4aff6e9c6 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Sat, 12 Jul 2014 01:29:26 +0000 Subject: [PATCH 032/354] HADOOP-10734. Implement high-performance secure random number sources. 
(Yi Liu via Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1609874 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 + hadoop-common-project/hadoop-common/pom.xml | 2 + .../hadoop-common/src/CMakeLists.txt | 3 +- .../hadoop/crypto/JceAesCtrCryptoCodec.java | 8 +- .../crypto/OpensslAesCtrCryptoCodec.java | 33 +- .../crypto/random/OpensslSecureRandom.java | 119 +++++++ .../hadoop/crypto/random/OsSecureRandom.java | 115 ++++++ .../hadoop/fs/CommonConfigurationKeys.java | 1 - .../fs/CommonConfigurationKeysPublic.java | 14 +- .../crypto/random/OpensslSecureRandom.c | 335 ++++++++++++++++++ .../random/org_apache_hadoop_crypto_random.h | 40 +++ .../src/main/resources/core-default.xml | 20 +- .../random/TestOpensslSecureRandom.java | 114 ++++++ .../crypto/random/TestOsSecureRandom.java | 139 ++++++++ 14 files changed, 933 insertions(+), 13 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c create mode 100644 hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/org_apache_hadoop_crypto_random.h create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index 2ea4420bce2..f133f813136 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -6,6 +6,9 @@ fs-encryption (Unreleased) NEW FEATURES + HADOOP-10734. Implement high-performance secure random number sources. + (Yi Liu via Colin Patrick McCabe) + IMPROVEMENTS HADOOP-10603. 
Crypto input and output streams implementing Hadoop stream diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 9a5e36c7561..0c90a9490df 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -542,6 +542,7 @@ org.apache.hadoop.io.compress.lz4.Lz4Compressor org.apache.hadoop.io.compress.lz4.Lz4Decompressor org.apache.hadoop.crypto.OpensslCipher + org.apache.hadoop.crypto.random.OpensslSecureRandom org.apache.hadoop.util.NativeCrc32 org.apache.hadoop.net.unix.DomainSocket org.apache.hadoop.net.unix.DomainSocketWatcher @@ -657,6 +658,7 @@ org.apache.hadoop.io.compress.lz4.Lz4Compressor org.apache.hadoop.io.compress.lz4.Lz4Decompressor org.apache.hadoop.crypto.OpensslCipher + org.apache.hadoop.crypto.random.OpensslSecureRandom org.apache.hadoop.util.NativeCrc32 ${project.build.directory}/native/javah diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt index 9ad049e0853..84c27e5db71 100644 --- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt +++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt @@ -167,7 +167,8 @@ find_path(OPENSSL_INCLUDE_DIR if (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR) GET_FILENAME_COMPONENT(HADOOP_OPENSSL_LIBRARY ${OPENSSL_LIBRARY} NAME) SET(OPENSSL_SOURCE_FILES - "${D}/crypto/OpensslCipher.c") + "${D}/crypto/OpensslCipher.c" + "${D}/crypto/random/OpensslSecureRandom.c") else (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR) SET(OPENSSL_INCLUDE_DIR "") SET(OPENSSL_SOURCE_FILES "") diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java index 2482586720c..cd093203867 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java @@ -32,8 +32,8 @@ import com.google.common.base.Preconditions; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT; /** * Implement the AES-CTR crypto codec using JCE provider. @@ -57,8 +57,8 @@ public void setConf(Configuration conf) { this.conf = conf; provider = conf.get(HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY); final String secureRandomAlg = conf.get( - HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY, - HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT); + HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY, + HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT); try { random = (provider != null) ? 
SecureRandom.getInstance(secureRandomAlg, provider) : diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java index ee11f50683b..04c2db09e1e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java @@ -17,23 +17,34 @@ */ package org.apache.hadoop.crypto; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY; + +import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; import java.security.GeneralSecurityException; import java.security.SecureRandom; +import java.util.Random; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import com.google.common.base.Preconditions; +import org.apache.hadoop.crypto.random.OsSecureRandom; +import org.apache.hadoop.util.ReflectionUtils; /** * Implement the AES-CTR crypto codec using JNI into OpenSSL. */ @InterfaceAudience.Private public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec { + private static final Log LOG = + LogFactory.getLog(OpensslAesCtrCryptoCodec.class.getName()); + private Configuration conf; - private SecureRandom random = new SecureRandom(); + private Random random; public OpensslAesCtrCryptoCodec() { } @@ -41,6 +52,26 @@ public OpensslAesCtrCryptoCodec() { @Override public void setConf(Configuration conf) { this.conf = conf; + final Class klass = conf.getClass( + HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY, OsSecureRandom.class, + Random.class); + try { + random = ReflectionUtils.newInstance(klass, conf); + } catch (Exception e) { + LOG.info("Unable to use " + klass.getName() + ". Falling back to " + + "Java SecureRandom.", e); + this.random = new SecureRandom(); + } + } + + @Override + protected void finalize() throws Throwable { + try { + Closeable r = (Closeable) this.random; + r.close(); + } catch (ClassCastException e) { + } + super.finalize(); } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java new file mode 100644 index 00000000000..b1fa9883373 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.crypto.random; + +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.NativeCodeLoader; + +import com.google.common.base.Preconditions; + +/** + * OpenSSL secure random using JNI. + * This implementation is thread-safe. + *
+ * + * If using an Intel chipset with RDRAND, the high-performance hardware + * random number generator will be used and it's much faster than + * {@link java.security.SecureRandom}. If RDRAND is unavailable, default + * OpenSSL secure random generator will be used. It's still faster + * and can generate strong random bytes. + *
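+ * A short usage sketch (the constructor and nextBytes(byte[]) below are
+ * defined in this class):
+ *   OpensslSecureRandom random = new OpensslSecureRandom();
+ *   byte[] iv = new byte[16];
+ *   random.nextBytes(iv);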
+ * @see https://wiki.openssl.org/index.php/Random_Numbers + * @see http://en.wikipedia.org/wiki/RdRand + */ +@InterfaceAudience.Private +public class OpensslSecureRandom extends Random { + private static final long serialVersionUID = -7828193502768789584L; + private static final Log LOG = + LogFactory.getLog(OpensslSecureRandom.class.getName()); + + /** If native SecureRandom unavailable, use java SecureRandom */ + private java.security.SecureRandom fallback = null; + private static boolean nativeEnabled = false; + static { + if (NativeCodeLoader.isNativeCodeLoaded() && + NativeCodeLoader.buildSupportsOpenssl()) { + try { + initSR(); + nativeEnabled = true; + } catch (Throwable t) { + LOG.error("Failed to load Openssl SecureRandom", t); + } + } + } + + public static boolean isNativeCodeLoaded() { + return nativeEnabled; + } + + public OpensslSecureRandom() { + if (!nativeEnabled) { + fallback = new java.security.SecureRandom(); + } + } + + /** + * Generates a user-specified number of random bytes. + * It's thread-safe. + * + * @param bytes the array to be filled in with random bytes. + */ + @Override + public void nextBytes(byte[] bytes) { + if (!nativeEnabled || !nextRandBytes(bytes)) { + fallback.nextBytes(bytes); + } + } + + @Override + public void setSeed(long seed) { + // Self-seeding. + } + + /** + * Generates an integer containing the user-specified number of + * random bits (right justified, with leading zeros). + * + * @param numBits number of random bits to be generated, where + * 0 <= numBits <= 32. + * + * @return int an int containing the user-specified number + * of random bits (right justified, with leading zeros). + */ + @Override + final protected int next(int numBits) { + Preconditions.checkArgument(numBits >= 0 && numBits <= 32); + int numBytes = (numBits + 7) / 8; + byte b[] = new byte[numBytes]; + int next = 0; + + nextBytes(b); + for (int i = 0; i < numBytes; i++) { + next = (next << 8) + (b[i] & 0xFF); + } + + return next >>> (numBytes * 8 - numBits); + } + + private native static void initSR(); + private native boolean nextRandBytes(byte[] bytes); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java new file mode 100644 index 00000000000..3fa61fbbdce --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.crypto.random; + +import java.io.Closeable; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.util.Random; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.IOUtils; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT; + +/** + * A Random implementation that uses random bytes sourced from the + * operating system. + */ +@InterfaceAudience.Private +public class OsSecureRandom extends Random implements Closeable, Configurable { + + private Configuration conf; + + private final int RESERVOIR_LENGTH = 8192; + + private String randomDevPath; + + private FileInputStream stream; + + private final byte[] reservoir = new byte[RESERVOIR_LENGTH]; + + private int pos = reservoir.length; + + private void fillReservoir(int min) { + if (pos >= reservoir.length - min) { + try { + IOUtils.readFully(stream, reservoir, 0, reservoir.length); + } catch (IOException e) { + throw new RuntimeException("failed to fill reservoir", e); + } + pos = 0; + } + } + + public OsSecureRandom() { + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + this.randomDevPath = conf.get( + HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY, + HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT); + File randomDevFile = new File(randomDevPath); + try { + this.stream = new FileInputStream(randomDevFile); + fillReservoir(0); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + synchronized public void nextBytes(byte[] bytes) { + int off = 0; + int n = 0; + while (off < bytes.length) { + fillReservoir(0); + n = Math.min(bytes.length - off, reservoir.length - pos); + System.arraycopy(reservoir, pos, bytes, off, n); + off += n; + pos += n; + } + } + + @Override + synchronized protected int next(int nbits) { + fillReservoir(4); + int n = reservoir[pos] | + (reservoir[pos + 1] << 8) | + (reservoir[pos + 2] << 16) | + (reservoir[pos + 3] << 24); + pos += 4; + return n & (0xffffffff >> (32 - nbits)); + } + + @Override + public void close() throws IOException { + stream.close(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index 3345e3c93d5..69503ba21ca 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -280,5 +280,4 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";"; public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts"; public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw"; - } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 279bbc7e2b3..9691539821b 
100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -297,10 +297,16 @@ public class CommonConfigurationKeysPublic { public static final String HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS = "hadoop.security.impersonation.provider.class"; /** See core-default.xml */ - public static final String HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY = - "hadoop.security.secure.random.algorithm"; - /** Defalt value for HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_KEY */ - public static final String HADOOP_SECURITY_SECURE_RANDOM_ALGORITHM_DEFAULT = + public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY = + "hadoop.security.java.secure.random.algorithm"; + /** Defalt value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */ + public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT = "SHA1PRNG"; + public static final String HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY = + "hadoop.security.secure.random.impl"; + public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY = + "hadoop.security.random.device.file.path"; + public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT = + "/dev/urandom"; } diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c new file mode 100644 index 00000000000..ee09aedff8b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c @@ -0,0 +1,335 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "org_apache_hadoop_crypto_random.h" + +#include +#include +#include + +#ifdef UNIX +#include +#include +#include +#include +#endif + +#ifdef WINDOWS +#include +#endif + +#include "org_apache_hadoop_crypto_random_OpensslSecureRandom.h" + +#ifdef UNIX +static void * (*dlsym_CRYPTO_malloc) (int, const char *, int); +static void (*dlsym_CRYPTO_free) (void *); +static int (*dlsym_CRYPTO_num_locks) (void); +static void (*dlsym_CRYPTO_set_locking_callback) (void (*)()); +static void (*dlsym_CRYPTO_set_id_callback) (unsigned long (*)()); +static void (*dlsym_ENGINE_load_rdrand) (void); +static ENGINE * (*dlsym_ENGINE_by_id) (const char *); +static int (*dlsym_ENGINE_init) (ENGINE *); +static int (*dlsym_ENGINE_set_default) (ENGINE *, unsigned int); +static int (*dlsym_ENGINE_finish) (ENGINE *); +static int (*dlsym_ENGINE_free) (ENGINE *); +static void (*dlsym_ENGINE_cleanup) (void); +static int (*dlsym_RAND_bytes) (unsigned char *, int); +static unsigned long (*dlsym_ERR_get_error) (void); +#endif + +#ifdef WINDOWS +typedef void * (__cdecl *__dlsym_CRYPTO_malloc) (int, const char *, int); +typedef void (__cdecl *__dlsym_CRYPTO_free) (void *); +typedef int (__cdecl *__dlsym_CRYPTO_num_locks) (void); +typedef void (__cdecl *__dlsym_CRYPTO_set_locking_callback) \ + (void (*)(int, int, char *, int); +typedef void (__cdecl *__dlsym_ENGINE_load_rdrand) (void); +typedef ENGINE * (__cdecl *__dlsym_ENGINE_by_id) (const char *); +typedef int (__cdecl *__dlsym_ENGINE_init) (ENGINE *); +typedef int (__cdecl *__dlsym_ENGINE_set_default) (ENGINE *, unsigned int); +typedef int (__cdecl *__dlsym_ENGINE_finish) (ENGINE *); +typedef int (__cdecl *__dlsym_ENGINE_free) (ENGINE *); +typedef void (__cdecl *__dlsym_ENGINE_cleanup) (void); +typedef int (__cdecl *__dlsym_RAND_bytes) (unsigned char *, int); +typedef unsigned long (__cdecl *__dlsym_ERR_get_error) (void); +static __dlsym_CRYPTO_malloc dlsym_CRYPTO_malloc; +static __dlsym_CRYPTO_free dlsym_CRYPTO_free; +static __dlsym_CRYPTO_num_locks dlsym_CRYPTO_num_locks; +static __dlsym_CRYPTO_set_locking_callback dlsym_CRYPTO_set_locking_callback; +static __dlsym_ENGINE_load_rdrand dlsym_ENGINE_load_rdrand; +static __dlsym_ENGINE_by_id dlsym_ENGINE_by_id; +static __dlsym_ENGINE_init dlsym_ENGINE_init; +static __dlsym_ENGINE_set_default dlsym_ENGINE_set_default; +static __dlsym_ENGINE_finish dlsym_ENGINE_finish; +static __dlsym_ENGINE_free dlsym_ENGINE_free; +static __dlsym_ENGINE_cleanup dlsym_ENGINE_cleanup; +static __dlsym_RAND_bytes dlsym_RAND_bytes; +static __dlsym_ERR_get_error dlsym_ERR_get_error; +#endif + +static ENGINE * openssl_rand_init(); +static void openssl_rand_clean(ENGINE *eng, int clean_locks); +static int openssl_rand_bytes(unsigned char *buf, int num); + +JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_initSR + (JNIEnv *env, jclass clazz) +{ + char msg[1000]; +#ifdef UNIX + void *openssl = dlopen(HADOOP_OPENSSL_LIBRARY, RTLD_LAZY | RTLD_GLOBAL); +#endif + +#ifdef WINDOWS + HMODULE openssl = LoadLibrary(HADOOP_OPENSSL_LIBRARY); +#endif + + if (!openssl) { + snprintf(msg, sizeof(msg), "Cannot load %s (%s)!", HADOOP_OPENSSL_LIBRARY, \ + dlerror()); + THROW(env, "java/lang/UnsatisfiedLinkError", msg); + return; + } + +#ifdef UNIX + dlerror(); // Clear any existing error + LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_malloc, env, openssl, "CRYPTO_malloc"); + LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_free, env, openssl, "CRYPTO_free"); + LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_num_locks, env, openssl, "CRYPTO_num_locks"); + 
LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_set_locking_callback, \ + env, openssl, "CRYPTO_set_locking_callback"); + LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_set_id_callback, env, \ + openssl, "CRYPTO_set_id_callback"); + LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_load_rdrand, env, \ + openssl, "ENGINE_load_rdrand"); + LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_by_id, env, openssl, "ENGINE_by_id"); + LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_init, env, openssl, "ENGINE_init"); + LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_set_default, env, \ + openssl, "ENGINE_set_default"); + LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_finish, env, openssl, "ENGINE_finish"); + LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_free, env, openssl, "ENGINE_free"); + LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_cleanup, env, openssl, "ENGINE_cleanup"); + LOAD_DYNAMIC_SYMBOL(dlsym_RAND_bytes, env, openssl, "RAND_bytes"); + LOAD_DYNAMIC_SYMBOL(dlsym_ERR_get_error, env, openssl, "ERR_get_error"); +#endif + +#ifdef WINDOWS + LOAD_DYNAMIC_SYMBOL(__dlsym_CRYPTO_malloc, dlsym_CRYPTO_malloc, \ + env, openssl, "CRYPTO_malloc"); + LOAD_DYNAMIC_SYMBOL(__dlsym_CRYPTO_free, dlsym_CRYPTO_free, \ + env, openssl, "CRYPTO_free"); + LOAD_DYNAMIC_SYMBOL(__dlsym_CRYPTO_num_locks, dlsym_CRYPTO_num_locks, \ + env, openssl, "CRYPTO_num_locks"); + LOAD_DYNAMIC_SYMBOL(__dlsym_CRYPTO_set_locking_callback, \ + dlsym_CRYPTO_set_locking_callback, \ + env, openssl, "CRYPTO_set_locking_callback"); + LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_load_rdrand, dlsym_ENGINE_load_rdrand, \ + env, openssl, "ENGINE_load_rdrand"); + LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_by_id, dlsym_ENGINE_by_id, \ + env, openssl, "ENGINE_by_id"); + LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_init, dlsym_ENGINE_init, \ + env, openssl, "ENGINE_init"); + LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_set_default, dlsym_ENGINE_set_default, \ + env, openssl, "ENGINE_set_default"); + LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_finish, dlsym_ENGINE_finish, \ + env, openssl, "ENGINE_finish"); + LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_free, dlsym_ENGINE_free, \ + env, openssl, "ENGINE_free"); + LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_cleanup, dlsym_ENGINE_cleanup, \ + env, openssl, "ENGINE_cleanup"); + LOAD_DYNAMIC_SYMBOL(__dlsym_RAND_bytes, dlsym_RAND_bytes, \ + env, openssl, "RAND_bytes"); + LOAD_DYNAMIC_SYMBOL(__dlsym_ERR_get_error, dlsym_ERR_get_error, \ + env, openssl, "ERR_get_error"); +#endif + + openssl_rand_init(env); +} + +JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_nextRandBytes___3B + (JNIEnv *env, jobject object, jbyteArray bytes) +{ + if (NULL == bytes) { + THROW(env, "java/lang/NullPointerException", "Buffer cannot be null."); + return JNI_FALSE; + } + jbyte *b = (*env)->GetByteArrayElements(env, bytes, NULL); + if (NULL == b) { + THROW(env, "java/lang/InternalError", "Cannot get bytes array."); + return JNI_FALSE; + } + int b_len = (*env)->GetArrayLength(env, bytes); + int ret = openssl_rand_bytes((unsigned char *)b, b_len); + (*env)->ReleaseByteArrayElements(env, bytes, b, 0); + + if (1 != ret) { + return JNI_FALSE; + } + return JNI_TRUE; +} + +/** + * To ensure thread safety for random number generators, we need to call + * CRYPTO_set_locking_callback. 
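+ * locks_setup() below allocates one mutex per CRYPTO_num_locks() and
+ * registers the locking (and, on UNIX, thread-id) callbacks;
+ * locks_cleanup() tears them down again.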
+ * http://wiki.openssl.org/index.php/Random_Numbers + * Example: crypto/threads/mttest.c + */ + +#ifdef WINDOWS +static void windows_locking_callback(int mode, int type, char *file, int line); +static HANDLE *lock_cs; + +static void locks_setup(void) +{ + int i; + lock_cs = dlsym_CRYPTO_malloc(dlsym_CRYPTO_num_locks() * sizeof(HANDLE), \ + __FILE__, __LINE__); + + for (i = 0; i < dlsym_CRYPTO_num_locks(); i++) { + lock_cs[i] = CreateMutex(NULL, FALSE, NULL); + } + dlsym_CRYPTO_set_locking_callback((void (*)(int, int, char *, int)) \ + windows_locking_callback); + /* id callback defined */ +} + +static void locks_cleanup(void) +{ + int i; + dlsym_CRYPTO_set_locking_callback(NULL); + + for (i = 0; i < dlsym_CRYPTO_num_locks(); i++) { + CloseHandle(lock_cs[i]); + } + dlsym_CRYPTO_free(lock_cs); +} + +static void windows_locking_callback(int mode, int type, char *file, int line) +{ + UNUSED(file), UNUSED(line); + + if (mode & CRYPTO_LOCK) { + WaitForSingleObject(lock_cs[type], INFINITE); + } else { + ReleaseMutex(lock_cs[type]); + } +} +#endif /* WINDOWS */ + +#ifdef UNIX +static void pthreads_locking_callback(int mode, int type, char *file, int line); +static unsigned long pthreads_thread_id(void); +static pthread_mutex_t *lock_cs; + +static void locks_setup(void) +{ + int i; + lock_cs = dlsym_CRYPTO_malloc(dlsym_CRYPTO_num_locks() * \ + sizeof(pthread_mutex_t), __FILE__, __LINE__); + + for (i = 0; i < dlsym_CRYPTO_num_locks(); i++) { + pthread_mutex_init(&(lock_cs[i]), NULL); + } + + dlsym_CRYPTO_set_id_callback((unsigned long (*)())pthreads_thread_id); + dlsym_CRYPTO_set_locking_callback((void (*)())pthreads_locking_callback); +} + +static void locks_cleanup(void) +{ + int i; + dlsym_CRYPTO_set_locking_callback(NULL); + + for (i = 0; i < dlsym_CRYPTO_num_locks(); i++) { + pthread_mutex_destroy(&(lock_cs[i])); + } + + dlsym_CRYPTO_free(lock_cs); +} + +static void pthreads_locking_callback(int mode, int type, char *file, int line) +{ + UNUSED(file), UNUSED(line); + + if (mode & CRYPTO_LOCK) { + pthread_mutex_lock(&(lock_cs[type])); + } else { + pthread_mutex_unlock(&(lock_cs[type])); + } +} + +static unsigned long pthreads_thread_id(void) +{ + return (unsigned long)syscall(SYS_gettid); +} + +#endif /* UNIX */ + +/** + * If using an Intel chipset with RDRAND, the high-performance hardware + * random number generator will be used. 
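+ * If the engine cannot be loaded or initialized, openssl_rand_clean() is
+ * called and RAND_bytes() falls back to OpenSSL's default random method.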
+ */ +static ENGINE * openssl_rand_init() +{ + locks_setup(); + + dlsym_ENGINE_load_rdrand(); + ENGINE *eng = dlsym_ENGINE_by_id("rdrand"); + + int ret = -1; + do { + if (NULL == eng) { + break; + } + + int rc = dlsym_ENGINE_init(eng); + if (0 == rc) { + break; + } + + rc = dlsym_ENGINE_set_default(eng, ENGINE_METHOD_RAND); + if (0 == rc) { + break; + } + + ret = 0; + } while(0); + + if (ret == -1) { + openssl_rand_clean(eng, 0); + } + + return eng; +} + +static void openssl_rand_clean(ENGINE *eng, int clean_locks) +{ + if (NULL != eng) { + dlsym_ENGINE_finish(eng); + dlsym_ENGINE_free(eng); + } + + dlsym_ENGINE_cleanup(); + if (clean_locks) { + locks_cleanup(); + } +} + +static int openssl_rand_bytes(unsigned char *buf, int num) +{ + return dlsym_RAND_bytes(buf, num); +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/org_apache_hadoop_crypto_random.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/org_apache_hadoop_crypto_random.h new file mode 100644 index 00000000000..1200718e69f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/org_apache_hadoop_crypto_random.h @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ORG_APACHE_HADOOP_CRYPTO_RANDOM_H +#define ORG_APACHE_HADOOP_CRYPTO_RANDOM_H + +#include "org_apache_hadoop.h" + +#ifdef UNIX +#include +#include "config.h" +#endif + +#ifdef WINDOWS +#include "winutils.h" +#endif + +#define UNUSED(x) ((void)(x)) + +#include +#include +#include +#include + +#endif //ORG_APACHE_HADOOP_CRYPTO_RANDOM_H \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index d4b775643cf..224fb12e921 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1477,10 +1477,26 @@ for ldap providers in the same way as above does. - hadoop.security.secure.random.algorithm + hadoop.security.java.secure.random.algorithm - The secure random algorithm. + The java secure random algorithm. + + + + + hadoop.security.secure.random.impl + + + Implementation of secure random. + + + + + hadoop.security.random.device.file.path + + + OS security random dev path, it's /dev/urandom in linux. 
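A sketch of how the three properties above are consumed, mirroring the
OpensslAesCtrCryptoCodec#setConf change earlier in this commit; it is
illustrative only, but the property names, classes and calls are the ones
added by this patch:

  import java.util.Random;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.crypto.random.OsSecureRandom;
  import org.apache.hadoop.util.ReflectionUtils;

  public class SecureRandomConfigSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Choose the secure random implementation; OsSecureRandom is the default.
      conf.setClass("hadoop.security.secure.random.impl",
          OsSecureRandom.class, Random.class);
      // Device file read by OsSecureRandom; /dev/urandom is the default.
      conf.set("hadoop.security.random.device.file.path", "/dev/urandom");

      // Same lookup pattern as OpensslAesCtrCryptoCodec#setConf:
      Class<? extends Random> klass = conf.getClass(
          "hadoop.security.secure.random.impl", OsSecureRandom.class,
          Random.class);
      Random random = ReflectionUtils.newInstance(klass, conf);

      byte[] iv = new byte[16];
      random.nextBytes(iv);   // filled from the configured OS random device
    }
  }

Because OsSecureRandom implements Configurable, ReflectionUtils.newInstance
passes the Configuration through to setConf(), which opens the configured
device file and fills the internal reservoir.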
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java new file mode 100644 index 00000000000..f40c6ac4c91 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto.random; + +import java.util.Arrays; + +import org.junit.Test; + +public class TestOpensslSecureRandom { + + @Test(timeout=120000) + public void testRandomBytes() throws Exception { + OpensslSecureRandom random = new OpensslSecureRandom(); + + // len = 16 + checkRandomBytes(random, 16); + // len = 32 + checkRandomBytes(random, 32); + // len = 128 + checkRandomBytes(random, 128); + // len = 256 + checkRandomBytes(random, 256); + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. + */ + private void checkRandomBytes(OpensslSecureRandom random, int len) { + byte[] bytes = new byte[len]; + byte[] bytes1 = new byte[len]; + random.nextBytes(bytes); + random.nextBytes(bytes1); + + while (Arrays.equals(bytes, bytes1)) { + random.nextBytes(bytes1); + } + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. + */ + @Test(timeout=120000) + public void testRandomInt() throws Exception { + OpensslSecureRandom random = new OpensslSecureRandom(); + + int rand1 = random.nextInt(); + int rand2 = random.nextInt(); + while (rand1 == rand2) { + rand2 = random.nextInt(); + } + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. + */ + @Test(timeout=120000) + public void testRandomLong() throws Exception { + OpensslSecureRandom random = new OpensslSecureRandom(); + + long rand1 = random.nextLong(); + long rand2 = random.nextLong(); + while (rand1 == rand2) { + rand2 = random.nextLong(); + } + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. + */ + @Test(timeout=120000) + public void testRandomFloat() throws Exception { + OpensslSecureRandom random = new OpensslSecureRandom(); + + float rand1 = random.nextFloat(); + float rand2 = random.nextFloat(); + while (rand1 == rand2) { + rand2 = random.nextFloat(); + } + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. 
+ */ + @Test(timeout=120000) + public void testRandomDouble() throws Exception { + OpensslSecureRandom random = new OpensslSecureRandom(); + + double rand1 = random.nextDouble(); + double rand2 = random.nextDouble(); + while (rand1 == rand2) { + rand2 = random.nextDouble(); + } + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java new file mode 100644 index 00000000000..8fc5c70775a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto.random; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.commons.lang.SystemUtils; +import org.apache.hadoop.conf.Configuration; +import org.junit.Assume; +import org.junit.Test; + +public class TestOsSecureRandom { + + private static OsSecureRandom getOsSecureRandom() throws IOException { + Assume.assumeTrue(SystemUtils.IS_OS_LINUX); + OsSecureRandom random = new OsSecureRandom(); + random.setConf(new Configuration()); + return random; + } + + @Test(timeout=120000) + public void testRandomBytes() throws Exception { + OsSecureRandom random = getOsSecureRandom(); + // len = 16 + checkRandomBytes(random, 16); + // len = 32 + checkRandomBytes(random, 32); + // len = 128 + checkRandomBytes(random, 128); + // len = 256 + checkRandomBytes(random, 256); + random.close(); + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. + */ + private void checkRandomBytes(OsSecureRandom random, int len) { + byte[] bytes = new byte[len]; + byte[] bytes1 = new byte[len]; + random.nextBytes(bytes); + random.nextBytes(bytes1); + + while (Arrays.equals(bytes, bytes1)) { + random.nextBytes(bytes1); + } + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. + */ + @Test(timeout=120000) + public void testRandomInt() throws Exception { + OsSecureRandom random = getOsSecureRandom(); + + int rand1 = random.nextInt(); + int rand2 = random.nextInt(); + while (rand1 == rand2) { + rand2 = random.nextInt(); + } + random.close(); + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. 
+ */ + @Test(timeout=120000) + public void testRandomLong() throws Exception { + OsSecureRandom random = getOsSecureRandom(); + + long rand1 = random.nextLong(); + long rand2 = random.nextLong(); + while (rand1 == rand2) { + rand2 = random.nextLong(); + } + random.close(); + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. + */ + @Test(timeout=120000) + public void testRandomFloat() throws Exception { + OsSecureRandom random = getOsSecureRandom(); + + float rand1 = random.nextFloat(); + float rand2 = random.nextFloat(); + while (rand1 == rand2) { + rand2 = random.nextFloat(); + } + random.close(); + } + + /** + * Test will timeout if secure random implementation always returns a + * constant value. + */ + @Test(timeout=120000) + public void testRandomDouble() throws Exception { + OsSecureRandom random = getOsSecureRandom(); + + double rand1 = random.nextDouble(); + double rand2 = random.nextDouble(); + while (rand1 == rand2) { + rand2 = random.nextDouble(); + } + random.close(); + } + + @Test(timeout=120000) + public void testRefillReservoir() throws Exception { + OsSecureRandom random = getOsSecureRandom(); + + for (int i = 0; i < 8196; i++) { + random.nextLong(); + } + random.close(); + } +} From b066be8115eeee3099f4e16259b13063bd3f1104 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 15 Jul 2014 20:59:40 +0000 Subject: [PATCH 033/354] HDFS-6619. Clean up encryption-related tests. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1610849 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../apache/hadoop/fs/TestHDFSEncryption.java | 352 ------------- .../hadoop/hdfs/TestEncryptionZones.java | 433 ++++++++++++++++ .../hadoop/hdfs/TestEncryptionZonesAPI.java | 469 ------------------ .../TestFileContextEncryptionZones.java | 70 --- 5 files changed, 435 insertions(+), 891 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSEncryption.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextEncryptionZones.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index e75b065130c..0b7986a470b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -42,6 +42,8 @@ fs-encryption (Unreleased) HDFS-6474. Namenode needs to get the actual keys and iv from the KeyProvider. (wang) + HDFS-6619. Clean up encryption-related tests. (wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSEncryption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSEncryption.java deleted file mode 100644 index 4b32fe42135..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSEncryption.java +++ /dev/null @@ -1,352 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs; - -import static org.apache.hadoop.fs.CreateFlag.CREATE; -import static org.apache.hadoop.fs.FileContextTestHelper.getDefaultBlockSize; -import static org.apache.hadoop.fs.FileContextTestHelper.getFileData; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.File; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.Arrays; -import java.util.EnumSet; -import java.util.List; -import java.util.UUID; - -import javax.security.auth.login.LoginException; - -import org.apache.commons.lang.RandomStringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; -import org.apache.hadoop.crypto.key.KeyProviderFactory; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -public class TestHDFSEncryption { - private static MiniDFSCluster cluster; - private static Path defaultWorkingDirectory; - private static final HdfsConfiguration CONF = new HdfsConfiguration(); - private static FileContext fc; - private Path localFsRootPath; - private Path src1; - /* The KeyProvider, if any. 
*/ - private static KeyProvider provider = null; - - private static File tmpDir; - - @BeforeClass - public static void clusterSetupAtBegining() throws IOException, - LoginException, URISyntaxException { - tmpDir = new File(System.getProperty("test.build.data", "target"), - UUID.randomUUID().toString()).getAbsoluteFile(); - tmpDir.mkdirs(); - - CONF.set(KeyProviderFactory.KEY_PROVIDER_PATH, - JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks"); - initializeKeyProvider(CONF); - try { - createOneKey(); - KeyVersion blort = provider.getCurrentKey("blort"); - } catch (java.security.NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } - - cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1).build(); - cluster.waitClusterUp(); - - URI uri0 = cluster.getURI(0); - fc = FileContext.getFileContext(uri0, CONF); - defaultWorkingDirectory = fc.makeQualified(new Path("/user/" + - UserGroupInformation.getCurrentUser().getShortUserName())); - fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true); - } - - private static void initializeKeyProvider(final Configuration conf) - throws IOException { - final List providers = KeyProviderFactory.getProviders(conf); - if (providers == null) { - return; - } - - if (providers.size() == 0) { - return; - } - - if (providers.size() > 1) { - final String err = - "Multiple KeyProviders found. Only one is permitted."; - throw new RuntimeException(err); - } - provider = providers.get(0); - if (provider.isTransient()) { - final String err = - "A KeyProvider was found but it is a transient provider."; - throw new RuntimeException(err); - } - } - - private static void createOneKey() - throws java.security.NoSuchAlgorithmException, IOException { - final org.apache.hadoop.crypto.key.KeyProvider.Options options = - KeyProvider.options(CONF); - provider.createKey("blort", options); - provider.flush(); - } - - @AfterClass - public static void ClusterShutdownAtEnd() throws Exception { - if (cluster != null) { - cluster.shutdown(); - cluster = null; - } - } - - @Before - public void setUp() throws Exception { - File testBuildData = new File(System.getProperty("test.build.data", - "build/test/data"), RandomStringUtils.randomAlphanumeric(10)); - Path rootPath = new Path(testBuildData.getAbsolutePath(), - "root-uri"); - localFsRootPath = rootPath.makeQualified(LocalFileSystem.NAME, null); - fc.mkdir(getTestRootPath(fc, "test"), FileContext.DEFAULT_PERM, true); - src1 = getTestRootPath(fc, "testfile"); - } - - @After - public void tearDown() throws Exception { - final boolean del = - fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test")), true); - assertTrue(del); - fc.delete(localFsRootPath, true); - } - - protected final FileContextTestHelper fileContextTestHelper = - createFileContextHelper(); - - protected FileContextTestHelper createFileContextHelper() { - return new FileContextTestHelper(); - } - - protected Path getDefaultWorkingDirectory() { - return defaultWorkingDirectory; - } - - private Path getTestRootPath(FileContext fc, String path) { - return fileContextTestHelper.getTestRootPath(fc, path); - } - - protected IOException unwrapException(IOException e) { - if (e instanceof RemoteException) { - return ((RemoteException) e).unwrapRemoteException(); - } - return e; - } - - private static final int NUM_BLOCKS = 3; - - private static final byte[] data = getFileData(NUM_BLOCKS, - getDefaultBlockSize()); - - private void writeSomeData() throws Exception { - writeSomeData(false, false); - } - - private 
void writeSomeData(boolean doHFlush, boolean doHSync) throws Exception { - final FSDataOutputStream out = - fc.create(src1, EnumSet.of(CREATE), Options.CreateOpts.createParent()); - out.write(data, 0, data.length); - if (doHFlush) { - out.hflush(); - } - - if (doHSync) { - out.hsync(); - } - - out.close(); - } - - private void writeAndVerify(boolean doHFlush, boolean doHSync) throws Exception { - writeSomeData(doHFlush, doHSync); - - final FSDataInputStream in = fc.open(src1); - try { - final byte[] readBuf = new byte[getDefaultBlockSize() * NUM_BLOCKS]; - - in.readFully(readBuf); - assertTrue("Expected read-back data to be equal (hflush=" + doHFlush - + " hfsync=" + doHSync + ")", Arrays.equals(data, readBuf)); - } finally { - in.close(); - } - } - - @Test - public void testBasicEncryptionStreamNoFlushNoSync() throws Exception { - writeAndVerify(false, false); - } - - @Test - public void testBasicEncryptionStreamFlushSync() throws Exception { - writeAndVerify(true, true); - } - - @Test - public void testBasicEncryptionStreamNoFlushSync() throws Exception { - writeAndVerify(false, true); - } - - @Test - public void testBasicEncryptionStreamFlushNoSync() throws Exception { - writeAndVerify(true, false); - } - - @Test - public void testGetPos() throws Exception { - writeSomeData(); - - final FSDataInputStream in = fc.open(src1); - - int expectedGetPos = 0; - while (in.read() != -1) { - assertTrue(++expectedGetPos == in.getPos()); - } - } - - @Test - public void testDoubleClose() throws Exception { - writeSomeData(); - - final FSDataInputStream in = fc.open(src1); - in.close(); - try { - in.close(); - } catch (Exception e) { - fail("Caught unexpected exception on double-close: " + e); - } - } - - @Test - public void testHFlush() throws Exception { - final DistributedFileSystem fs = cluster.getFileSystem(); - final FSDataOutputStream out = - fc.create(src1, EnumSet.of(CREATE), Options.CreateOpts.createParent()); - out.write(data, 0, data.length); - out.hflush(); - out.close(); - } - - @Test - public void testSeekBogusArgs() throws Exception { - writeSomeData(); - - final FSDataInputStream in = fc.open(src1); - try { - in.seek(-1); - fail("Expected IOException"); - } catch (Exception e) { - GenericTestUtils.assertExceptionContains("Cannot seek to negative offset", e); - } - - try { - in.seek(1 << 20); - fail("Expected IOException"); - } catch (Exception e) { - GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e); - } - in.close(); - } - - @Test - public void testSeekForward() throws Exception { - writeSomeData(); - - final FSDataInputStream in = fc.open(src1); - - for (int seekInc = 1; seekInc < 1024; seekInc += 32) { - long seekTo = 0; - while (seekTo < data.length) { - in.seek(seekTo); - int b = in.read(); - byte expected = data[(int) seekTo]; - assertTrue("seek(" + seekTo + ") Expected: " + expected + ", but got: " + b, - b == expected); - seekTo += seekInc; - } - } - in.close(); - } - - @Test - public void testSeekBackwards() throws Exception { - writeSomeData(); - - final FSDataInputStream in = fc.open(src1); - - for (int seekInc = 1; seekInc < 1024; seekInc += 32) { - long seekTo = data.length - 1; - while (seekTo >= 0) { - in.seek(seekTo); - int b = in.read(); - byte expected = data[(int) seekTo]; - assertTrue("seek(" + seekTo + ") Expected: " + expected + ", but got: " + b, - b == expected); - seekTo -= seekInc; - } - } - in.close(); - } - - @Test - public void testPostionedReadable() throws Exception { - writeSomeData(); - - final FSDataInputStream in = fc.open(src1); - 
- try { - final byte[] oneByteToRead = new byte[1]; - for (int i = 0; i < data.length; i++) { - int nread = in.read(i, oneByteToRead, 0, 1); - final byte b = oneByteToRead[0]; - byte expected = data[(int) i]; - assertTrue("read() expected only one byte to be read, but got " + nread, nread == 1); - assertTrue("read() expected: " + expected + ", but got: " + b, - b == expected); - } - } finally { - in.close(); - } - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java new file mode 100644 index 00000000000..09ccf2860b3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -0,0 +1,433 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import java.io.File; +import java.io.IOException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivilegedExceptionAction; +import java.util.Arrays; +import java.util.List; + +import com.google.common.collect.Lists; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CipherSuite; +import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderFactory; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSTestWrapper; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileContextTestWrapper; +import org.apache.hadoop.fs.FileEncryptionInfo; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.FileSystemTestWrapper; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.client.HdfsAdmin; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + + +import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class TestEncryptionZones { + + private Configuration conf; + private FileSystemTestHelper fsHelper; + + private 
MiniDFSCluster cluster; + private HdfsAdmin dfsAdmin; + private DistributedFileSystem fs; + + protected FileSystemTestWrapper fsWrapper; + protected FileContextTestWrapper fcWrapper; + + @Before + public void setup() throws IOException { + conf = new HdfsConfiguration(); + fsHelper = new FileSystemTestHelper(); + // Set up java key store + String testRoot = fsHelper.getTestRootDir(); + File testRootDir = new File(testRoot).getAbsoluteFile(); + conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, + JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" + ); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); + fs = cluster.getFileSystem(); + fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem()); + fcWrapper = new FileContextTestWrapper( + FileContext.getFileContext(cluster.getURI(), conf)); + dfsAdmin = new HdfsAdmin(cluster.getURI(), conf); + } + + @After + public void teardown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + public void assertNumZones(final int numZones) throws IOException { + final List zones = dfsAdmin.listEncryptionZones(); + assertEquals("Unexpected number of encryption zones!", numZones, + zones.size()); + } + + /** + * Checks that an encryption zone with the specified keyId and path (if not + * null) is present. + * + * @throws IOException if a matching zone could not be found + */ + public void assertZonePresent(String keyId, String path) throws IOException { + final List zones = dfsAdmin.listEncryptionZones(); + boolean match = false; + for (EncryptionZone zone : zones) { + boolean matchKey = (keyId == null); + boolean matchPath = (path == null); + if (keyId != null && zone.getKeyId().equals(keyId)) { + matchKey = true; + } + if (path != null && zone.getPath().equals(path)) { + matchPath = true; + } + if (matchKey && matchPath) { + match = true; + break; + } + } + assertTrue("Did not find expected encryption zone with keyId " + keyId + + " path " + path, match + ); + } + + /** + * Helper function to create a key in the Key Provider. + */ + private void createKey(String keyId) + throws NoSuchAlgorithmException, IOException { + KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); + final KeyProvider.Options options = KeyProvider.options(conf); + provider.createKey(keyId, options); + provider.flush(); + } + + @Test(timeout = 60000) + public void testBasicOperations() throws Exception { + + int numZones = 0; + + /* Test failure of create EZ on a directory that doesn't exist. */ + final Path zone1 = new Path("/zone1"); + try { + dfsAdmin.createEncryptionZone(zone1, null); + fail("expected /test doesn't exist"); + } catch (IOException e) { + assertExceptionContains("cannot find", e); + } + + /* Normal creation of an EZ */ + fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); + dfsAdmin.createEncryptionZone(zone1, null); + assertNumZones(++numZones); + assertZonePresent(null, zone1.toString()); + + /* Test failure of create EZ on a directory which is already an EZ. */ + try { + dfsAdmin.createEncryptionZone(zone1, null); + } catch (IOException e) { + assertExceptionContains("already in an encryption zone", e); + } + + /* Test failure of create EZ operation in an existing EZ. 
*/ + final Path zone1Child = new Path(zone1, "child"); + fsWrapper.mkdir(zone1Child, FsPermission.getDirDefault(), false); + try { + dfsAdmin.createEncryptionZone(zone1Child, null); + fail("EZ in an EZ"); + } catch (IOException e) { + assertExceptionContains("already in an encryption zone", e); + } + + /* create EZ on a folder with a folder fails */ + final Path notEmpty = new Path("/notEmpty"); + final Path notEmptyChild = new Path(notEmpty, "child"); + fsWrapper.mkdir(notEmptyChild, FsPermission.getDirDefault(), true); + try { + dfsAdmin.createEncryptionZone(notEmpty, null); + fail("Created EZ on an non-empty directory with folder"); + } catch (IOException e) { + assertExceptionContains("create an encryption zone", e); + } + fsWrapper.delete(notEmptyChild, false); + + /* create EZ on a folder with a file fails */ + fsWrapper.createFile(notEmptyChild); + try { + dfsAdmin.createEncryptionZone(notEmpty, null); + fail("Created EZ on an non-empty directory with file"); + } catch (IOException e) { + assertExceptionContains("create an encryption zone", e); + } + + /* Test failure of creating an EZ passing a key that doesn't exist. */ + final Path zone2 = new Path("/zone2"); + fsWrapper.mkdir(zone2, FsPermission.getDirDefault(), false); + final String myKeyId = "mykeyid"; + try { + dfsAdmin.createEncryptionZone(zone2, myKeyId); + fail("expected key doesn't exist"); + } catch (IOException e) { + assertExceptionContains("doesn't exist.", e); + } + assertNumZones(1); + + /* Test success of creating an EZ when they key exists. */ + createKey(myKeyId); + dfsAdmin.createEncryptionZone(zone2, myKeyId); + assertNumZones(++numZones); + assertZonePresent(myKeyId, zone2.toString()); + + /* Test failure of create encryption zones as a non super user. */ + final UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] { "mygroup" }); + final Path nonSuper = new Path("/nonSuper"); + fsWrapper.mkdir(nonSuper, FsPermission.getDirDefault(), false); + + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final HdfsAdmin userAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + try { + userAdmin.createEncryptionZone(nonSuper, null); + fail("createEncryptionZone is superuser-only operation"); + } catch (AccessControlException e) { + assertExceptionContains("Superuser privilege is required", e); + } + return null; + } + }); + + // Test success of creating an encryption zone a few levels down. + Path deepZone = new Path("/d/e/e/p/zone"); + fsWrapper.mkdir(deepZone, FsPermission.getDirDefault(), true); + dfsAdmin.createEncryptionZone(deepZone, null); + assertNumZones(++numZones); + assertZonePresent(null, deepZone.toString()); + } + + /** + * Test listing encryption zones as a non super user. + */ + @Test(timeout = 60000) + public void testListEncryptionZonesAsNonSuperUser() throws Exception { + + final UserGroupInformation user = UserGroupInformation. 
+ createUserForTesting("user", new String[] { "mygroup" }); + + final Path testRoot = new Path(fsHelper.getTestRootDir()); + final Path superPath = new Path(testRoot, "superuseronly"); + final Path allPath = new Path(testRoot, "accessall"); + + fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true); + dfsAdmin.createEncryptionZone(superPath, null); + + fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true); + dfsAdmin.createEncryptionZone(allPath, null); + + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final HdfsAdmin userAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + try { + userAdmin.listEncryptionZones(); + } catch (AccessControlException e) { + assertExceptionContains("Superuser privilege is required", e); + } + return null; + } + }); + } + + /** + * Test success of Rename EZ on a directory which is already an EZ. + */ + private void doRenameEncryptionZone(FSTestWrapper wrapper) throws Exception { + final Path testRoot = new Path(fsHelper.getTestRootDir()); + final Path pathFoo = new Path(testRoot, "foo"); + final Path pathFooBaz = new Path(pathFoo, "baz"); + wrapper.mkdir(pathFoo, FsPermission.getDirDefault(), true); + dfsAdmin.createEncryptionZone(pathFoo, null); + wrapper.mkdir(pathFooBaz, FsPermission.getDirDefault(), true); + try { + wrapper.rename(pathFooBaz, testRoot); + } catch (IOException e) { + assertExceptionContains(pathFooBaz.toString() + " can't be moved from" + + " an encryption zone.", e + ); + } + } + + @Test(timeout = 60000) + public void testRenameFileSystem() throws Exception { + doRenameEncryptionZone(fsWrapper); + } + + @Test(timeout = 60000) + public void testRenameFileContext() throws Exception { + doRenameEncryptionZone(fcWrapper); + } + + private void validateFiles(Path p1, Path p2, int len) throws Exception { + FSDataInputStream in1 = fs.open(p1); + FSDataInputStream in2 = fs.open(p2); + for (int i = 0; i < len; i++) { + assertEquals("Mismatch at byte " + i, in1.read(), in2.read()); + } + in1.close(); + in2.close(); + } + + private FileEncryptionInfo getFileEncryptionInfo(Path path) throws Exception { + LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0); + return blocks.getFileEncryptionInfo(); + } + + @Test(timeout = 120000) + public void testReadWrite() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + // Create a base file for comparison + final Path baseFile = new Path("/base"); + final int len = 8192; + DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED); + // Create the first enc file + final Path zone = new Path("/zone"); + fs.mkdirs(zone); + dfsAdmin.createEncryptionZone(zone, null); + final Path encFile1 = new Path(zone, "myfile"); + DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED); + // Read them back in and compare byte-by-byte + validateFiles(baseFile, encFile1, len); + // Roll the key of the encryption zone + List zones = dfsAdmin.listEncryptionZones(); + assertEquals("Expected 1 EZ", 1, zones.size()); + String keyId = zones.get(0).getKeyId(); + cluster.getNamesystem().getProvider().rollNewVersion(keyId); + cluster.getNamesystem().getFSDirectory().ezManager.kickMonitor(); + // Read them back in and compare byte-by-byte + validateFiles(baseFile, encFile1, len); + // Write a new enc file and validate + final Path encFile2 = new Path(zone, "myfile2"); + DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED); + // FEInfos should be different + 
FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1); + FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2); + assertFalse("EDEKs should be different", Arrays + .equals(feInfo1.getEncryptedDataEncryptionKey(), + feInfo2.getEncryptedDataEncryptionKey())); + assertNotEquals("Key was rolled, versions should be different", + feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName()); + // Contents still equal + validateFiles(encFile1, encFile2, len); + } + + @Test(timeout = 60000) + public void testCipherSuiteNegotiation() throws Exception { + final HdfsAdmin dfsAdmin = + new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); + final Path zone = new Path("/zone"); + fs.mkdirs(zone); + dfsAdmin.createEncryptionZone(zone, null); + // Create a file in an EZ, which should succeed + DFSTestUtil + .createFile(fs, new Path(zone, "success1"), 0, (short) 1, 0xFEED); + // Pass no cipherSuites, fail + fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(0); + try { + DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED); + fail("Created a file without specifying a CipherSuite!"); + } catch (UnknownCipherSuiteException e) { + assertExceptionContains("No cipher suites", e); + } + // Pass some unknown cipherSuites, fail + fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + try { + DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED); + fail("Created a file without specifying a CipherSuite!"); + } catch (UnknownCipherSuiteException e) { + assertExceptionContains("No cipher suites", e); + } + // Pass some unknown and a good cipherSuites, success + fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3); + fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + DFSTestUtil + .createFile(fs, new Path(zone, "success2"), 0, (short) 1, 0xFEED); + fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); + fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING); + DFSTestUtil + .createFile(fs, new Path(zone, "success3"), 4096, (short) 1, 0xFEED); + // Check KeyProvider state + // Flushing the KP on the NN, since it caches, and init a test one + cluster.getNamesystem().getProvider().flush(); + KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0); + List keys = provider.getKeys(); + assertEquals("Expected NN to have created one key per zone", 1, + keys.size()); + List allVersions = Lists.newArrayList(); + for (String key : keys) { + List versions = provider.getKeyVersions(key); + assertEquals("Should only have one key version per key", 1, + versions.size()); + allVersions.addAll(versions); + } + // Check that the specified CipherSuite was correctly saved on the NN + for (int i = 2; i <= 3; i++) { + FileEncryptionInfo feInfo = + getFileEncryptionInfo(new Path(zone.toString() + + "/success" + i)); + assertEquals(feInfo.getCipherSuite(), CipherSuite.AES_CTR_NOPADDING); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java deleted file mode 100644 
index a75e46e4bd4..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesAPI.java +++ /dev/null @@ -1,469 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs; - -import java.io.File; -import java.io.IOException; -import java.security.NoSuchAlgorithmException; -import java.security.PrivilegedExceptionAction; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.UUID; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderFactory; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.client.HdfsAdmin; -import org.apache.hadoop.hdfs.protocol.EncryptionZone; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.fail; - -public class TestEncryptionZonesAPI { - - private static final Path TEST_PATH = new Path("/test"); - private static final Path TEST_PATH_WITH_CHILD = new Path(TEST_PATH, "foo"); - private static final Path TEST_PATH_WITH_MULTIPLE_CHILDREN = - new Path(TEST_PATH_WITH_CHILD, "baz"); - private static final String TEST_KEYID = "mykeyid"; - private final Configuration conf = new Configuration(); - private MiniDFSCluster cluster; - private static File tmpDir; - private DistributedFileSystem fs; - - @Before - public void setUpCluster() throws IOException { - tmpDir = new File(System.getProperty("test.build.data", "target"), - UUID.randomUUID().toString()).getAbsoluteFile(); - conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, - JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks"); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - fs = (DistributedFileSystem) createFileSystem(conf); - 
Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); - } - - protected FileSystem createFileSystem(Configuration conf) throws IOException { - return cluster.getFileSystem(); - } - - @After - public void shutDownCluster() { - if (cluster != null) { - cluster.shutdown(); - } - } - - /** Test failure of Create EZ on a directory that doesn't exist. */ - @Test(timeout = 60000) - public void testCreateEncryptionZoneDirectoryDoesntExist() throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - try { - dfsAdmin.createEncryptionZone(TEST_PATH, null); - fail("expected /test doesn't exist"); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("cannot find", e); - } - } - - /** Test failure of Create EZ on a directory which is already an EZ. */ - @Test(timeout = 60000) - public void testCreateEncryptionZoneWhichAlreadyExists() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH, null); - try { - dfsAdmin.createEncryptionZone(TEST_PATH, null); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("already in an encryption zone", - e); - } - } - - /** Test success of Create EZ in which a key is created. */ - @Test(timeout = 60000) - public void testCreateEncryptionZoneAndGenerateKeyDirectoryEmpty() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH, null); - } - - /** Test failure of Create EZ operation in an existing EZ. */ - @Test(timeout = 60000) - public void testCreateEncryptionZoneInExistingEncryptionZone() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH, null); - FileSystem.mkdirs(fs, TEST_PATH_WITH_CHILD, - new FsPermission((short) 0777)); - try { - dfsAdmin.createEncryptionZone(TEST_PATH_WITH_CHILD, null); - fail("EZ in an EZ"); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("already in an encryption zone", e); - } - } - - /** Test failure of creating an EZ using a non-empty directory. */ - @Test(timeout = 60000) - public void testCreateEncryptionZoneAndGenerateKeyDirectoryNotEmpty() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - FileSystem.create(fs, new Path("/test/foo"), - new FsPermission((short) 0777)); - try { - dfsAdmin.createEncryptionZone(TEST_PATH, null); - fail("expected key doesn't exist"); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("create an encryption zone", e); - } - } - - /** Test failure of creating an EZ passing a key that doesn't exist. 
*/ - @Test(timeout = 60000) - public void testCreateEncryptionZoneKeyDoesntExist() throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - try { - dfsAdmin.createEncryptionZone(TEST_PATH, TEST_KEYID); - fail("expected key doesn't exist"); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("doesn't exist.", e); - } - final List zones = dfsAdmin.listEncryptionZones(); - Preconditions.checkState(zones.size() == 0, "More than one zone found?"); - } - - /** Test success of creating an EZ when they key exists. */ - @Test(timeout = 60000) - public void testCreateEncryptionZoneKeyExist() throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - createAKey(TEST_KEYID); - dfsAdmin.createEncryptionZone(TEST_PATH, TEST_KEYID); - final List zones = dfsAdmin.listEncryptionZones(); - Preconditions.checkState(zones.size() == 1, "More than one zone found?"); - final EncryptionZone ez = zones.get(0); - GenericTestUtils.assertMatches(ez.toString(), - "EncryptionZone \\[path=/test, keyId="); - } - - /** Helper function to create a key in the Key Provider. */ - private void createAKey(String keyId) - throws NoSuchAlgorithmException, IOException { - KeyProvider provider = - cluster.getNameNode().getNamesystem().getProvider(); - final KeyProvider.Options options = KeyProvider.options(conf); - provider.createKey(keyId, options); - provider.flush(); - } - - /** Test failure of create encryption zones as a non super user. */ - @Test(timeout = 60000) - public void testCreateEncryptionZoneAsNonSuperUser() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - - final UserGroupInformation user = UserGroupInformation. - createUserForTesting("user", new String[] { "mygroup" }); - - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0700)); - - user.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - final HdfsAdmin userAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - try { - userAdmin.createEncryptionZone(TEST_PATH, null); - fail("createEncryptionZone is superuser-only operation"); - } catch (AccessControlException e) { - GenericTestUtils.assertExceptionContains( - "Superuser privilege is required", e); - } - return null; - } - }); - } - - /** - * Test success of creating an encryption zone a few levels down. - */ - @Test(timeout = 60000) - public void testCreateEncryptionZoneDownAFewLevels() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH_WITH_MULTIPLE_CHILDREN, - new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH_WITH_MULTIPLE_CHILDREN, null); - final List zones = dfsAdmin.listEncryptionZones(); - Preconditions.checkState(zones.size() == 1, "More than one zone found?"); - final EncryptionZone ez = zones.get(0); - GenericTestUtils.assertMatches(ez.toString(), - "EncryptionZone \\[path=/test/foo/baz, keyId="); - } - - /** Test failure of creating an EZ using a non-empty directory. 
*/ - @Test(timeout = 60000) - public void testCreateFileInEncryptionZone() throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH, new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH, null); - FileSystem.create(fs, TEST_PATH_WITH_CHILD, new FsPermission((short) 0777)); - - final List zones = dfsAdmin.listEncryptionZones(); - final EncryptionZone ez = zones.get(0); - GenericTestUtils.assertMatches(ez.toString(), - "EncryptionZone \\[path=/test, keyId="); - } - - /** Test listing encryption zones. */ - @Test(timeout = 60000) - public void testListEncryptionZones() throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - final int N_EZs = 5; - final Set ezPathNames = new HashSet(N_EZs); - for (int i = 0; i < N_EZs; i++) { - final Path p = new Path(TEST_PATH, "" + i); - ezPathNames.add(p.toString()); - FileSystem.mkdirs(fs, p, new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(p, null); - } - - final List zones = dfsAdmin.listEncryptionZones(); - Preconditions.checkState(zones.size() == N_EZs, "wrong number of EZs returned"); - for (EncryptionZone z : zones) { - final String ezPathName = z.getPath(); - Preconditions.checkState(ezPathNames.remove( - ezPathName), "Path " + ezPathName + " not returned from listEZ"); - } - Preconditions.checkState(ezPathNames.size() == 0); - } - - /** Test listing encryption zones as a non super user. */ - @Test(timeout = 60000) - public void testListEncryptionZonesAsNonSuperUser() throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - - final UserGroupInformation user = UserGroupInformation. - createUserForTesting("user", new String[] {"mygroup"}); - - final Path TEST_PATH_SUPERUSER_ONLY = new Path(TEST_PATH, "superuseronly"); - final Path TEST_PATH_ALL = new Path(TEST_PATH, "accessall"); - - FileSystem.mkdirs(fs, TEST_PATH_SUPERUSER_ONLY, - new FsPermission((short) 0700)); - dfsAdmin.createEncryptionZone(TEST_PATH_SUPERUSER_ONLY, null); - FileSystem.mkdirs(fs, TEST_PATH_ALL, - new FsPermission((short) 0707)); - dfsAdmin.createEncryptionZone(TEST_PATH_ALL, null); - - user.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - final HdfsAdmin userAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - try { - final List zones = userAdmin.listEncryptionZones(); - } catch (AccessControlException e) { - GenericTestUtils.assertExceptionContains( - "Superuser privilege is required", e); - } - return null; - } - }); - } - - /** Test success of Rename EZ on a directory which is already an EZ. 
*/ - @Test(timeout = 60000) - public void testRenameEncryptionZone() - throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - FileSystem.mkdirs(fs, TEST_PATH_WITH_CHILD, - new FsPermission((short) 0777)); - dfsAdmin.createEncryptionZone(TEST_PATH_WITH_CHILD, null); - FileSystem.mkdirs(fs, TEST_PATH_WITH_MULTIPLE_CHILDREN, - new FsPermission((short) 0777)); - try { - fs.rename(TEST_PATH_WITH_MULTIPLE_CHILDREN, TEST_PATH); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains( - "/test/foo/baz can't be moved from an encryption zone.", e); - } - } - - @Test(timeout = 60000) - public void testCipherSuiteNegotiation() throws Exception { - final HdfsAdmin dfsAdmin = - new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); - final Path zone = new Path("/zone"); - fs.mkdirs(zone); - dfsAdmin.createEncryptionZone(zone, null); - // Create a file in an EZ, which should succeed - DFSTestUtil.createFile(fs, new Path(zone, "success1"), 0, (short) 1, - 0xFEED); - // Pass no cipherSuites, fail - fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(0); - try { - DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, - 0xFEED); - fail("Created a file without specifying a CipherSuite!"); - } catch (UnknownCipherSuiteException e) { - GenericTestUtils.assertExceptionContains("No cipher suites", e); - } - // Pass some unknown cipherSuites, fail - fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3); - fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); - fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); - fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); - try { - DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, - 0xFEED); - fail("Created a file without specifying a CipherSuite!"); - } catch (UnknownCipherSuiteException e) { - GenericTestUtils.assertExceptionContains("No cipher suites", e); - } - // Pass some unknown and a good cipherSuites, success - fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3); - fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING); - fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); - fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); - DFSTestUtil.createFile(fs, new Path(zone, "success2"), 0, (short) 1, - 0xFEED); - fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3); - fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); - fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN); - fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING); - DFSTestUtil.createFile(fs, new Path(zone, "success3"), 4096, (short) 1, - 0xFEED); - // Check KeyProvider state - // Flushing the KP on the NN, since it caches, and init a test one - cluster.getNamesystem().getProvider().flush(); - KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0); - List keys = provider.getKeys(); - assertEquals("Expected NN to have created one key per zone", 1, - keys.size()); - List allVersions = Lists.newArrayList(); - for (String key : keys) { - List versions = provider.getKeyVersions(key); - assertEquals("Should only have one key version per key", 1, - versions.size()); - allVersions.addAll(versions); - } - // Check that the specified CipherSuite was correctly saved on the NN - for (int i=2; i<=3; i++) { - FileEncryptionInfo feInfo = - getFileEncryptionInfo(new Path(zone.toString() + - "/success" + i)); - assertEquals(feInfo.getCipherSuite(), CipherSuite.AES_CTR_NOPADDING); - } - } - - private void validateFiles(Path p1, Path p2, int len) throws 
Exception { - FSDataInputStream in1 = fs.open(p1); - FSDataInputStream in2 = fs.open(p2); - for (int i=0; i zones = dfsAdmin.listEncryptionZones(); - assertEquals("Expected 1 EZ", 1, zones.size()); - String keyId = zones.get(0).getKeyId(); - cluster.getNamesystem().getProvider().rollNewVersion(keyId); - cluster.getNamesystem().getFSDirectory().ezManager.kickMonitor(); - // Read them back in and compare byte-by-byte - validateFiles(baseFile, encFile1, len); - // Write a new enc file and validate - final Path encFile2 = new Path(zone, "myfile2"); - DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED); - // FEInfos should be different - FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1); - FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2); - assertFalse("EDEKs should be different", Arrays.equals( - feInfo1.getEncryptedDataEncryptionKey(), - feInfo2.getEncryptedDataEncryptionKey())); - assertNotEquals("Key was rolled, versions should be different", - feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName()); - // Contents still equal - validateFiles(encFile1, encFile2, len); - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextEncryptionZones.java deleted file mode 100644 index da283f5f85b..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextEncryptionZones.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdfs.server.namenode; - -import java.io.IOException; -import java.net.URI; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileContext; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.XAttrSetFlag; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.TestEncryptionZonesAPI; -import org.junit.BeforeClass; - -/** - * Tests of encryption zone operations using FileContext APIs. - */ -public class TestFileContextEncryptionZones extends TestEncryptionZonesAPI { - - @Override - protected FileSystem createFileSystem(Configuration conf) throws IOException { - FileContextFS fcFs = new FileContextFS(); - fcFs.initialize(FileSystem.getDefaultUri(conf), conf); - return fcFs; - } - - /** - * This reuses FSXAttrBaseTest's testcases by creating a filesystem - * implementation which uses FileContext by only overriding the xattr related - * methods. 
Other operations will use the normal filesystem. - */ - public static class FileContextFS extends DistributedFileSystem { - - private FileContext fc; - - @Override - public void initialize(URI uri, Configuration conf) throws IOException { - super.initialize(uri, conf); - fc = FileContext.getFileContext(conf); - } - - @Override - public boolean rename(Path src, Path dst) throws IOException { - fc.rename(src, dst); - return true; - } - } -} From beb0c19cde5db554c4a665cce0a16188070c6a7b Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Wed, 16 Jul 2014 00:36:30 +0000 Subject: [PATCH 034/354] HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to JCE if non native support. (yliu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1610887 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 + .../org/apache/hadoop/crypto/CipherSuite.java | 10 +++ .../org/apache/hadoop/crypto/CryptoCodec.java | 63 +++++++++++++++++-- .../crypto/OpensslAesCtrCryptoCodec.java | 3 + .../fs/CommonConfigurationKeysPublic.java | 10 +++ .../src/main/resources/core-default.xml | 16 ++++- ...toStreamsWithOpensslAesCtrCryptoCodec.java | 3 +- 7 files changed, 100 insertions(+), 8 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index f133f813136..aa65991d2e9 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -37,6 +37,9 @@ fs-encryption (Unreleased) HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name format. (Yi Liu) + HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to + JCE if non native support. 
(Yi Liu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java index 9c4b8fdd8d4..c75311aef65 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java @@ -72,4 +72,14 @@ public String toString() { builder.append("}"); return builder.toString(); } + + public static void checkName(String name) { + CipherSuite[] suites = CipherSuite.values(); + for (CipherSuite suite : suites) { + if (suite.getName().equals(name)) { + return; + } + } + throw new IllegalArgumentException("Invalid cipher suite name: " + name); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index 80e15cd6b70..45c06f02daa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -18,13 +18,23 @@ package org.apache.hadoop.crypto; import java.security.GeneralSecurityException; +import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Splitter; +import com.google.common.collect.Lists; + import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASS_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT; /** * Crypto codec class, encapsulates encryptor/decryptor pair. 
@@ -32,12 +42,57 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class CryptoCodec implements Configurable { + public static Logger LOG = LoggerFactory.getLogger(CryptoCodec.class); public static CryptoCodec getInstance(Configuration conf) { - final Class klass = conf.getClass( - HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY, JceAesCtrCryptoCodec.class, - CryptoCodec.class); - return ReflectionUtils.newInstance(klass, conf); + List> klasses = getCodecClasses(conf); + String name = conf.get(HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY, + HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT); + CipherSuite.checkName(name); + CryptoCodec codec = null; + for (Class klass : klasses) { + try { + CryptoCodec c = ReflectionUtils.newInstance(klass, conf); + if (c.getCipherSuite().getName().equalsIgnoreCase(name)) { + if (codec == null) { + LOG.debug("Using crypto codec {}.", klass.getName()); + codec = c; + } + } else { + LOG.warn("Crypto codec {} doesn't meet the cipher suite {}.", + klass.getName(), name); + } + } catch (Exception e) { + LOG.warn("Crypto codec {} is not available.", klass.getName()); + } + } + + if (codec != null) { + return codec; + } + + throw new RuntimeException("No available crypto codec which meets " + + "the cipher suite " + name + "."); + } + + private static List> getCodecClasses( + Configuration conf) { + List> result = Lists.newArrayList(); + String codecString = conf.get(HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY, + HADOOP_SECURITY_CRYPTO_CODEC_CLASS_DEFAULT); + for (String c : Splitter.on(',').trimResults().omitEmptyStrings(). + split(codecString)) { + try { + Class cls = conf.getClassByName(c); + result.add(cls.asSubclass(CryptoCodec.class)); + } catch (ClassCastException e) { + LOG.warn("Class " + c + " is not a CryptoCodec."); + } catch (ClassNotFoundException e) { + LOG.warn("Crypto codec " + c + " not found."); + } + } + + return result; } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java index 04c2db09e1e..7db7b063e5c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java @@ -47,6 +47,9 @@ public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec { private Random random; public OpensslAesCtrCryptoCodec() { + if (!OpensslCipher.isNativeCodeLoaded()) { + throw new RuntimeException("Failed to load OpenSSL Cipher."); + } } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 9691539821b..35c3bce6c79 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -285,6 +285,14 @@ public class CommonConfigurationKeysPublic { /** See core-default.xml */ public static final String HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY = "hadoop.security.crypto.codec.class"; + public static final String HADOOP_SECURITY_CRYPTO_CODEC_CLASS_DEFAULT = + "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec," + + "org.apache.hadoop.crypto.JceAesCtrCryptoCodec"; + /** See 
core-default.xml */ + public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY = + "hadoop.security.crypto.cipher.suite"; + public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT = + "AES/CTR/NoPadding"; /** See core-default.xml */ public static final String HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY = "hadoop.security.crypto.jce.provider"; @@ -302,8 +310,10 @@ public class CommonConfigurationKeysPublic { /** Defalt value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */ public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT = "SHA1PRNG"; + /** See core-default.xml */ public static final String HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY = "hadoop.security.secure.random.impl"; + /** See core-default.xml */ public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY = "hadoop.security.random.device.file.path"; public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT = diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 224fb12e921..3c93ab8f111 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1453,10 +1453,20 @@ for ldap providers in the same way as above does. hadoop.security.crypto.codec.class - + org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, + org.apache.hadoop.crypto.JceAesCtrCryptoCodec - The default implementation of CryptoCodec which is used for encryption - and decryption. + Comma list of CryptoCodec implementations which are used for encryption + and decryption. The first implementation will be used if avaiable, others + are fallbacks. + + + + + hadoop.security.crypto.cipher.suite + AES/CTR/NoPadding + + Cipher suite for crypto codec. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java index 8150d57ee66..4e962cb42d5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java @@ -29,7 +29,8 @@ public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec public static void init() throws Exception { Configuration conf = new Configuration(); conf.set(HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY, - OpensslAesCtrCryptoCodec.class.getName()); + OpensslAesCtrCryptoCodec.class.getName() + "," + + JceAesCtrCryptoCodec.class.getName()); codec = CryptoCodec.getInstance(conf); } } From 77f0e2cca57f74fc66d92ff240f64eed73c5e528 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 16 Jul 2014 19:04:06 +0000 Subject: [PATCH 035/354] HDFS-6405. Test Crypto streams in HDFS. 
(yliu via wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1611140 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/crypto/CryptoStreamsTestBase.java | 20 ++++ .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../hdfs/crypto/TestHdfsCryptoStreams.java | 91 +++++++++++++++++++ 3 files changed, 113 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java index f5a8ad49ec5..f5acc73b147 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.HasEnhancedByteBufferAccess; import org.apache.hadoop.fs.PositionedReadable; import org.apache.hadoop.fs.ReadOption; @@ -146,6 +147,25 @@ private void readCheck(InputStream in) throws Exception { in.close(); } + /** Test crypto writing with different buffer size. */ + @Test(timeout = 120000) + public void testWrite() throws Exception { + // Default buffer size + writeCheck(defaultBufferSize); + + // Small buffer size + writeCheck(smallBufferSize); + } + + private void writeCheck(int bufferSize) throws Exception { + OutputStream out = getOutputStream(bufferSize); + writeData(out); + + if (out instanceof FSDataOutputStream) { + Assert.assertEquals(((FSDataOutputStream) out).getPos(), getDataLen()); + } + } + /** Test crypto with different IV. */ @Test(timeout=120000) public void testCryptoIV() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 0b7986a470b..f40841715e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -44,6 +44,8 @@ fs-encryption (Unreleased) HDFS-6619. Clean up encryption-related tests. (wang) + HDFS-6405. Test Crypto streams in HDFS. (yliu via wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java new file mode 100644 index 00000000000..35c13e65b51 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.crypto; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoCodec; +import org.apache.hadoop.crypto.CryptoStreamsTestBase; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream; +import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +public class TestHdfsCryptoStreams extends CryptoStreamsTestBase { + private static MiniDFSCluster dfsCluster; + private static FileSystem fs; + private static int pathCount = 0; + private static Path path; + private static Path file; + + @BeforeClass + public static void init() throws Exception { + Configuration conf = new HdfsConfiguration(); + dfsCluster = new MiniDFSCluster.Builder(conf).build(); + dfsCluster.waitClusterUp(); + fs = dfsCluster.getFileSystem(); + codec = CryptoCodec.getInstance(conf); + } + + @AfterClass + public static void shutdown() throws Exception { + if (dfsCluster != null) { + dfsCluster.shutdown(); + } + } + + @Before + @Override + public void setUp() throws IOException { + ++pathCount; + path = new Path("/p" + pathCount); + file = new Path(path, "file"); + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0700)); + + super.setUp(); + } + + @After + public void cleanUp() throws IOException { + fs.delete(path, true); + } + + @Override + protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv) + throws IOException { + return new CryptoFSDataOutputStream(fs.create(file), codec, bufferSize, + key, iv); + } + + @Override + protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv) + throws IOException { + return new CryptoFSDataInputStream(fs.open(file), codec, bufferSize, key, + iv); + } +} From 962ef6939e55f9e9643ef8a04ffc5877b8a762fc Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Fri, 18 Jul 2014 17:13:55 +0000 Subject: [PATCH 036/354] HDFS-6490. Fix the keyid format for generated keys in FSNamesystem.createEncryptionZone (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1611722 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 +++ .../hdfs/server/namenode/FSNamesystem.java | 25 +++++++++++++------ 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index f40841715e5..4803dcbc809 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -46,6 +46,9 @@ fs-encryption (Unreleased) HDFS-6405. Test Crypto streams in HDFS. (yliu via wang) + HDFS-6490. 
Fix the keyid format for generated keys in + FSNamesystem.createEncryptionZone (clamb) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index fa52d3e5452..387c372404c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -420,6 +420,8 @@ private void logAuditEvent(boolean succeeded, private final CacheManager cacheManager; private final DatanodeStatistics datanodeStatistics; + private String nameserviceId; + private RollingUpgradeInfo rollingUpgradeInfo = null; /** * A flag that indicates whether the checkpointer should checkpoint a rollback @@ -791,7 +793,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { // block allocation has to be persisted in HA using a shared edits directory // so that the standby has up-to-date namespace information - String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf); + nameserviceId = DFSUtil.getNamenodeNameServiceId(conf); this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId); // Sanity check the HA-related config. @@ -8502,22 +8504,31 @@ private void createEncryptionZoneInt(final String srcArg, String keyId, /** * Create a new key on the KeyProvider for an encryption zone. * - * @param keyId id of the key + * @param keyIdArg id of the key * @param src path of the encryption zone. * @return KeyVersion of the created key * @throws IOException */ - private KeyVersion createNewKey(String keyId, String src) + private KeyVersion createNewKey(String keyIdArg, String src) throws IOException { - Preconditions.checkNotNull(keyId); + Preconditions.checkNotNull(keyIdArg); Preconditions.checkNotNull(src); - // TODO pass in hdfs://HOST:PORT (HDFS-6490) - providerOptions.setDescription(src); + final StringBuilder sb = new StringBuilder("hdfs://"); + if (nameserviceId != null) { + sb.append(nameserviceId); + } + sb.append(src); + if (!src.endsWith("/")) { + sb.append('/'); + } + sb.append(keyIdArg); + final String keyId = sb.toString(); + providerOptions.setDescription(keyId); providerOptions.setBitLength(codec.getCipherSuite() .getAlgorithmBlockSize()*8); KeyVersion version = null; try { - version = provider.createKey(keyId, providerOptions); + version = provider.createKey(keyIdArg, providerOptions); } catch (NoSuchAlgorithmException e) { throw new IOException(e); } From a4984f5f0a21fb9f3c84d8f959b8009264a43db2 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Mon, 21 Jul 2014 23:56:53 +0000 Subject: [PATCH 037/354] HADOOP-10871. 
incorrect prototype in OpensslSecureRandom.c (cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612436 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 2 ++ .../org/apache/hadoop/crypto/random/OpensslSecureRandom.c | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index aa65991d2e9..4c365bf0aeb 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -43,3 +43,5 @@ fs-encryption (Unreleased) OPTIMIZATIONS BUG FIXES + + HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe) diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c index ee09aedff8b..6c31d10599c 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c @@ -82,7 +82,7 @@ static __dlsym_RAND_bytes dlsym_RAND_bytes; static __dlsym_ERR_get_error dlsym_ERR_get_error; #endif -static ENGINE * openssl_rand_init(); +static ENGINE * openssl_rand_init(void); static void openssl_rand_clean(ENGINE *eng, int clean_locks); static int openssl_rand_bytes(unsigned char *buf, int num); @@ -157,7 +157,7 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_ env, openssl, "ERR_get_error"); #endif - openssl_rand_init(env); + openssl_rand_init(); } JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_nextRandBytes___3B @@ -283,7 +283,7 @@ static unsigned long pthreads_thread_id(void) * If using an Intel chipset with RDRAND, the high-performance hardware * random number generator will be used. */ -static ENGINE * openssl_rand_init() +static ENGINE * openssl_rand_init(void) { locks_setup(); @@ -332,4 +332,4 @@ static void openssl_rand_clean(ENGINE *eng, int clean_locks) static int openssl_rand_bytes(unsigned char *buf, int num) { return dlsym_RAND_bytes(buf, num); -} \ No newline at end of file +} From 7b466b3b7087a7b8c6e4e466600f6e13284e0dee Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 22 Jul 2014 00:27:51 +0000 Subject: [PATCH 038/354] HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode. 
(wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612438 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 - .../namenode/EncryptionZoneManager.java | 188 ++---------------- .../hdfs/server/namenode/FSDirectory.java | 21 +- .../hdfs/server/namenode/FSNamesystem.java | 128 ++++++------ .../src/main/resources/hdfs-default.xml | 11 - .../hadoop/hdfs/TestEncryptionZones.java | 1 - 7 files changed, 83 insertions(+), 271 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 4803dcbc809..24cfbfb5b20 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -49,6 +49,9 @@ fs-encryption (Unreleased) HDFS-6490. Fix the keyid format for generated keys in FSNamesystem.createEncryptionZone (clamb) + HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode. + (wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 86eda952096..2c0bdb0794f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -563,8 +563,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class"; public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection"; public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class"; - public static final String DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_KEY = "dfs.namenode.key.version.refresh.interval.ms"; - public static final int DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_DEFAULT = 5*60*1000; // Journal-node related configs. These are read on the JN side. 
public static final String DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index faba3a929c0..890200d787b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -3,27 +3,16 @@ import java.io.IOException; import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; @@ -53,37 +42,16 @@ public class EncryptionZoneManager { * contains the EZ's pathname. */ private class EncryptionZoneInt { - private final String keyId; + private final String keyName; private final long inodeId; - private final HashSet keyVersions; - private KeyVersion latestVersion; - - EncryptionZoneInt(long inodeId, String keyId) { - this.keyId = keyId; + EncryptionZoneInt(long inodeId, String keyName) { + this.keyName = keyName; this.inodeId = inodeId; - keyVersions = Sets.newHashSet(); - latestVersion = null; } - KeyVersion getLatestKeyVersion() { - return latestVersion; - } - - void addKeyVersion(KeyVersion version) { - Preconditions.checkNotNull(version); - if (!keyVersions.contains(version)) { - LOG.debug("Key {} has new key version {}", keyId, version); - keyVersions.add(version); - } - // Always set the latestVersion to not get stuck on an old version in - // racy situations. Should eventually converge thanks to the - // monitor. 
- latestVersion = version; - } - - String getKeyId() { - return keyId; + String getKeyName() { + return keyName; } long getINodeId() { @@ -123,7 +91,6 @@ public boolean hasReadLock() { private final Map encryptionZones; private final FSDirectory dir; - private final ScheduledExecutorService monitor; private final KeyProvider provider; /** @@ -131,118 +98,11 @@ public boolean hasReadLock() { * * @param dir Enclosing FSDirectory */ - public EncryptionZoneManager(FSDirectory dir, Configuration conf, - KeyProvider provider) { - + public EncryptionZoneManager(FSDirectory dir, KeyProvider provider) { this.dir = dir; this.provider = provider; lock = new ReentrantReadWriteLock(); encryptionZones = new HashMap(); - - monitor = Executors.newScheduledThreadPool(1, - new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat(EncryptionZoneMonitor.class.getSimpleName() + "-%d") - .build()); - final int refreshMs = conf.getInt( - DFSConfigKeys.DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_KEY, - DFSConfigKeys.DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_DEFAULT - ); - Preconditions.checkArgument(refreshMs >= 0, "%s cannot be negative", - DFSConfigKeys.DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_KEY); - monitor.scheduleAtFixedRate(new EncryptionZoneMonitor(), 0, refreshMs, - TimeUnit.MILLISECONDS); - } - - /** - * Periodically wakes up to fetch the latest version of each encryption - * zone key. - */ - private class EncryptionZoneMonitor implements Runnable { - @Override - public void run() { - LOG.debug("Monitor waking up to refresh encryption zone key versions"); - HashMap toFetch = Maps.newHashMap(); - HashMap toUpdate = - Maps.newHashMap(); - // Determine the keyIds to fetch - readLock(); - try { - for (EncryptionZoneInt ezi : encryptionZones.values()) { - toFetch.put(ezi.getINodeId(), ezi.getKeyId()); - } - } finally { - readUnlock(); - } - LOG.trace("Found {} keys to check", toFetch.size()); - // Fetch the key versions while not holding the lock - for (Map.Entry entry : toFetch.entrySet()) { - try { - KeyVersion version = provider.getCurrentKey(entry.getValue()); - toUpdate.put(entry.getKey(), version); - } catch (IOException e) { - LOG.warn("Error while getting the current key for {} {}", - entry.getValue(), e); - } - } - LOG.trace("Fetched {} key versions from KeyProvider", toUpdate.size()); - // Update the key versions for each encryption zone - writeLock(); - try { - for (Map.Entry entry : toUpdate.entrySet()) { - EncryptionZoneInt ezi = encryptionZones.get(entry.getKey()); - // zone might have been removed in the intervening time - if (ezi == null) { - continue; - } - ezi.addKeyVersion(entry.getValue()); - } - } finally { - writeUnlock(); - } - } - } - - /** - * Forces the EncryptionZoneMonitor to run, waiting until completion. - */ - @VisibleForTesting - public void kickMonitor() throws Exception { - Future future = monitor.submit(new EncryptionZoneMonitor()); - future.get(); - } - - /** - * Immediately fetches the latest KeyVersion for an encryption zone, - * also updating the encryption zone. 
- * - * @param iip of the encryption zone - * @return latest KeyVersion - * @throws IOException on KeyProvider error - */ - KeyVersion updateLatestKeyVersion(INodesInPath iip) throws IOException { - EncryptionZoneInt ezi; - readLock(); - try { - ezi = getEncryptionZoneForPath(iip); - } finally { - readUnlock(); - } - if (ezi == null) { - throw new IOException("Cannot update KeyVersion since iip is not within" + - " an encryption zone"); - } - - // Do not hold the lock while doing KeyProvider operations - KeyVersion version = provider.getCurrentKey(ezi.getKeyId()); - - writeLock(); - try { - ezi.addKeyVersion(version); - return version; - } finally { - writeUnlock(); - } } /** @@ -305,37 +165,20 @@ private String getFullPathName(EncryptionZoneInt ezi) { return dir.getInode(ezi.getINodeId()).getFullPathName(); } - KeyVersion getLatestKeyVersion(final INodesInPath iip) { + /** + * Get the key name for an encryption zone. Returns null if iip is + * not within an encryption zone. + *
+ * Called while holding the FSDirectory lock. + */ + String getKeyName(final INodesInPath iip) { readLock(); try { EncryptionZoneInt ezi = getEncryptionZoneForPath(iip); if (ezi == null) { return null; } - return ezi.getLatestKeyVersion(); - } finally { - readUnlock(); - } - } - - /** - * @return true if the provided keyVersionName is the name of a - * valid KeyVersion for the encryption zone of iip, - * and iip is within an encryption zone. - */ - boolean isValidKeyVersion(final INodesInPath iip, String keyVersionName) { - readLock(); - try { - EncryptionZoneInt ezi = getEncryptionZoneForPath(iip); - if (ezi == null) { - return false; - } - for (KeyVersion ezVersion : ezi.keyVersions) { - if (keyVersionName.equals(ezVersion.getVersionName())) { - return true; - } - } - return false; + return ezi.getKeyName(); } finally { readUnlock(); } @@ -447,7 +290,6 @@ XAttr createEncryptionZone(String src, String keyId, KeyVersion keyVersion) dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); // Re-get the new encryption zone add the latest key version ezi = getEncryptionZoneForPath(srcIIP); - ezi.addKeyVersion(keyVersion); return keyIdXAttr; } finally { writeUnlock(); @@ -466,7 +308,7 @@ List listEncryptionZones() throws IOException { final List ret = Lists.newArrayListWithExpectedSize(encryptionZones.size()); for (EncryptionZoneInt ezi : encryptionZones.values()) { - ret.add(new EncryptionZone(getFullPathName(ezi), ezi.getKeyId())); + ret.add(new EncryptionZone(getFullPathName(ezi), ezi.getKeyName())); } return ret; } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index b7a7857a269..4b13da6a513 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -227,7 +227,7 @@ public int getWriteHoldCount() { nameCache = new NameCache(threshold); namesystem = ns; - ezManager = new EncryptionZoneManager(this, conf, ns.getProvider()); + ezManager = new EncryptionZoneManager(this, ns.getProvider()); } private FSNamesystem getFSNamesystem() { @@ -2623,25 +2623,10 @@ boolean isInAnEZ(INodesInPath iip) } } - KeyVersion getLatestKeyVersion(INodesInPath iip) { + String getKeyName(INodesInPath iip) { readLock(); try { - return ezManager.getLatestKeyVersion(iip); - } finally { - readUnlock(); - } - } - - KeyVersion updateLatestKeyVersion(INodesInPath iip) throws - IOException { - // No locking, this operation does not involve any FSDirectory operations - return ezManager.updateLatestKeyVersion(iip); - } - - boolean isValidKeyVersion(INodesInPath iip, String keyVersionName) { - readLock(); - try { - return ezManager.isValidKeyVersion(iip, keyVersionName); + return ezManager.getKeyName(iip); } finally { readUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 10a4db3d31e..9c3f1320b7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; 
import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; +import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension + .EncryptedKeyVersion; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; @@ -2356,59 +2358,26 @@ private CipherSuite chooseCipherSuite(INodesInPath srcIIP, List } /** - * Create a new FileEncryptionInfo for a path. Also chooses an - * appropriate CipherSuite to use from the list provided by the - * client. + * Invoke KeyProvider APIs to generate an encrypted data encryption key for an + * encryption zone. Should not be called with any locks held. * - * @param src Target path - * @param pathComponents Target path split up into path components - * @param cipherSuites List of CipherSuites provided by the client - * @return a new FileEncryptionInfo, or null if path is not within an - * encryption - * zone. + * @param ezKeyName key name of an encryption zone + * @return New EDEK, or null if ezKeyName is null * @throws IOException */ - private FileEncryptionInfo newFileEncryptionInfo(String src, - byte[][] pathComponents, List cipherSuites) - throws IOException { - INodesInPath iip = null; - CipherSuite suite = null; - KeyVersion latestEZKeyVersion = null; - readLock(); - try { - src = FSDirectory.resolvePath(src, pathComponents, dir); - iip = dir.getINodesInPath4Write(src); - // Nothing to do if the path is not within an EZ - if (!dir.isInAnEZ(iip)) { - return null; - } - suite = chooseCipherSuite(iip, cipherSuites); - if (suite != null) { - Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN), - "Chose an UNKNOWN CipherSuite!"); - } - latestEZKeyVersion = dir.getLatestKeyVersion(iip); - } finally { - readUnlock(); + private EncryptedKeyVersion generateEncryptedDataEncryptionKey(String + ezKeyName) throws IOException { + if (ezKeyName == null) { + return null; } - - // If the latest key version is null, need to fetch it and update - if (latestEZKeyVersion == null) { - latestEZKeyVersion = dir.updateLatestKeyVersion(iip); - } - Preconditions.checkState(latestEZKeyVersion != null); - - // Generate the EDEK while not holding the lock - KeyProviderCryptoExtension.EncryptedKeyVersion edek = null; + EncryptedKeyVersion edek = null; try { - edek = provider.generateEncryptedKey(""); + edek = provider.generateEncryptedKey(ezKeyName); } catch (GeneralSecurityException e) { throw new IOException(e); } Preconditions.checkNotNull(edek); - - return new FileEncryptionInfo(suite, edek.getEncryptedKey().getMaterial(), - edek.getIv(), edek.getKeyVersionName()); + return edek; } /** @@ -2490,11 +2459,11 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, waitForLoadingFSImage(); /* - * We want to avoid holding any locks while creating a new - * FileEncryptionInfo, since this can be very slow. Since the path can + * We want to avoid holding any locks while doing KeyProvider operations, + * since they can be very slow. Since the path can * flip flop between being in an encryption zone and not in the meantime, - * we need to recheck the preconditions and generate a new - * FileEncryptionInfo in some circumstances. + * we need to recheck the preconditions and redo KeyProvider operations + * in some situations. * * A special RetryStartFileException is used to indicate that we should * retry creation of a FileEncryptionInfo. 
@@ -2510,18 +2479,45 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, } shouldContinue = false; iters++; - // Optimistically generate a FileEncryptionInfo for this path. - FileEncryptionInfo feInfo = - newFileEncryptionInfo(src, pathComponents, cipherSuites); - // Try to create the file with this feInfo + // Optimistically determine CipherSuite and ezKeyName if the path is + // currently within an encryption zone + CipherSuite suite = null; + String ezKeyName = null; + readLock(); + try { + src = FSDirectory.resolvePath(src, pathComponents, dir); + INodesInPath iip = dir.getINodesInPath4Write(src); + // Nothing to do if the path is not within an EZ + if (dir.isInAnEZ(iip)) { + suite = chooseCipherSuite(iip, cipherSuites); + if (suite != null) { + Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN), + "Chose an UNKNOWN CipherSuite!"); + } + ezKeyName = dir.getKeyName(iip); + Preconditions.checkState(ezKeyName != null); + } + } finally { + readUnlock(); + } + + Preconditions.checkState( + (suite == null && ezKeyName == null) || + (suite != null && ezKeyName != null), + "Both suite and ezKeyName should both be null or not null"); + // Generate EDEK if necessary while not holding the lock + EncryptedKeyVersion edek = + generateEncryptedDataEncryptionKey(ezKeyName); + + // Try to create the file with the computed cipher suite and EDEK writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create file" + src); src = FSDirectory.resolvePath(src, pathComponents, dir); startFileInternal(pc, src, permissions, holder, clientMachine, create, - overwrite, createParent, replication, blockSize, feInfo, + overwrite, createParent, replication, blockSize, suite, edek, logRetryCache); stat = dir.getFileInfo(src, false); } catch (StandbyException se) { @@ -2561,8 +2557,8 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, private void startFileInternal(FSPermissionChecker pc, String src, PermissionStatus permissions, String holder, String clientMachine, boolean create, boolean overwrite, boolean createParent, - short replication, long blockSize, FileEncryptionInfo feInfo, - boolean logRetryEntry) + short replication, long blockSize, CipherSuite suite, + EncryptedKeyVersion edek, boolean logRetryEntry) throws FileAlreadyExistsException, AccessControlException, UnresolvedLinkException, FileNotFoundException, ParentNotDirectoryException, RetryStartFileException, IOException { @@ -2575,21 +2571,21 @@ private void startFileInternal(FSPermissionChecker pc, String src, " already exists as a directory"); } - if (!dir.isInAnEZ(iip)) { - // If the path is not in an EZ, we don't need an feInfo. - // Null it out in case one was already generated. - feInfo = null; - } else { - // The path is now within an EZ, but no feInfo. Retry. - if (feInfo == null) { + FileEncryptionInfo feInfo = null; + if (dir.isInAnEZ(iip)) { + // The path is now within an EZ, but we're missing encryption parameters + if (suite == null || edek == null) { throw new RetryStartFileException(); } - // It's in an EZ and we have a provided feInfo. Make sure the - // keyVersion of the encryption key used matches one of the keyVersions of - // the key of the encryption zone. - if (!dir.isValidKeyVersion(iip, feInfo.getEzKeyVersionName())) { + // Path is within an EZ and we have provided encryption parameters. + // Make sure that the generated EDEK matches the settings of the EZ. 
+ String ezKeyName = dir.getKeyName(iip); + if (!ezKeyName.equals(edek.getKeyName())) { throw new RetryStartFileException(); } + feInfo = new FileEncryptionInfo(suite, edek.getEncryptedKey() + .getMaterial(), edek.getIv(), edek.getKeyVersionName()); + Preconditions.checkNotNull(feInfo); } final INodeFile myFile = INodeFile.valueOf(inode, src, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 5a7d4fb8324..f80b9f85e51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2039,15 +2039,4 @@ - - dfs.namenode.key.version.refresh.interval.ms - 300000 - How frequently the namenode will attempt to fetch the latest - key version of encryption zone keys from the configured KeyProvider, in - milliseconds. New key versions are created when a key is rolled. This - setting thus controls the window of staleness where an old key version - is used after a key is rolled. - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 09ccf2860b3..cf5cec64a79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -347,7 +347,6 @@ public void testReadWrite() throws Exception { assertEquals("Expected 1 EZ", 1, zones.size()); String keyId = zones.get(0).getKeyId(); cluster.getNamesystem().getProvider().rollNewVersion(keyId); - cluster.getNamesystem().getFSDirectory().ezManager.kickMonitor(); // Read them back in and compare byte-by-byte validateFiles(baseFile, encFile1, len); // Write a new enc file and validate From b57ec165674143daf4fa50378446a9f831d3b9d3 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 22 Jul 2014 00:55:20 +0000 Subject: [PATCH 039/354] HDFS-6718. Remove EncryptionZoneManager lock. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612439 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../namenode/EncryptionZoneManager.java | 190 ++++++------------ 2 files changed, 65 insertions(+), 127 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 24cfbfb5b20..311e42c6dae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -52,6 +52,8 @@ fs-encryption (Unreleased) HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode. (wang) + HDFS-6718. Remove EncryptionZoneManager lock. 
(wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index 890200d787b..0f7f61c264d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -60,35 +60,6 @@ long getINodeId() { } - /** - * Protects the encryptionZones map and its contents. - */ - private final ReentrantReadWriteLock lock; - - private void readLock() { - lock.readLock().lock(); - } - - private void readUnlock() { - lock.readLock().unlock(); - } - - private void writeLock() { - lock.writeLock().lock(); - } - - private void writeUnlock() { - lock.writeLock().unlock(); - } - - public boolean hasWriteLock() { - return lock.isWriteLockedByCurrentThread(); - } - - public boolean hasReadLock() { - return lock.getReadHoldCount() > 0 || hasWriteLock(); - } - private final Map encryptionZones; private final FSDirectory dir; private final KeyProvider provider; @@ -101,7 +72,6 @@ public boolean hasReadLock() { public EncryptionZoneManager(FSDirectory dir, KeyProvider provider) { this.dir = dir; this.provider = provider; - lock = new ReentrantReadWriteLock(); encryptionZones = new HashMap(); } @@ -116,12 +86,7 @@ public EncryptionZoneManager(FSDirectory dir, KeyProvider provider) { void addEncryptionZone(Long inodeId, String keyId) { assert dir.hasWriteLock(); final EncryptionZoneInt ez = new EncryptionZoneInt(inodeId, keyId); - writeLock(); - try { - encryptionZones.put(inodeId, ez); - } finally { - writeUnlock(); - } + encryptionZones.put(inodeId, ez); } /** @@ -131,12 +96,7 @@ void addEncryptionZone(Long inodeId, String keyId) { */ void removeEncryptionZone(Long inodeId) { assert dir.hasWriteLock(); - writeLock(); - try { - encryptionZones.remove(inodeId); - } finally { - writeUnlock(); - } + encryptionZones.remove(inodeId); } /** @@ -147,12 +107,7 @@ void removeEncryptionZone(Long inodeId) { boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException, SnapshotAccessControlException { assert dir.hasReadLock(); - readLock(); - try { - return (getEncryptionZoneForPath(iip) != null); - } finally { - readUnlock(); - } + return (getEncryptionZoneForPath(iip) != null); } /** @@ -172,16 +127,12 @@ private String getFullPathName(EncryptionZoneInt ezi) { * Called while holding the FSDirectory lock. */ String getKeyName(final INodesInPath iip) { - readLock(); - try { - EncryptionZoneInt ezi = getEncryptionZoneForPath(iip); - if (ezi == null) { - return null; - } - return ezi.getKeyName(); - } finally { - readUnlock(); + assert dir.hasReadLock(); + EncryptionZoneInt ezi = getEncryptionZoneForPath(iip); + if (ezi == null) { + return null; } + return ezi.getKeyName(); } /** @@ -191,7 +142,7 @@ String getKeyName(final INodesInPath iip) { * Must be called while holding the manager lock. 
*/ private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) { - assert hasReadLock(); + assert dir.hasReadLock(); Preconditions.checkNotNull(iip); final INode[] inodes = iip.getINodes(); for (int i = inodes.length - 1; i >= 0; i--) { @@ -220,41 +171,36 @@ private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) { void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src) throws IOException { assert dir.hasReadLock(); - readLock(); - try { - final EncryptionZoneInt srcEZI = getEncryptionZoneForPath(srcIIP); - final EncryptionZoneInt dstEZI = getEncryptionZoneForPath(dstIIP); - final boolean srcInEZ = (srcEZI != null); - final boolean dstInEZ = (dstEZI != null); - if (srcInEZ) { - if (!dstInEZ) { - throw new IOException( - src + " can't be moved from an encryption zone."); - } - } else { - if (dstInEZ) { - throw new IOException( - src + " can't be moved into an encryption zone."); - } + final EncryptionZoneInt srcEZI = getEncryptionZoneForPath(srcIIP); + final EncryptionZoneInt dstEZI = getEncryptionZoneForPath(dstIIP); + final boolean srcInEZ = (srcEZI != null); + final boolean dstInEZ = (dstEZI != null); + if (srcInEZ) { + if (!dstInEZ) { + throw new IOException( + src + " can't be moved from an encryption zone."); } + } else { + if (dstInEZ) { + throw new IOException( + src + " can't be moved into an encryption zone."); + } + } - if (srcInEZ || dstInEZ) { - Preconditions.checkState(srcEZI != null, "couldn't find src EZ?"); - Preconditions.checkState(dstEZI != null, "couldn't find dst EZ?"); - if (srcEZI != dstEZI) { - final String srcEZPath = getFullPathName(srcEZI); - final String dstEZPath = getFullPathName(dstEZI); - final StringBuilder sb = new StringBuilder(src); - sb.append(" can't be moved from encryption zone "); - sb.append(srcEZPath); - sb.append(" to encryption zone "); - sb.append(dstEZPath); - sb.append("."); - throw new IOException(sb.toString()); - } + if (srcInEZ || dstInEZ) { + Preconditions.checkState(srcEZI != null, "couldn't find src EZ?"); + Preconditions.checkState(dstEZI != null, "couldn't find dst EZ?"); + if (srcEZI != dstEZI) { + final String srcEZPath = getFullPathName(srcEZI); + final String dstEZPath = getFullPathName(dstEZI); + final StringBuilder sb = new StringBuilder(src); + sb.append(" can't be moved from encryption zone "); + sb.append(srcEZPath); + sb.append(" to encryption zone "); + sb.append(dstEZPath); + sb.append("."); + throw new IOException(sb.toString()); } - } finally { - readUnlock(); } } @@ -266,34 +212,29 @@ void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src) XAttr createEncryptionZone(String src, String keyId, KeyVersion keyVersion) throws IOException { assert dir.hasWriteLock(); - writeLock(); - try { - if (dir.isNonEmptyDirectory(src)) { - throw new IOException( - "Attempt to create an encryption zone for a non-empty directory."); - } - - final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false); - EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP); - if (ezi != null) { - throw new IOException("Directory " + src + " is already in an " + - "encryption zone. 
(" + getFullPathName(ezi) + ")"); - } - - final XAttr keyIdXAttr = XAttrHelper - .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyId.getBytes()); - - final List xattrs = Lists.newArrayListWithCapacity(1); - xattrs.add(keyIdXAttr); - // updating the xattr will call addEncryptionZone, - // done this way to handle edit log loading - dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); - // Re-get the new encryption zone add the latest key version - ezi = getEncryptionZoneForPath(srcIIP); - return keyIdXAttr; - } finally { - writeUnlock(); + if (dir.isNonEmptyDirectory(src)) { + throw new IOException( + "Attempt to create an encryption zone for a non-empty directory."); } + + final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false); + EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP); + if (ezi != null) { + throw new IOException("Directory " + src + " is already in an " + + "encryption zone. (" + getFullPathName(ezi) + ")"); + } + + final XAttr keyIdXAttr = XAttrHelper + .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyId.getBytes()); + + final List xattrs = Lists.newArrayListWithCapacity(1); + xattrs.add(keyIdXAttr); + // updating the xattr will call addEncryptionZone, + // done this way to handle edit log loading + dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); + // Re-get the new encryption zone add the latest key version + ezi = getEncryptionZoneForPath(srcIIP); + return keyIdXAttr; } /** @@ -303,16 +244,11 @@ XAttr createEncryptionZone(String src, String keyId, KeyVersion keyVersion) */ List listEncryptionZones() throws IOException { assert dir.hasReadLock(); - readLock(); - try { - final List ret = - Lists.newArrayListWithExpectedSize(encryptionZones.size()); - for (EncryptionZoneInt ezi : encryptionZones.values()) { - ret.add(new EncryptionZone(getFullPathName(ezi), ezi.getKeyName())); - } - return ret; - } finally { - readUnlock(); + final List ret = + Lists.newArrayListWithExpectedSize(encryptionZones.size()); + for (EncryptionZoneInt ezi : encryptionZones.values()) { + ret.add(new EncryptionZone(getFullPathName(ezi), ezi.getKeyName())); } + return ret; } } From 6fac3e9b611c43b4f7a97c80f86dd761782cef09 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Tue, 22 Jul 2014 00:58:10 +0000 Subject: [PATCH 040/354] HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old openssl versions (cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612440 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 +++ .../crypto/OpensslAesCtrCryptoCodec.java | 5 ++-- .../apache/hadoop/crypto/OpensslCipher.java | 23 +++++++++++-------- .../hadoop/util/NativeLibraryChecker.java | 16 +++++++------ .../apache/hadoop/crypto/TestCryptoCodec.java | 16 ++++++------- .../hadoop/crypto/TestOpensslCipher.java | 13 ++++------- 6 files changed, 40 insertions(+), 36 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index 4c365bf0aeb..3f9f13df0e0 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -40,6 +40,9 @@ fs-encryption (Unreleased) HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to JCE if non native support. (Yi Liu) + HADOOP-10870. 
Failed to load OpenSSL cipher error logs on systems with old + openssl versions (cmccabe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java index 7db7b063e5c..4ca79b307d6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java @@ -47,8 +47,9 @@ public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec { private Random random; public OpensslAesCtrCryptoCodec() { - if (!OpensslCipher.isNativeCodeLoaded()) { - throw new RuntimeException("Failed to load OpenSSL Cipher."); + String loadingFailureReason = OpensslCipher.getLoadingFailureReason(); + if (loadingFailureReason != null) { + throw new RuntimeException(loadingFailureReason); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java index 652a8b4c324..264652b202a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java @@ -76,21 +76,26 @@ static int get(String padding) throws NoSuchPaddingException { private final int alg; private final int padding; - private static boolean nativeCipherLoaded = false; + private static final String loadingFailureReason; + static { - if (NativeCodeLoader.isNativeCodeLoaded() && - NativeCodeLoader.buildSupportsOpenssl()) { - try { + String loadingFailure = null; + try { + if (!NativeCodeLoader.buildSupportsOpenssl()) { + loadingFailure = "build does not support openssl."; + } else { initIDs(); - nativeCipherLoaded = true; - } catch (Throwable t) { - LOG.error("Failed to load OpenSSL Cipher.", t); } + } catch (Throwable t) { + loadingFailure = t.getMessage(); + LOG.debug("Failed to load OpenSSL Cipher.", t); + } finally { + loadingFailureReason = loadingFailure; } } - public static boolean isNativeCodeLoaded() { - return nativeCipherLoaded; + public static String getLoadingFailureReason() { + return loadingFailureReason; } private OpensslCipher(long context, int alg, int padding) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java index 4891f03cbb8..0d87bceda17 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java @@ -58,14 +58,14 @@ public static void main(String[] args) { boolean nativeHadoopLoaded = NativeCodeLoader.isNativeCodeLoaded(); boolean zlibLoaded = false; boolean snappyLoaded = false; - boolean opensslLoaded = false; // lz4 is linked within libhadoop boolean lz4Loaded = nativeHadoopLoaded; boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf); + boolean openSslLoaded = false; + String openSslDetail = ""; String hadoopLibraryName = ""; String zlibLibraryName = ""; String snappyLibraryName = ""; - String opensslLibraryName = ""; String lz4LibraryName = ""; String bzip2LibraryName = ""; if 
(nativeHadoopLoaded) { @@ -79,10 +79,12 @@ public static void main(String[] args) { if (snappyLoaded && NativeCodeLoader.buildSupportsSnappy()) { snappyLibraryName = SnappyCodec.getLibraryName(); } - opensslLoaded = NativeCodeLoader.buildSupportsOpenssl() && - OpensslCipher.isNativeCodeLoaded(); - if (opensslLoaded) { - opensslLibraryName = OpensslCipher.getLibraryName(); + if (OpensslCipher.getLoadingFailureReason() != null) { + openSslDetail = OpensslCipher.getLoadingFailureReason(); + openSslLoaded = false; + } else { + openSslDetail = OpensslCipher.getLibraryName(); + openSslLoaded = true; } if (lz4Loaded) { lz4LibraryName = Lz4Codec.getLibraryName(); @@ -97,7 +99,7 @@ public static void main(String[] args) { System.out.printf("snappy: %b %s\n", snappyLoaded, snappyLibraryName); System.out.printf("lz4: %b %s\n", lz4Loaded, lz4LibraryName); System.out.printf("bzip2: %b %s\n", bzip2Loaded, bzip2LibraryName); - System.out.printf("openssl: %b %s\n", opensslLoaded, opensslLibraryName); + System.out.printf("openssl: %b %s\n", openSslLoaded, openSslDetail); if ((!nativeHadoopLoaded) || (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded && bzip2Loaded))) { // return 1 to indicated check failed diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java index d95052815cf..49b5056a86f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java @@ -38,6 +38,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.ReflectionUtils; import org.junit.Assert; +import org.junit.Assume; import org.junit.Test; public class TestCryptoCodec { @@ -62,15 +63,12 @@ public void testJceAesCtrCryptoCodec() throws Exception { @Test(timeout=1200000) public void testOpensslAesCtrCryptoCodec() throws Exception { - if (NativeCodeLoader.buildSupportsOpenssl()) { - Assert.assertTrue(OpensslCipher.isNativeCodeLoaded()); - } - if (OpensslCipher.isNativeCodeLoaded()) { - cryptoCodecTest(conf, seed, 0, - "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec"); - cryptoCodecTest(conf, seed, count, - "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec"); - } + Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl()); + Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason()); + cryptoCodecTest(conf, seed, 0, + "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec"); + cryptoCodecTest(conf, seed, count, + "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec"); } private void cryptoCodecTest(Configuration conf, int seed, int count, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java index b3a894a164f..966a88723a2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java @@ -24,6 +24,7 @@ import javax.crypto.ShortBufferException; import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Assume; import org.junit.Assert; import org.junit.Test; @@ -35,9 +36,7 @@ public class TestOpensslCipher { @Test(timeout=120000) public void testGetInstance() throws Exception { - if 
(!OpensslCipher.isNativeCodeLoaded()) { - return; - } + Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null); OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding"); Assert.assertTrue(cipher != null); @@ -58,9 +57,7 @@ public void testGetInstance() throws Exception { @Test(timeout=120000) public void testUpdateArguments() throws Exception { - if (!OpensslCipher.isNativeCodeLoaded()) { - return; - } + Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null); OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding"); Assert.assertTrue(cipher != null); @@ -93,9 +90,7 @@ public void testUpdateArguments() throws Exception { @Test(timeout=120000) public void testDoFinalArguments() throws Exception { - if (!OpensslCipher.isNativeCodeLoaded()) { - return; - } + Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null); OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding"); Assert.assertTrue(cipher != null); From b52b80d7bdcad00b95619544fa869af56746ebf0 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Tue, 22 Jul 2014 08:38:38 +0000 Subject: [PATCH 041/354] HADOOP-10853. Refactor get instance of CryptoCodec and support create via algorithm/mode/padding. (yliu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612513 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES-fs-encryption.txt | 3 ++ .../org/apache/hadoop/crypto/CipherSuite.java | 30 +++++++++++++ .../org/apache/hadoop/crypto/CryptoCodec.java | 43 +++++++++++++------ .../hadoop/crypto/JceAesCtrCryptoCodec.java | 8 +++- .../fs/CommonConfigurationKeysPublic.java | 22 ++++------ .../src/main/resources/core-default.xml | 26 +++++++---- ...toStreamsWithOpensslAesCtrCryptoCodec.java | 5 --- .../org/apache/hadoop/hdfs/DFSClient.java | 5 ++- 8 files changed, 99 insertions(+), 43 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt index 3f9f13df0e0..3ce7695ddfe 100644 --- a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt +++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt @@ -43,6 +43,9 @@ fs-encryption (Unreleased) HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old openssl versions (cmccabe) + HADOOP-10853. Refactor get instance of CryptoCodec and support create via + algorithm/mode/padding. (Yi Liu) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java index c75311aef65..9962b3870da 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java @@ -82,4 +82,34 @@ public static void checkName(String name) { } throw new IllegalArgumentException("Invalid cipher suite name: " + name); } + + /** + * Convert to CipherSuite from name, {@link #algoBlockSize} is fixed for + * certain cipher suite, just need to compare the name. 
+ * @param name cipher suite name + * @return CipherSuite cipher suite + */ + public static CipherSuite convert(String name) { + CipherSuite[] suites = CipherSuite.values(); + for (CipherSuite suite : suites) { + if (suite.getName().equals(name)) { + return suite; + } + } + throw new IllegalArgumentException("Invalid cipher suite name: " + name); + } + + /** + * Returns suffix of cipher suite configuration. + * @return String configuration suffix + */ + public String getConfigSuffix() { + String[] parts = name.split("/"); + StringBuilder suffix = new StringBuilder(); + for (String part : parts) { + suffix.append(".").append(part.toLowerCase()); + } + + return suffix.toString(); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java index 45c06f02daa..f484083b592 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java @@ -31,8 +31,7 @@ import com.google.common.base.Splitter; import com.google.common.collect.Lists; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASS_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT; @@ -44,23 +43,28 @@ public abstract class CryptoCodec implements Configurable { public static Logger LOG = LoggerFactory.getLogger(CryptoCodec.class); - public static CryptoCodec getInstance(Configuration conf) { - List> klasses = getCodecClasses(conf); - String name = conf.get(HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY, - HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT); - CipherSuite.checkName(name); + /** + * Get crypto codec for specified algorithm/mode/padding. 
+ * @param conf the configuration + * @param CipherSuite algorithm/mode/padding + * @return CryptoCodec the codec object + */ + public static CryptoCodec getInstance(Configuration conf, + CipherSuite cipherSuite) { + List> klasses = getCodecClasses( + conf, cipherSuite); CryptoCodec codec = null; for (Class klass : klasses) { try { CryptoCodec c = ReflectionUtils.newInstance(klass, conf); - if (c.getCipherSuite().getName().equalsIgnoreCase(name)) { + if (c.getCipherSuite().getName().equals(cipherSuite.getName())) { if (codec == null) { LOG.debug("Using crypto codec {}.", klass.getName()); codec = c; } } else { LOG.warn("Crypto codec {} doesn't meet the cipher suite {}.", - klass.getName(), name); + klass.getName(), cipherSuite.getName()); } } catch (Exception e) { LOG.warn("Crypto codec {} is not available.", klass.getName()); @@ -72,14 +76,27 @@ public static CryptoCodec getInstance(Configuration conf) { } throw new RuntimeException("No available crypto codec which meets " + - "the cipher suite " + name + "."); + "the cipher suite " + cipherSuite.getName() + "."); + } + + /** + * Get crypto codec for algorithm/mode/padding in config value + * hadoop.security.crypto.cipher.suite + * @param conf the configuration + * @return CryptoCodec the codec object + */ + public static CryptoCodec getInstance(Configuration conf) { + String name = conf.get(HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY, + HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT); + return getInstance(conf, CipherSuite.convert(name)); } private static List> getCodecClasses( - Configuration conf) { + Configuration conf, CipherSuite cipherSuite) { List> result = Lists.newArrayList(); - String codecString = conf.get(HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY, - HADOOP_SECURITY_CRYPTO_CODEC_CLASS_DEFAULT); + String configName = HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX + + cipherSuite.getConfigSuffix(); + String codecString = conf.get(configName); for (String c : Splitter.on(',').trimResults().omitEmptyStrings(). 
split(codecString)) { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java index cd093203867..61ee743c421 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java @@ -26,6 +26,8 @@ import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -40,6 +42,9 @@ */ @InterfaceAudience.Private public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec { + private static final Log LOG = + LogFactory.getLog(JceAesCtrCryptoCodec.class.getName()); + private Configuration conf; private String provider; private SecureRandom random; @@ -64,7 +69,8 @@ public void setConf(Configuration conf) { SecureRandom.getInstance(secureRandomAlg, provider) : SecureRandom.getInstance(secureRandomAlg); } catch (GeneralSecurityException e) { - throw new IllegalArgumentException(e); + LOG.warn(e.getMessage()); + random = new SecureRandom(); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 3b353ebcdb3..933f2c239a1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -282,12 +282,8 @@ public class CommonConfigurationKeysPublic { /** Class to override Sasl Properties for a connection */ public static final String HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS = "hadoop.security.saslproperties.resolver.class"; - /** See core-default.xml */ - public static final String HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY = - "hadoop.security.crypto.codec.class"; - public static final String HADOOP_SECURITY_CRYPTO_CODEC_CLASS_DEFAULT = - "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec," + - "org.apache.hadoop.crypto.JceAesCtrCryptoCodec"; + public static final String HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX = + "hadoop.security.crypto.codec.classes"; /** See core-default.xml */ public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY = "hadoop.security.crypto.cipher.suite"; @@ -295,10 +291,10 @@ public class CommonConfigurationKeysPublic { "AES/CTR/NoPadding"; /** See core-default.xml */ public static final String HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY = - "hadoop.security.crypto.jce.provider"; + "hadoop.security.crypto.jce.provider"; /** See core-default.xml */ public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY = - "hadoop.security.crypto.buffer.size"; + "hadoop.security.crypto.buffer.size"; /** Defalt value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */ public static final int HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT = 8192; /** Class to override Impersonation provider */ @@ -334,17 +330,17 @@ public class CommonConfigurationKeysPublic { /** See core-default.xml */ public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY = - "hadoop.security.java.secure.random.algorithm"; + 
"hadoop.security.java.secure.random.algorithm"; /** Defalt value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */ public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT = - "SHA1PRNG"; + "SHA1PRNG"; /** See core-default.xml */ public static final String HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY = - "hadoop.security.secure.random.impl"; + "hadoop.security.secure.random.impl"; /** See core-default.xml */ public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY = - "hadoop.security.random.device.file.path"; + "hadoop.security.random.device.file.path"; public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT = - "/dev/urandom"; + "/dev/urandom"; } diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 24e6726f71a..37348eaeac1 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1452,13 +1452,21 @@ for ldap providers in the same way as above does. - hadoop.security.crypto.codec.class - org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, - org.apache.hadoop.crypto.JceAesCtrCryptoCodec + hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE + - Comma list of CryptoCodec implementations which are used for encryption - and decryption. The first implementation will be used if avaiable, others - are fallbacks. + The prefix for a given crypto codec, contains a comma-separated + list of implementation classes for a given crypto codec (eg EXAMPLECIPHERSUITE). + The first implementation will be used if available, others are fallbacks. + + + + + hadoop.security.crypto.codec.classes.aes.ctr.nopadding + org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec + + Comma-separated list of crypto codec implementations for AES/CTR/NoPadding. + The first implementation will be used if available, others are fallbacks. @@ -1488,7 +1496,7 @@ for ldap providers in the same way as above does. hadoop.security.java.secure.random.algorithm - + SHA1PRNG The java secure random algorithm. @@ -1504,9 +1512,9 @@ for ldap providers in the same way as above does. hadoop.security.random.device.file.path - + /dev/urandom - OS security random dev path, it's /dev/urandom in linux. + OS security random device file path. 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java index 4e962cb42d5..f64e8dcca69 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java @@ -20,17 +20,12 @@ import org.apache.hadoop.conf.Configuration; import org.junit.BeforeClass; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY; - public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec extends TestCryptoStreams { @BeforeClass public static void init() throws Exception { Configuration conf = new Configuration(); - conf.set(HADOOP_SECURITY_CRYPTO_CODEC_CLASS_KEY, - OpensslAesCtrCryptoCodec.class.getName() + "," + - JceAesCtrCryptoCodec.class.getName()); codec = CryptoCodec.getInstance(conf); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 9f471d8e21f..102343797c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1301,8 +1301,9 @@ public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis) if (feInfo != null) { // File is encrypted, wrap the stream in a crypto stream. final CryptoInputStream cryptoIn = - new CryptoInputStream(dfsis, codec, - feInfo.getEncryptedDataEncryptionKey(), feInfo.getIV()); + new CryptoInputStream(dfsis, CryptoCodec.getInstance(conf, + feInfo.getCipherSuite()), feInfo.getEncryptedDataEncryptionKey(), + feInfo.getIV()); return new HdfsDataInputStream(cryptoIn); } else { // No key/IV pair so no encryption. From 69b75fca7aec5f5cbf79bc7db3915119cef69e65 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 22 Jul 2014 17:57:06 +0000 Subject: [PATCH 042/354] HDFS-6720. Remove KeyProvider in EncryptionZoneManager. 
(wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612632 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../org/apache/hadoop/hdfs/DFSClient.java | 4 +- .../hadoop/hdfs/DistributedFileSystem.java | 4 +- .../apache/hadoop/hdfs/client/HdfsAdmin.java | 34 +++++-------- .../hadoop/hdfs/protocol/ClientProtocol.java | 2 +- .../hadoop/hdfs/protocol/EncryptionZone.java | 18 +++---- ...amenodeProtocolServerSideTranslatorPB.java | 2 +- .../ClientNamenodeProtocolTranslatorPB.java | 6 +-- .../hadoop/hdfs/protocolPB/PBHelper.java | 4 +- .../namenode/EncryptionZoneManager.java | 24 ++++------ .../hdfs/server/namenode/FSDirectory.java | 8 ++-- .../hdfs/server/namenode/FSNamesystem.java | 48 +++++++++---------- .../server/namenode/NameNodeRpcServer.java | 4 +- .../apache/hadoop/hdfs/tools/CryptoAdmin.java | 13 ++--- .../src/main/proto/encryption.proto | 4 +- .../org/apache/hadoop/cli/TestCryptoCLI.java | 4 +- .../hadoop/hdfs/TestEncryptionZones.java | 28 +++++------ .../src/test/resources/testCryptoConf.xml | 4 +- 18 files changed, 99 insertions(+), 114 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 311e42c6dae..7170acb225a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -54,6 +54,8 @@ fs-encryption (Unreleased) HDFS-6718. Remove EncryptionZoneManager lock. (wang) + HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 102343797c6..a6f02fbfab5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2807,11 +2807,11 @@ public AclStatus getAclStatus(String src) throws IOException { } } - public void createEncryptionZone(String src, String keyId) + public void createEncryptionZone(String src, String keyName) throws IOException { checkOpen(); try { - namenode.createEncryptionZone(src, keyId); + namenode.createEncryptionZone(src, keyName); } catch (RemoteException re) { throw re.unwrapRemoteException(AccessControlException.class, SafeModeException.class, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index c348a1b3a95..eccd563b270 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1799,9 +1799,9 @@ public AclStatus next(final FileSystem fs, final Path p) } /* HDFS only */ - public void createEncryptionZone(Path path, String keyId) + public void createEncryptionZone(Path path, String keyName) throws IOException { - dfs.createEncryptionZone(getPathName(path), keyId); + dfs.createEncryptionZone(getPathName(path), keyName); } /* HDFS only */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index 018fce4e797..be3ac51cfe7 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -231,21 +231,16 @@ public RemoteIterator listCachePools() throws IOException { } /** - * Create an encryption zone rooted at path using the optional encryption key - * id. An encryption zone is a portion of the HDFS file system hierarchy in - * which all files are encrypted with the same key, but possibly different - * key versions per file. - *
- * Path must refer to an empty, existing directory. Otherwise an IOException - * will be thrown. keyId specifies the id of an encryption key in the - * KeyProvider that the Namenode has been configured to use. If keyId is - * null, then a key is generated in the KeyProvider using {@link - * java.util.UUID} to generate a key id. + * Create an encryption zone rooted at an empty existing directory. An + * encryption zone has an associated encryption key used when reading and + * writing files within the zone. An existing key can be specified, + * else a new key will be generated for the encryption zone. * - * @param path The path of the root of the encryption zone. + * @param path The path of the root of the encryption zone. Must refer to + * an empty, existing directory. * - * @param keyId An optional keyId in the KeyProvider. If null, then - * a key is generated. + * @param keyName Optional name of key available at the KeyProvider. If null, + * then a key is generated. * * @throws IOException if there was a general IO exception * @@ -253,18 +248,15 @@ public RemoteIterator listCachePools() throws IOException { * * @throws FileNotFoundException if the path does not exist */ - public void createEncryptionZone(Path path, String keyId) + public void createEncryptionZone(Path path, String keyName) throws IOException, AccessControlException, FileNotFoundException { - dfs.createEncryptionZone(path, keyId); + dfs.createEncryptionZone(path, keyName); } /** - * Return a list of all {@EncryptionZone}s in the HDFS hierarchy which are - * visible to the caller. If the caller is the HDFS admin, then the returned - * EncryptionZone instances will have the key id field filled in. If the - * caller is not the HDFS admin, then the EncryptionZone instances will only - * have the path field filled in and only those zones that are visible to the - * user are returned. + * Return a list of all {@link EncryptionZone}s in the HDFS hierarchy which + * are visible to the caller. If the caller is an HDFS superuser, + * then the key name of each encryption zone will also be provided. * * @throws IOException if there was a general IO exception * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index a307520fc05..7996717d9d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1263,7 +1263,7 @@ public void removeAclEntries(String src, List aclSpec) * Create an encryption zone */ @AtMostOnce - public void createEncryptionZone(String src, String keyId) + public void createEncryptionZone(String src, String keyName) throws IOException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java index f4fcc609e41..a20e93c111e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java @@ -24,32 +24,32 @@ /** * A simple class for representing an encryption zone. Presently an encryption - * zone only has a path (the root of the encryption zone) and a key id. 
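
A minimal usage sketch of the renamed admin API documented above. It assumes the caller is the HDFS superuser, that a key named "ezKey" already exists in the configured KeyProvider, and that the (made-up) zone path "/secure" refers to an empty, existing directory.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    public class CreateZoneSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);

        // Create a zone backed by an existing key; passing null instead lets
        // the NameNode generate a key in its configured KeyProvider.
        admin.createEncryptionZone(new Path("/secure"), "ezKey");

        // Superusers also see the key name of each zone.
        for (EncryptionZone ez : admin.listEncryptionZones()) {
          System.out.println(ez.getPath() + " -> " + ez.getKeyName());
        }
      }
    }
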
+ * zone only has a path (the root of the encryption zone) and a key name. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class EncryptionZone { private final String path; - private final String keyId; + private final String keyName; - public EncryptionZone(String path, String keyId) { + public EncryptionZone(String path, String keyName) { this.path = path; - this.keyId = keyId; + this.keyName = keyName; } public String getPath() { return path; } - public String getKeyId() { - return keyId; + public String getKeyName() { + return keyName; } @Override public int hashCode() { return new HashCodeBuilder(13, 31). - append(path).append(keyId). + append(path).append(keyName). toHashCode(); } @@ -68,12 +68,12 @@ public boolean equals(Object obj) { EncryptionZone rhs = (EncryptionZone) obj; return new EqualsBuilder(). append(path, rhs.path). - append(keyId, rhs.keyId). + append(keyName, rhs.keyName). isEquals(); } @Override public String toString() { - return "EncryptionZone [path=" + path + ", keyId=" + keyId + "]"; + return "EncryptionZone [path=" + path + ", keyName=" + keyName + "]"; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 570386e04e6..203fa381083 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -1287,7 +1287,7 @@ public CreateEncryptionZoneResponseProto createEncryptionZone( RpcController controller, CreateEncryptionZoneRequestProto req) throws ServiceException { try { - server.createEncryptionZone(req.getSrc(), req.getKeyId()); + server.createEncryptionZone(req.getSrc(), req.getKeyName()); return CreateEncryptionZoneResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 6a15ac57752..85fb745ca84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -1284,13 +1284,13 @@ public AclStatus getAclStatus(String src) throws IOException { } @Override - public void createEncryptionZone(String src, String keyId) + public void createEncryptionZone(String src, String keyName) throws IOException { final CreateEncryptionZoneRequestProto.Builder builder = CreateEncryptionZoneRequestProto.newBuilder(); builder.setSrc(src); - if (keyId != null && !keyId.isEmpty()) { - builder.setKeyId(keyId); + if (keyName != null && !keyName.isEmpty()) { + builder.setKeyName(keyName); } CreateEncryptionZoneRequestProto req = builder.build(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 4f62b42ddaa..edbd284dfc4 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -2209,7 +2209,7 @@ public static List convertEZProto( final EncryptionZoneProto.Builder builder = EncryptionZoneProto.newBuilder(); builder.setPath(a.getPath()); - builder.setKeyId(a.getKeyId()); + builder.setKeyName(a.getKeyName()); ret.add(builder.build()); } return ret; @@ -2221,7 +2221,7 @@ public static List convertEZ( Lists.newArrayListWithCapacity(ezs.size()); for (EncryptionZoneProto a : ezs) { final EncryptionZone ez = - new EncryptionZone(a.getPath(), a.getKeyId()); + new EncryptionZone(a.getPath(), a.getKeyName()); ret.add(ez); } return ret; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index 0f7f61c264d..f40d315676a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -5,11 +5,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.locks.ReentrantReadWriteLock; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; @@ -20,7 +18,6 @@ import org.slf4j.LoggerFactory; -import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants .CRYPTO_XATTR_ENCRYPTION_ZONE; @@ -62,16 +59,14 @@ long getINodeId() { private final Map encryptionZones; private final FSDirectory dir; - private final KeyProvider provider; /** * Construct a new EncryptionZoneManager. * * @param dir Enclosing FSDirectory */ - public EncryptionZoneManager(FSDirectory dir, KeyProvider provider) { + public EncryptionZoneManager(FSDirectory dir) { this.dir = dir; - this.provider = provider; encryptionZones = new HashMap(); } @@ -81,11 +76,11 @@ public EncryptionZoneManager(FSDirectory dir, KeyProvider provider) { * Called while holding the FSDirectory lock. * * @param inodeId of the encryption zone - * @param keyId encryption zone key id + * @param keyName encryption zone key name */ - void addEncryptionZone(Long inodeId, String keyId) { + void addEncryptionZone(Long inodeId, String keyName) { assert dir.hasWriteLock(); - final EncryptionZoneInt ez = new EncryptionZoneInt(inodeId, keyId); + final EncryptionZoneInt ez = new EncryptionZoneInt(inodeId, keyName); encryptionZones.put(inodeId, ez); } @@ -209,7 +204,7 @@ void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src) *
* Called while holding the FSDirectory lock. */ - XAttr createEncryptionZone(String src, String keyId, KeyVersion keyVersion) + XAttr createEncryptionZone(String src, String keyName) throws IOException { assert dir.hasWriteLock(); if (dir.isNonEmptyDirectory(src)) { @@ -224,17 +219,16 @@ XAttr createEncryptionZone(String src, String keyId, KeyVersion keyVersion) "encryption zone. (" + getFullPathName(ezi) + ")"); } - final XAttr keyIdXAttr = XAttrHelper - .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyId.getBytes()); + final XAttr ezXAttr = XAttrHelper + .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyName.getBytes()); final List xattrs = Lists.newArrayListWithCapacity(1); - xattrs.add(keyIdXAttr); + xattrs.add(ezXAttr); // updating the xattr will call addEncryptionZone, // done this way to handle edit log loading dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); - // Re-get the new encryption zone add the latest key version ezi = getEncryptionZoneForPath(srcIIP); - return keyIdXAttr; + return ezXAttr; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 4b13da6a513..d20f3b991bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO; import static org.apache.hadoop.util.Time.now; @@ -36,7 +35,6 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileEncryptionInfo; @@ -227,7 +225,7 @@ public int getWriteHoldCount() { nameCache = new NameCache(threshold); namesystem = ns; - ezManager = new EncryptionZoneManager(this, ns.getProvider()); + ezManager = new EncryptionZoneManager(this); } private FSNamesystem getFSNamesystem() { @@ -2632,11 +2630,11 @@ String getKeyName(INodesInPath iip) { } } - XAttr createEncryptionZone(String src, String keyId, KeyVersion keyVersion) + XAttr createEncryptionZone(String src, String keyName) throws IOException { writeLock(); try { - return ezManager.createEncryptionZone(src, keyId, keyVersion); + return ezManager.createEncryptionZone(src, keyName); } finally { writeUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 9c3f1320b7d..31a89bfde2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8421,13 +8421,13 @@ AclStatus getAclStatus(String src) throws IOException { } /** - * Create an encryption zone on directory src either 
using keyIdArg if - * supplied or generating a keyId if it's null. + * Create an encryption zone on directory src. If provided, + * will use an existing key, else will generate a new key. * * @param src the path of a directory which will be the root of the * encryption zone. The directory must be empty. * - * @param keyIdArg an optional keyId of a key in the configured + * @param keyNameArg an optional name of a key in the configured * KeyProvider. If this is null, then a a new key is generated. * * @throws AccessControlException if the caller is not the superuser. @@ -8436,7 +8436,7 @@ AclStatus getAclStatus(String src) throws IOException { * * @throws SafeModeException if the Namenode is in safe mode. */ - void createEncryptionZone(final String src, String keyIdArg) + void createEncryptionZone(final String src, String keyNameArg) throws IOException, UnresolvedLinkException, SafeModeException, AccessControlException { final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); @@ -8445,16 +8445,15 @@ void createEncryptionZone(final String src, String keyIdArg) } boolean createdKey = false; - String keyId = keyIdArg; + String keyName = keyNameArg; boolean success = false; try { - KeyVersion keyVersion; - if (keyId == null || keyId.isEmpty()) { - keyId = UUID.randomUUID().toString(); - keyVersion = createNewKey(keyId, src); + if (keyName == null || keyName.isEmpty()) { + keyName = UUID.randomUUID().toString(); + createNewKey(keyName, src); createdKey = true; } else { - keyVersion = provider.getCurrentKey(keyId); + KeyVersion keyVersion = provider.getCurrentKey(keyName); if (keyVersion == null) { /* * It would be nice if we threw something more specific than @@ -8464,10 +8463,10 @@ void createEncryptionZone(final String src, String keyIdArg) * update this to match it, or better yet, just rethrow the * KeyProvider's exception. */ - throw new IOException("Key " + keyId + " doesn't exist."); + throw new IOException("Key " + keyName + " doesn't exist."); } } - createEncryptionZoneInt(src, keyId, keyVersion, cacheEntry != null); + createEncryptionZoneInt(src, keyName, cacheEntry != null); success = true; } catch (AccessControlException e) { logAuditEvent(false, "createEncryptionZone", src); @@ -8476,14 +8475,13 @@ void createEncryptionZone(final String src, String keyIdArg) RetryCache.setState(cacheEntry, success); if (!success && createdKey) { /* Unwind key creation. 
*/ - provider.deleteKey(keyId); + provider.deleteKey(keyName); } } } - private void createEncryptionZoneInt(final String srcArg, String keyId, - final KeyVersion keyVersion, final boolean logRetryCache) throws - IOException { + private void createEncryptionZoneInt(final String srcArg, String keyName, + final boolean logRetryCache) throws IOException { String src = srcArg; HdfsFileStatus resultingStat = null; checkSuperuserPrivilege(); @@ -8497,9 +8495,9 @@ private void createEncryptionZoneInt(final String srcArg, String keyId, checkNameNodeSafeMode("Cannot create encryption zone on " + src); src = FSDirectory.resolvePath(src, pathComponents, dir); - final XAttr keyIdXAttr = dir.createEncryptionZone(src, keyId, keyVersion); + final XAttr ezXAttr = dir.createEncryptionZone(src, keyName); List xAttrs = Lists.newArrayListWithCapacity(1); - xAttrs.add(keyIdXAttr); + xAttrs.add(ezXAttr); getEditLog().logSetXAttrs(src, xAttrs, logRetryCache); resultingStat = getAuditFileInfo(src, false); } finally { @@ -8512,14 +8510,14 @@ private void createEncryptionZoneInt(final String srcArg, String keyId, /** * Create a new key on the KeyProvider for an encryption zone. * - * @param keyIdArg id of the key + * @param keyNameArg name of the key * @param src path of the encryption zone. * @return KeyVersion of the created key * @throws IOException */ - private KeyVersion createNewKey(String keyIdArg, String src) + private KeyVersion createNewKey(String keyNameArg, String src) throws IOException { - Preconditions.checkNotNull(keyIdArg); + Preconditions.checkNotNull(keyNameArg); Preconditions.checkNotNull(src); final StringBuilder sb = new StringBuilder("hdfs://"); if (nameserviceId != null) { @@ -8529,14 +8527,14 @@ private KeyVersion createNewKey(String keyIdArg, String src) if (!src.endsWith("/")) { sb.append('/'); } - sb.append(keyIdArg); - final String keyId = sb.toString(); - providerOptions.setDescription(keyId); + sb.append(keyNameArg); + final String keyName = sb.toString(); + providerOptions.setDescription(keyName); providerOptions.setBitLength(codec.getCipherSuite() .getAlgorithmBlockSize()*8); KeyVersion version = null; try { - version = provider.createKey(keyIdArg, providerOptions); + version = provider.createKey(keyNameArg, providerOptions); } catch (NoSuchAlgorithmException e) { throw new IOException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 003b3171782..c47dd6f9f8f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1413,9 +1413,9 @@ public AclStatus getAclStatus(String src) throws IOException { } @Override - public void createEncryptionZone(String src, String keyId) + public void createEncryptionZone(String src, String keyName) throws IOException { - namesystem.createEncryptionZone(src, keyId); + namesystem.createEncryptionZone(src, keyName); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java index f1ec3cbfc57..c0155fcb72b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java @@ -124,7 +124,7 @@ public String getName() { @Override public String getShortUsage() { - return "[" + getName() + " [-keyId ] -path " + "]\n"; + return "[" + getName() + " [-keyName ] -path " + "]\n"; } @Override @@ -132,7 +132,8 @@ public String getLongUsage() { final TableListing listing = getOptionDescriptionListing(); listing.addRow("", "The path of the encryption zone to create. " + "It must be an empty directory."); - listing.addRow("", "The keyId of the new encryption zone."); + listing.addRow("", "Name of the key to use for the " + + "encryption zone. A new key will be generated if unspecified."); return getShortUsage() + "\n" + "Create a new encryption zone.\n\n" + listing.toString(); @@ -146,8 +147,8 @@ public int run(Configuration conf, List args) throws IOException { return 1; } - final String keyId = - StringUtils.popOptionWithArgument("-keyId", args); + final String keyName = + StringUtils.popOptionWithArgument("-keyName", args); if (!args.isEmpty()) { System.err.println("Can't understand argument: " + args.get(0)); @@ -156,7 +157,7 @@ public int run(Configuration conf, List args) throws IOException { final DistributedFileSystem dfs = getDFS(conf); try { - dfs.createEncryptionZone(new Path(path), keyId); + dfs.createEncryptionZone(new Path(path), keyName); System.out.println("Added encryption zone " + path); } catch (IOException e) { System.err.println(prettifyException(e)); @@ -198,7 +199,7 @@ public int run(Configuration conf, List args) throws IOException { .wrapWidth(MAX_LINE_WIDTH).hideHeaders().build(); final List ezs = dfs.listEncryptionZones(); for (EncryptionZone ez : ezs) { - listing.addRow(ez.getPath(), ez.getKeyId()); + listing.addRow(ez.getPath(), ez.getKeyName()); } System.out.println(listing.toString()); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto index 6b091a572be..391b0aa5ff5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto @@ -35,7 +35,7 @@ import "hdfs.proto"; message CreateEncryptionZoneRequestProto { required string src = 1; - optional string keyId = 2; + optional string keyName = 2; } message CreateEncryptionZoneResponseProto { @@ -46,7 +46,7 @@ message ListEncryptionZonesRequestProto { message EncryptionZoneProto { required string path = 1; - required string keyId = 2; + required string keyName = 2; } message ListEncryptionZonesResponseProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java index 32ba055caa1..1b4468e545b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java @@ -93,12 +93,12 @@ public void tearDown() throws Exception { } /* Helper function to create a key in the Key Provider. 
*/ - private void createAKey(String keyId, Configuration conf) + private void createAKey(String keyName, Configuration conf) throws NoSuchAlgorithmException, IOException { final KeyProvider provider = dfsCluster.getNameNode().getNamesystem().getProvider(); final KeyProvider.Options options = KeyProvider.options(conf); - provider.createKey(keyId, options); + provider.createKey(keyName, options); provider.flush(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index cf5cec64a79..7b268ed16d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -105,18 +105,18 @@ public void assertNumZones(final int numZones) throws IOException { } /** - * Checks that an encryption zone with the specified keyId and path (if not + * Checks that an encryption zone with the specified keyName and path (if not * null) is present. * * @throws IOException if a matching zone could not be found */ - public void assertZonePresent(String keyId, String path) throws IOException { + public void assertZonePresent(String keyName, String path) throws IOException { final List zones = dfsAdmin.listEncryptionZones(); boolean match = false; for (EncryptionZone zone : zones) { - boolean matchKey = (keyId == null); + boolean matchKey = (keyName == null); boolean matchPath = (path == null); - if (keyId != null && zone.getKeyId().equals(keyId)) { + if (keyName != null && zone.getKeyName().equals(keyName)) { matchKey = true; } if (path != null && zone.getPath().equals(path)) { @@ -127,7 +127,7 @@ public void assertZonePresent(String keyId, String path) throws IOException { break; } } - assertTrue("Did not find expected encryption zone with keyId " + keyId + + assertTrue("Did not find expected encryption zone with keyName " + keyName + " path " + path, match ); } @@ -135,11 +135,11 @@ public void assertZonePresent(String keyId, String path) throws IOException { /** * Helper function to create a key in the Key Provider. */ - private void createKey(String keyId) + private void createKey(String keyName) throws NoSuchAlgorithmException, IOException { KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); final KeyProvider.Options options = KeyProvider.options(conf); - provider.createKey(keyId, options); + provider.createKey(keyName, options); provider.flush(); } @@ -204,9 +204,9 @@ public void testBasicOperations() throws Exception { /* Test failure of creating an EZ passing a key that doesn't exist. */ final Path zone2 = new Path("/zone2"); fsWrapper.mkdir(zone2, FsPermission.getDirDefault(), false); - final String myKeyId = "mykeyid"; + final String myKeyName = "mykeyname"; try { - dfsAdmin.createEncryptionZone(zone2, myKeyId); + dfsAdmin.createEncryptionZone(zone2, myKeyName); fail("expected key doesn't exist"); } catch (IOException e) { assertExceptionContains("doesn't exist.", e); @@ -214,10 +214,10 @@ public void testBasicOperations() throws Exception { assertNumZones(1); /* Test success of creating an EZ when they key exists. 
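
Both the CryptoAdmin command and the tests above assume the key already exists in the NameNode's KeyProvider; otherwise createEncryptionZone fails with a "doesn't exist" IOException. A minimal sketch of pre-creating such a key, assuming a provider URI (for example a jceks:// keystore, as in the test setup) is configured under KeyProviderFactory.KEY_PROVIDER_PATH; the key name "ezKey" matches the earlier HdfsAdmin sketch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class CreateZoneKeySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Uses the first configured provider; fails if none is configured.
        KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
        provider.createKey("ezKey", KeyProvider.options(conf));
        provider.flush();
      }
    }
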
*/ - createKey(myKeyId); - dfsAdmin.createEncryptionZone(zone2, myKeyId); + createKey(myKeyName); + dfsAdmin.createEncryptionZone(zone2, myKeyName); assertNumZones(++numZones); - assertZonePresent(myKeyId, zone2.toString()); + assertZonePresent(myKeyName, zone2.toString()); /* Test failure of create encryption zones as a non super user. */ final UserGroupInformation user = UserGroupInformation. @@ -345,8 +345,8 @@ public void testReadWrite() throws Exception { // Roll the key of the encryption zone List zones = dfsAdmin.listEncryptionZones(); assertEquals("Expected 1 EZ", 1, zones.size()); - String keyId = zones.get(0).getKeyId(); - cluster.getNamesystem().getProvider().rollNewVersion(keyId); + String keyName = zones.get(0).getKeyName(); + cluster.getNamesystem().getProvider().rollNewVersion(keyName); // Read them back in and compare byte-by-byte validateFiles(baseFile, encFile1, len); // Write a new enc file and validate diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml index 36df642b104..2ff2f20ae41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml @@ -145,7 +145,7 @@ -fs NAMENODE -mkdir /foo -fs NAMENODE -ls /- - -createZone -path /foo -keyId doesntexist + -createZone -path /foo -keyName doesntexist -fs NAMENODE -rmdir /foo @@ -163,7 +163,7 @@ -fs NAMENODE -mkdir /foo -fs NAMENODE -ls /- - -createZone -path /foo -keyId mykey + -createZone -path /foo -keyName mykey -fs NAMENODE -rmdir /foo From 687ce1a5fca2d58a781e7382bf0333a16d39839d Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Wed, 23 Jul 2014 14:51:06 +0000 Subject: [PATCH 043/354] HDFS-6733. Creating encryption zone results in NPE when KeyProvider is null. (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612843 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 +++ .../hdfs/server/namenode/FSNamesystem.java | 5 ++++ .../hadoop/hdfs/TestEncryptionZones.java | 26 +++++++++++++++++-- 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 7170acb225a..c447d49b52c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -59,3 +59,6 @@ fs-encryption (Unreleased) OPTIMIZATIONS BUG FIXES + + HDFS-6733. Creating encryption zone results in NPE when + KeyProvider is null. 
(clamb) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 31a89bfde2e..9b7cb199583 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8448,6 +8448,11 @@ void createEncryptionZone(final String src, String keyNameArg) String keyName = keyNameArg; boolean success = false; try { + if (provider == null) { + throw new IOException( + "Can't create an encryption zone for " + src + + " since no key provider is available."); + } if (keyName == null || keyName.isEmpty()) { keyName = UUID.randomUUID().toString(); createNewKey(keyName, src); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 7b268ed16d4..421396bdf38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -68,6 +68,7 @@ public class TestEncryptionZones { private MiniDFSCluster cluster; private HdfsAdmin dfsAdmin; private DistributedFileSystem fs; + private File testRootDir; protected FileSystemTestWrapper fsWrapper; protected FileContextTestWrapper fcWrapper; @@ -78,14 +79,14 @@ public void setup() throws IOException { fsHelper = new FileSystemTestHelper(); // Set up java key store String testRoot = fsHelper.getTestRootDir(); - File testRootDir = new File(testRoot).getAbsoluteFile(); + testRootDir = new File(testRoot).getAbsoluteFile(); conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" ); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); fs = cluster.getFileSystem(); - fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem()); + fsWrapper = new FileSystemTestWrapper(fs); fcWrapper = new FileContextTestWrapper( FileContext.getFileContext(cluster.getURI(), conf)); dfsAdmin = new HdfsAdmin(cluster.getURI(), conf); @@ -429,4 +430,25 @@ public void testCipherSuiteNegotiation() throws Exception { } } + @Test(timeout = 120000) + public void testCreateEZWithNoProvider() throws Exception { + + final Configuration clusterConf = cluster.getConfiguration(0); + clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ""); + cluster.restartNameNode(true); + /* Test failure of create EZ on a directory that doesn't exist. */ + final Path zone1 = new Path("/zone1"); + /* Normal creation of an EZ */ + fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); + try { + dfsAdmin.createEncryptionZone(zone1, null); + fail("expected exception"); + } catch (IOException e) { + assertExceptionContains("since no key provider is available", e); + } + clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, + JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" + ); + cluster.restartNameNode(true); + } } From 512b756973f31359510b8af01bb26c9ddbe06ed9 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Wed, 23 Jul 2014 15:23:03 +0000 Subject: [PATCH 044/354] HDFS-6738. 
Remove unnecessary getEncryptionZoneForPath call in EZManager#createEncryptionZone. (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612849 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt | 3 +++ .../hadoop/hdfs/server/namenode/EncryptionZoneManager.java | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index c447d49b52c..e91ce99acb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -56,6 +56,9 @@ fs-encryption (Unreleased) HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang) + HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in + EZManager#createEncryptionZone. (clamb) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index f40d315676a..7b1331d1496 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -227,7 +227,6 @@ XAttr createEncryptionZone(String src, String keyName) // updating the xattr will call addEncryptionZone, // done this way to handle edit log loading dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE)); - ezi = getEncryptionZoneForPath(srcIIP); return ezXAttr; } From 552b4fb9f9a76b18605322c0b0e8072613d67773 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 23 Jul 2014 19:26:29 +0000 Subject: [PATCH 045/354] Merge from trunk to branch git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612928 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 21 +++ .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 + .../apache/hadoop/hdfs/DFSOutputStream.java | 50 +++--- .../datatransfer/DataTransferProtocol.java | 21 ++- .../hdfs/protocol/datatransfer/Receiver.java | 11 +- .../hdfs/protocol/datatransfer/Sender.java | 11 +- .../hadoop/hdfs/protocolPB/PBHelper.java | 67 +++++--- .../hadoop/hdfs/server/balancer/Balancer.java | 6 +- .../BlockPlacementPolicyDefault.java | 18 ++- .../BlockPlacementPolicyWithNodeGroup.java | 7 +- .../blockmanagement/DatanodeManager.java | 7 +- .../hdfs/server/datanode/BPOfferService.java | 3 +- .../hdfs/server/datanode/BlockReceiver.java | 10 +- .../hadoop/hdfs/server/datanode/DataNode.java | 150 ++++++++++++++---- .../hdfs/server/datanode/DataXceiver.java | 25 +-- .../datanode/fsdataset/FsDatasetSpi.java | 9 +- .../fsdataset/impl/FsDatasetImpl.java | 74 ++++++--- .../datanode/fsdataset/impl/FsVolumeList.java | 21 ++- .../hdfs/server/namenode/FSNamesystem.java | 27 +++- .../hdfs/server/protocol/BlockCommand.java | 15 +- .../hdfs/tools/DFSZKFailoverController.java | 5 + .../src/main/proto/DatanodeProtocol.proto | 1 + .../src/main/proto/datatransfer.proto | 4 + .../hadoop-hdfs/src/main/proto/hdfs.proto | 7 + .../src/main/resources/hdfs-default.xml | 13 ++ .../apt/HDFSHighAvailabilityWithQJM.apt.vm | 4 +- .../src/site/apt/HdfsMultihoming.apt.vm | 145 +++++++++++++++++ .../org/apache/hadoop/hdfs/DFSTestUtil.java | 7 +- 
.../hadoop/hdfs/TestDataTransferProtocol.java | 54 +++---- .../hadoop/hdfs/protocolPB/TestPBHelper.java | 4 +- .../server/datanode/BlockReportTestBase.java | 5 +- .../server/datanode/SimulatedFSDataset.java | 10 +- .../server/datanode/TestBlockRecovery.java | 6 +- .../server/datanode/TestBlockReplacement.java | 4 +- .../hdfs/server/datanode/TestDiskError.java | 5 +- .../datanode/TestSimulatedFSDataset.java | 4 +- .../fsdataset/impl/TestWriteToReplica.java | 27 ++-- .../namenode/TestFavoredNodesEndToEnd.java | 62 +++++--- .../hadoop/net/TestNetworkTopology.java | 45 ++++-- 39 files changed, 736 insertions(+), 232 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsMultihoming.apt.vm diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 37878727536..8c172bf03cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -307,6 +307,13 @@ Release 2.6.0 - UNRELEASED HDFS-6616. Add exclude-datanodes feature to WebHDFS redirection so that it will not redirect retries to the same datanode. (zhaoyunjiong via szetszwo) + HDFS-6702. Change DFSClient to pass the StorageType from the namenode to + datanodes and change datanode to write block replicas using the specified + storage type. (szetszwo) + + HDFS-6701. Make seed optional in NetworkTopology#sortByDistance. + (Ashwin Shankar via wang) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) @@ -344,6 +351,12 @@ Release 2.6.0 - UNRELEASED HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9) + HDFS-6704. Fix the command to launch JournalNode in HDFS-HA document. + (Akira AJISAKA via jing9) + + HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will cause + a null pointer exception. (Masatake Iwasaki via brandonli) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES @@ -589,6 +602,11 @@ Release 2.5.0 - UNRELEASED HDFS-6493. Change dfs.namenode.startup.delay.block.deletion to second instead of millisecond. (Juan Yu via wang) + HDFS-6680. BlockPlacementPolicyDefault does not choose favored nodes + correctly. (szetszwo) + + HDFS-6712. Document HDFS Multihoming Settings. (Arpit Agarwal) + OPTIMIZATIONS HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn) @@ -871,6 +889,9 @@ Release 2.5.0 - UNRELEASED HDFS-6378. NFS registration should timeout instead of hanging when portmap/rpcbind is not available (Abhiraj Butala via brandonli) + HDFS-6703. NFS: Files can be deleted from a read-only mount + (Srikanth Upputuri via brandonli) + BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS HDFS-6299. Protobuf for XAttr and client-side implementation. 
(Yi Liu via umamahesh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 2c0bdb0794f..c16c15d34b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -214,6 +214,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version"; public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT"; + public static final String DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK = "dfs.namenode.randomize-block-locations-per-block"; + public static final boolean DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT = false; + public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum"; public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index d161eff4968..8ee66352689 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -316,6 +316,7 @@ class DataStreamer extends Daemon { private DataInputStream blockReplyStream; private ResponseProcessor response = null; private volatile DatanodeInfo[] nodes = null; // list of targets for current block + private volatile StorageType[] storageTypes = null; private volatile String[] storageIDs = null; private final LoadingCache excludedNodes = CacheBuilder.newBuilder() @@ -420,10 +421,12 @@ private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat, } private void setPipeline(LocatedBlock lb) { - setPipeline(lb.getLocations(), lb.getStorageIDs()); + setPipeline(lb.getLocations(), lb.getStorageTypes(), lb.getStorageIDs()); } - private void setPipeline(DatanodeInfo[] nodes, String[] storageIDs) { + private void setPipeline(DatanodeInfo[] nodes, StorageType[] storageTypes, + String[] storageIDs) { this.nodes = nodes; + this.storageTypes = storageTypes; this.storageIDs = storageIDs; } @@ -449,7 +452,7 @@ private void endBlock() { this.setName("DataStreamer for file " + src); closeResponder(); closeStream(); - setPipeline(null, null); + setPipeline(null, null, null); stage = BlockConstructionStage.PIPELINE_SETUP_CREATE; } @@ -1034,10 +1037,12 @@ private void addDatanode2ExistingPipeline() throws IOException { //transfer replica final DatanodeInfo src = d == 0? 
nodes[1]: nodes[d - 1]; final DatanodeInfo[] targets = {nodes[d]}; - transfer(src, targets, lb.getBlockToken()); + final StorageType[] targetStorageTypes = {storageTypes[d]}; + transfer(src, targets, targetStorageTypes, lb.getBlockToken()); } private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets, + final StorageType[] targetStorageTypes, final Token blockToken) throws IOException { //transfer replica to the new datanode Socket sock = null; @@ -1059,7 +1064,7 @@ private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets, //send the TRANSFER_BLOCK request new Sender(out).transferBlock(block, blockToken, dfsClient.clientName, - targets); + targets, targetStorageTypes); out.flush(); //ack @@ -1138,16 +1143,15 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException { failed.add(nodes[errorIndex]); DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1]; - System.arraycopy(nodes, 0, newnodes, 0, errorIndex); - System.arraycopy(nodes, errorIndex+1, newnodes, errorIndex, - newnodes.length-errorIndex); + arraycopy(nodes, newnodes, errorIndex); + + final StorageType[] newStorageTypes = new StorageType[newnodes.length]; + arraycopy(storageTypes, newStorageTypes, errorIndex); final String[] newStorageIDs = new String[newnodes.length]; - System.arraycopy(storageIDs, 0, newStorageIDs, 0, errorIndex); - System.arraycopy(storageIDs, errorIndex+1, newStorageIDs, errorIndex, - newStorageIDs.length-errorIndex); + arraycopy(storageIDs, newStorageIDs, errorIndex); - setPipeline(newnodes, newStorageIDs); + setPipeline(newnodes, newStorageTypes, newStorageIDs); // Just took care of a node error while waiting for a node restart if (restartingNodeIndex >= 0) { @@ -1184,7 +1188,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException { // set up the pipeline again with the remaining nodes if (failPacket) { // for testing - success = createBlockOutputStream(nodes, newGS, isRecovery); + success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery); failPacket = false; try { // Give DNs time to send in bad reports. In real situations, @@ -1193,7 +1197,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException { Thread.sleep(2000); } catch (InterruptedException ie) {} } else { - success = createBlockOutputStream(nodes, newGS, isRecovery); + success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery); } if (restartingNodeIndex >= 0) { @@ -1245,6 +1249,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException { private LocatedBlock nextBlockOutputStream() throws IOException { LocatedBlock lb = null; DatanodeInfo[] nodes = null; + StorageType[] storageTypes = null; int count = dfsClient.getConf().nBlockWriteRetry; boolean success = false; ExtendedBlock oldBlock = block; @@ -1267,11 +1272,12 @@ private LocatedBlock nextBlockOutputStream() throws IOException { bytesSent = 0; accessToken = lb.getBlockToken(); nodes = lb.getLocations(); + storageTypes = lb.getStorageTypes(); // // Connect to first DataNode in the list. // - success = createBlockOutputStream(nodes, 0L, false); + success = createBlockOutputStream(nodes, storageTypes, 0L, false); if (!success) { DFSClient.LOG.info("Abandoning " + block); @@ -1292,8 +1298,8 @@ private LocatedBlock nextBlockOutputStream() throws IOException { // connects to the first datanode in the pipeline // Returns true if success, otherwise return failure. 
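
In the pipeline-recovery hunk above, the failed datanode is dropped from the nodes, storage-type and storage-ID arrays in lockstep through a small arraycopy helper (its definition is added near the bottom of this DFSOutputStream diff). A standalone sketch of that copy, with made-up datanode names:

    import java.util.Arrays;

    public class SkipIndexCopy {
      // Same shape as the DFSOutputStream helper: copy every element of srcs
      // into dsts except the one at skipIndex.
      static <T> void arraycopy(T[] srcs, T[] dsts, int skipIndex) {
        System.arraycopy(srcs, 0, dsts, 0, skipIndex);
        System.arraycopy(srcs, skipIndex + 1, dsts, skipIndex,
            dsts.length - skipIndex);
      }

      public static void main(String[] args) {
        String[] nodes = { "dn0", "dn1", "dn2" };       // hypothetical pipeline
        String[] remaining = new String[nodes.length - 1];
        arraycopy(nodes, remaining, 1);                 // dn1 reported the error
        System.out.println(Arrays.toString(remaining)); // [dn0, dn2]
      }
    }
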
// - private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS, - boolean recoveryFlag) { + private boolean createBlockOutputStream(DatanodeInfo[] nodes, + StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) { if (nodes.length == 0) { DFSClient.LOG.info("nodes are empty for write pipeline of block " + block); @@ -1335,9 +1341,10 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS, // Xmit header info to datanode // + BlockConstructionStage bcs = recoveryFlag? stage.getRecoveryStage(): stage; // send the request - new Sender(out).writeBlock(block, accessToken, dfsClient.clientName, - nodes, null, recoveryFlag? stage.getRecoveryStage() : stage, + new Sender(out).writeBlock(block, nodeStorageTypes[0], accessToken, + dfsClient.clientName, nodes, nodeStorageTypes, null, bcs, nodes.length, block.getNumBytes(), bytesSent, newGS, checksum, cachingStrategy.get()); @@ -2203,4 +2210,9 @@ ExtendedBlock getBlock() { public long getFileId() { return fileId; } + + private static void arraycopy(T[] srcs, T[] dsts, int skipIndex) { + System.arraycopy(srcs, 0, dsts, 0, skipIndex); + System.arraycopy(srcs, skipIndex+1, dsts, skipIndex, dsts.length-skipIndex); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java index d620c6b502b..d54d5bed002 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; @@ -71,11 +72,20 @@ public void readBlock(final ExtendedBlock blk, /** * Write a block to a datanode pipeline. - * + * The receiver datanode of this call is the next datanode in the pipeline. + * The other downstream datanodes are specified by the targets parameter. + * Note that the receiver {@link DatanodeInfo} is not required in the + * parameter list since the receiver datanode knows its info. However, the + * {@link StorageType} for storing the replica in the receiver datanode is a + * parameter since the receiver datanode may support multiple storage types. + * * @param blk the block being written. + * @param storageType for storing the replica in the receiver datanode. * @param blockToken security token for accessing the block. * @param clientName client's name. - * @param targets target datanodes in the pipeline. + * @param targets other downstream datanodes in the pipeline. + * @param targetStorageTypes target {@link StorageType}s corresponding + * to the target datanodes. * @param source source datanode. * @param stage pipeline stage. * @param pipelineSize the size of the pipeline. @@ -84,9 +94,11 @@ public void readBlock(final ExtendedBlock blk, * @param latestGenerationStamp the latest generation stamp of the block. 
*/ public void writeBlock(final ExtendedBlock blk, + final StorageType storageType, final Token blockToken, final String clientName, final DatanodeInfo[] targets, + final StorageType[] targetStorageTypes, final DatanodeInfo source, final BlockConstructionStage stage, final int pipelineSize, @@ -110,7 +122,8 @@ public void writeBlock(final ExtendedBlock blk, public void transferBlock(final ExtendedBlock blk, final Token blockToken, final String clientName, - final DatanodeInfo[] targets) throws IOException; + final DatanodeInfo[] targets, + final StorageType[] targetStorageTypes) throws IOException; /** * Request short circuit access file descriptors from a DataNode. @@ -148,11 +161,13 @@ public void requestShortCircuitFds(final ExtendedBlock blk, * It is used for balancing purpose. * * @param blk the block being replaced. + * @param storageType the {@link StorageType} for storing the block. * @param blockToken security token for accessing the block. * @param delHint the hint for deleting the block in the original datanode. * @param source the source datanode for receiving the block. */ public void replaceBlock(final ExtendedBlock blk, + final StorageType storageType, final Token blockToken, final String delHint, final DatanodeInfo source) throws IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java index 5a80f689da3..a09437c0b0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto; @@ -121,10 +122,13 @@ private void opReadBlock() throws IOException { /** Receive OP_WRITE_BLOCK */ private void opWriteBlock(DataInputStream in) throws IOException { final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in)); + final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList()); writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), + PBHelper.convertStorageType(proto.getStorageType()), PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), - PBHelper.convert(proto.getTargetsList()), + targets, + PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length), PBHelper.convert(proto.getSource()), fromProto(proto.getStage()), proto.getPipelineSize(), @@ -140,10 +144,12 @@ private void opWriteBlock(DataInputStream in) throws IOException { private void opTransferBlock(DataInputStream in) throws IOException { final OpTransferBlockProto proto = OpTransferBlockProto.parseFrom(vintPrefixed(in)); + final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList()); transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), - PBHelper.convert(proto.getTargetsList())); + targets, + 
PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length)); } /** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */ @@ -176,6 +182,7 @@ private void opRequestShortCircuitShm(DataInputStream in) throws IOException { private void opReplaceBlock(DataInputStream in) throws IOException { OpReplaceBlockProto proto = OpReplaceBlockProto.parseFrom(vintPrefixed(in)); replaceBlock(PBHelper.convert(proto.getHeader().getBlock()), + PBHelper.convertStorageType(proto.getStorageType()), PBHelper.convert(proto.getHeader().getToken()), proto.getDelHint(), PBHelper.convert(proto.getSource())); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java index c03f33fb276..68da52399c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto; @@ -111,9 +112,11 @@ public void readBlock(final ExtendedBlock blk, @Override public void writeBlock(final ExtendedBlock blk, + final StorageType storageType, final Token blockToken, final String clientName, final DatanodeInfo[] targets, + final StorageType[] targetStorageTypes, final DatanodeInfo source, final BlockConstructionStage stage, final int pipelineSize, @@ -130,7 +133,9 @@ public void writeBlock(final ExtendedBlock blk, OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder() .setHeader(header) + .setStorageType(PBHelper.convertStorageType(storageType)) .addAllTargets(PBHelper.convert(targets, 1)) + .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1)) .setStage(toProto(stage)) .setPipelineSize(pipelineSize) .setMinBytesRcvd(minBytesRcvd) @@ -150,12 +155,14 @@ public void writeBlock(final ExtendedBlock blk, public void transferBlock(final ExtendedBlock blk, final Token blockToken, final String clientName, - final DatanodeInfo[] targets) throws IOException { + final DatanodeInfo[] targets, + final StorageType[] targetStorageTypes) throws IOException { OpTransferBlockProto proto = OpTransferBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildClientHeader( blk, clientName, blockToken)) .addAllTargets(PBHelper.convert(targets)) + .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes)) .build(); send(out, Op.TRANSFER_BLOCK, proto); @@ -196,11 +203,13 @@ public void requestShortCircuitShm(String clientName) throws IOException { @Override public void replaceBlock(final ExtendedBlock blk, + final StorageType storageType, final Token blockToken, final String delHint, final DatanodeInfo source) throws IOException { OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken)) + .setStorageType(PBHelper.convertStorageType(storageType)) .setDelHint(delHint) .setSource(PBHelper.convertDatanodeInfo(source)) .build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index edbd284dfc4..bdf2f06b420 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -155,6 +155,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto; import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto; @@ -679,14 +680,8 @@ public static LocatedBlock convert(LocatedBlockProto proto) { targets[i] = PBHelper.convert(locs.get(i)); } - final int storageTypesCount = proto.getStorageTypesCount(); - final StorageType[] storageTypes; - if (storageTypesCount == 0) { - storageTypes = null; - } else { - Preconditions.checkState(storageTypesCount == locs.size()); - storageTypes = convertStorageTypeProtos(proto.getStorageTypesList()); - } + final StorageType[] storageTypes = convertStorageTypes( + proto.getStorageTypesList(), locs.size()); final int storageIDsCount = proto.getStorageIDsCount(); final String[] storageIDs; @@ -974,6 +969,20 @@ public static BlockCommand convert(BlockCommandProto blkCmd) { targets[i] = PBHelper.convert(targetList.get(i)); } + StorageType[][] targetStorageTypes = new StorageType[targetList.size()][]; + List targetStorageTypesList = blkCmd.getTargetStorageTypesList(); + if (targetStorageTypesList.isEmpty()) { // missing storage types + for(int i = 0; i < targetStorageTypes.length; i++) { + targetStorageTypes[i] = new StorageType[targets[i].length]; + Arrays.fill(targetStorageTypes[i], StorageType.DEFAULT); + } + } else { + for(int i = 0; i < targetStorageTypes.length; i++) { + List p = targetStorageTypesList.get(i).getStorageTypesList(); + targetStorageTypes[i] = p.toArray(new StorageType[p.size()]); + } + } + List targetStorageUuidsList = blkCmd.getTargetStorageUuidsList(); String[][] targetStorageIDs = new String[targetStorageUuidsList.size()][]; for(int i = 0; i < targetStorageIDs.length; i++) { @@ -996,7 +1005,7 @@ public static BlockCommand convert(BlockCommandProto blkCmd) { throw new AssertionError("Unknown action type: " + blkCmd.getAction()); } return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets, - targetStorageIDs); + targetStorageTypes, targetStorageIDs); } public static BlockIdCommand convert(BlockIdCommandProto blkIdCmd) { @@ -1620,8 +1629,25 @@ private static StorageState convertState(State state) { } } - private static StorageTypeProto convertStorageType( - StorageType type) { + public static List convertStorageTypes( + StorageType[] types) { + return convertStorageTypes(types, 0); + } + + public static List convertStorageTypes( + StorageType[] types, int startIdx) { + if (types == null) { + return null; + } + final List protos = new ArrayList( + types.length); + for (int i = startIdx; i < types.length; ++i) { + protos.add(convertStorageType(types[i])); + } + return protos; + } + + public static StorageTypeProto convertStorageType(StorageType type) { switch(type) { case DISK: return StorageTypeProto.DISK; @@ -1636,7 +1662,7 @@ private static 
StorageTypeProto convertStorageType( public static DatanodeStorage convert(DatanodeStorageProto s) { return new DatanodeStorage(s.getStorageUuid(), PBHelper.convertState(s.getState()), - PBHelper.convertType(s.getStorageType())); + PBHelper.convertStorageType(s.getStorageType())); } private static State convertState(StorageState state) { @@ -1649,7 +1675,7 @@ private static State convertState(StorageState state) { } } - private static StorageType convertType(StorageTypeProto type) { + public static StorageType convertStorageType(StorageTypeProto type) { switch(type) { case DISK: return StorageType.DISK; @@ -1661,11 +1687,16 @@ private static StorageType convertType(StorageTypeProto type) { } } - private static StorageType[] convertStorageTypeProtos( - List storageTypesList) { - final StorageType[] storageTypes = new StorageType[storageTypesList.size()]; - for (int i = 0; i < storageTypes.length; ++i) { - storageTypes[i] = PBHelper.convertType(storageTypesList.get(i)); + public static StorageType[] convertStorageTypes( + List storageTypesList, int expectedSize) { + final StorageType[] storageTypes = new StorageType[expectedSize]; + if (storageTypesList.size() != expectedSize) { // missing storage types + Preconditions.checkState(storageTypesList.isEmpty()); + Arrays.fill(storageTypes, StorageType.DEFAULT); + } else { + for (int i = 0; i < storageTypes.length; ++i) { + storageTypes[i] = convertStorageType(storageTypesList.get(i)); + } } return storageTypes; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 7482b2fcdce..5dbdd643cdf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -368,7 +369,7 @@ private void dispatch() { in = new DataInputStream(new BufferedInputStream(unbufIn, HdfsConstants.IO_FILE_BUFFER_SIZE)); - sendRequest(out, eb, accessToken); + sendRequest(out, eb, StorageType.DEFAULT, accessToken); receiveResponse(in); bytesMoved.addAndGet(block.getNumBytes()); LOG.info("Successfully moved " + this); @@ -400,8 +401,9 @@ private void dispatch() { /* Send a block replace request to the output stream*/ private void sendRequest(DataOutputStream out, ExtendedBlock eb, + StorageType storageType, Token accessToken) throws IOException { - new Sender(out).replaceBlock(eb, accessToken, + new Sender(out).replaceBlock(eb, storageType, accessToken, source.getStorageID(), proxySource.getDatanode()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index accdddf5250..e2026c1dfbb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -145,14 +145,14 @@ DatanodeStorageInfo[] chooseTarget(String src, List results = new ArrayList(); boolean avoidStaleNodes = stats != null && stats.isAvoidingStaleDataNodesForWrite(); - for (int i = 0; i < Math.min(favoredNodes.size(), numOfReplicas); i++) { + for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas; i++) { DatanodeDescriptor favoredNode = favoredNodes.get(i); // Choose a single node which is local to favoredNode. // 'results' is updated within chooseLocalNode final DatanodeStorageInfo target = chooseLocalStorage(favoredNode, favoriteAndExcludedNodes, blocksize, getMaxNodesPerRack(results.size(), numOfReplicas)[1], - results, avoidStaleNodes, storageType); + results, avoidStaleNodes, storageType, false); if (target == null) { LOG.warn("Could not find a target for file " + src + " with favored node " + favoredNode); @@ -271,7 +271,7 @@ private Node chooseTarget(int numOfReplicas, try { if (numOfResults == 0) { writer = chooseLocalStorage(writer, excludedNodes, blocksize, - maxNodesPerRack, results, avoidStaleNodes, storageType) + maxNodesPerRack, results, avoidStaleNodes, storageType, true) .getDatanodeDescriptor(); if (--numOfReplicas == 0) { return writer; @@ -345,12 +345,14 @@ protected DatanodeStorageInfo chooseLocalStorage(Node localMachine, int maxNodesPerRack, List results, boolean avoidStaleNodes, - StorageType storageType) + StorageType storageType, + boolean fallbackToLocalRack) throws NotEnoughReplicasException { // if no local machine, randomly choose one node - if (localMachine == null) + if (localMachine == null) { return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageType); + } if (preferLocalNode && localMachine instanceof DatanodeDescriptor) { DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine; // otherwise try local machine first @@ -363,7 +365,11 @@ protected DatanodeStorageInfo chooseLocalStorage(Node localMachine, } } } - } + } + + if (!fallbackToLocalRack) { + return null; + } // try a node on local rack return chooseLocalRack(localMachine, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageType); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java index 1069b4e3d75..b3ff6b9b1f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java @@ -70,7 +70,8 @@ public void initialize(Configuration conf, FSClusterStats stats, protected DatanodeStorageInfo chooseLocalStorage(Node localMachine, Set excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes, - StorageType storageType) throws NotEnoughReplicasException { + StorageType storageType, boolean fallbackToLocalRack + ) throws NotEnoughReplicasException { // if no local machine, randomly choose one node if (localMachine == null) return chooseRandom(NodeBase.ROOT, excludedNodes, @@ -97,6 +98,10 @@ protected DatanodeStorageInfo chooseLocalStorage(Node localMachine, if (chosenStorage != null) { 
return chosenStorage; } + + if (!fallbackToLocalRack) { + return null; + } // try a node on local rack return chooseLocalRack(localMachine, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageType); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 791c6dff5be..69b2b695415 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -345,7 +345,8 @@ private boolean isInactive(DatanodeInfo datanode) { /** Sort the located blocks by the distance to the target host. */ public void sortLocatedBlocks(final String targethost, - final List locatedblocks) { + final List locatedblocks, + boolean randomizeBlockLocationsPerBlock) { //sort the blocks // As it is possible for the separation of node manager and datanode, // here we should get node but not datanode only . @@ -372,8 +373,8 @@ public void sortLocatedBlocks(final String targethost, --lastActiveIndex; } int activeLen = lastActiveIndex + 1; - networktopology.sortByDistance(client, b.getLocations(), activeLen, - b.getBlock().getBlockId()); + networktopology.sortByDistance(client, b.getLocations(), activeLen, b + .getBlock().getBlockId(), randomizeBlockLocationsPerBlock); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index cab61364ed2..0a6549de8f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -575,7 +575,8 @@ private boolean processCommandFromActive(DatanodeCommand cmd, switch(cmd.getAction()) { case DatanodeProtocol.DNA_TRANSFER: // Send a copy of a block to another datanode - dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets()); + dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), + bcmd.getTargets(), bcmd.getTargetStorageTypes()); dn.metrics.incrBlocksReplicated(bcmd.getBlocks().length); break; case DatanodeProtocol.DNA_INVALIDATE: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 3d9ccdcca90..2f3909ba564 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -37,6 +37,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSOutputSummer; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -122,7 +123,8 @@ class BlockReceiver implements Closeable { private boolean syncOnClose; private long restartBudget; - BlockReceiver(final ExtendedBlock block, final DataInputStream in, + 
BlockReceiver(final ExtendedBlock block, final StorageType storageType, + final DataInputStream in, final String inAddr, final String myAddr, final BlockConstructionStage stage, final long newGs, final long minBytesRcvd, final long maxBytesRcvd, @@ -162,11 +164,11 @@ class BlockReceiver implements Closeable { // Open local disk out // if (isDatanode) { //replication or move - replicaInfo = datanode.data.createTemporary(block); + replicaInfo = datanode.data.createTemporary(storageType, block); } else { switch (stage) { case PIPELINE_SETUP_CREATE: - replicaInfo = datanode.data.createRbw(block); + replicaInfo = datanode.data.createRbw(storageType, block); datanode.notifyNamenodeReceivingBlock( block, replicaInfo.getStorageUuid()); break; @@ -198,7 +200,7 @@ class BlockReceiver implements Closeable { case TRANSFER_RBW: case TRANSFER_FINALIZED: // this is a transfer destination - replicaInfo = datanode.data.createTemporary(block); + replicaInfo = datanode.data.createTemporary(storageType, block); break; default: throw new IOException("Unsupported stage " + stage + " while receiving block " + block + " from " + inAddr); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 5f32e29cd29..b55abed7e46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -19,11 +19,66 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY; +import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY; +import static org.apache.hadoop.util.ExitUtil.terminate; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.protobuf.BlockingService; +import java.io.BufferedOutputStream; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.PrintStream; +import java.lang.management.ManagementFactory; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.URI; +import java.net.UnknownHostException; +import java.nio.channels.SocketChannel; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.management.ObjectName; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -39,10 +94,23 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.net.DomainPeerServer; import org.apache.hadoop.hdfs.net.TcpPeerServer; -import org.apache.hadoop.hdfs.protocol.*; -import org.apache.hadoop.hdfs.protocol.datatransfer.*; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; +import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; +import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; +import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; +import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; +import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer; @@ -50,9 
+118,20 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService; -import org.apache.hadoop.hdfs.protocolPB.*; -import org.apache.hadoop.hdfs.security.token.block.*; +import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB; +import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; +import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; +import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; +import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; @@ -65,7 +144,11 @@ import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; -import org.apache.hadoop.hdfs.server.protocol.*; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.resources.Param; import org.apache.hadoop.http.HttpConfig; @@ -88,22 +171,21 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.util.*; +import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.DiskChecker.DiskErrorException; +import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.JvmPauseMonitor; +import org.apache.hadoop.util.ServicePlugin; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.VersionInfo; import org.mortbay.util.ajax.JSON; -import javax.management.ObjectName; - -import java.io.*; -import java.lang.management.ManagementFactory; -import java.net.*; -import java.nio.channels.SocketChannel; -import java.security.PrivilegedExceptionAction; -import java.util.*; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import static org.apache.hadoop.util.ExitUtil.terminate; +import 
com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.protobuf.BlockingService; /********************************************************** * DataNode is a class (and program) that stores a set of @@ -1475,8 +1557,8 @@ int getXmitsInProgress() { return xmitsInProgress.get(); } - private void transferBlock(ExtendedBlock block, DatanodeInfo xferTargets[]) - throws IOException { + private void transferBlock(ExtendedBlock block, DatanodeInfo[] xferTargets, + StorageType[] xferTargetStorageTypes) throws IOException { BPOfferService bpos = getBPOSForBlock(block); DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId()); @@ -1512,16 +1594,17 @@ private void transferBlock(ExtendedBlock block, DatanodeInfo xferTargets[]) LOG.info(bpReg + " Starting thread to transfer " + block + " to " + xfersBuilder); - new Daemon(new DataTransfer(xferTargets, block, + new Daemon(new DataTransfer(xferTargets, xferTargetStorageTypes, block, BlockConstructionStage.PIPELINE_SETUP_CREATE, "")).start(); } } void transferBlocks(String poolId, Block blocks[], - DatanodeInfo xferTargets[][]) { + DatanodeInfo xferTargets[][], StorageType[][] xferTargetStorageTypes) { for (int i = 0; i < blocks.length; i++) { try { - transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i]); + transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i], + xferTargetStorageTypes[i]); } catch (IOException ie) { LOG.warn("Failed to transfer block " + blocks[i], ie); } @@ -1624,6 +1707,7 @@ CHECKSUM_SIZE depends on CHECKSUM_TYPE (usually, 4 for CRC32) */ private class DataTransfer implements Runnable { final DatanodeInfo[] targets; + final StorageType[] targetStorageTypes; final ExtendedBlock b; final BlockConstructionStage stage; final private DatanodeRegistration bpReg; @@ -1634,7 +1718,8 @@ private class DataTransfer implements Runnable { * Connect to the first item in the target list. Pass along the * entire target list, the block, and the data. 
*/ - DataTransfer(DatanodeInfo targets[], ExtendedBlock b, BlockConstructionStage stage, + DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes, + ExtendedBlock b, BlockConstructionStage stage, final String clientname) { if (DataTransferProtocol.LOG.isDebugEnabled()) { DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": " @@ -1644,6 +1729,7 @@ private class DataTransfer implements Runnable { + ", targests=" + Arrays.asList(targets)); } this.targets = targets; + this.targetStorageTypes = targetStorageTypes; this.b = b; this.stage = stage; BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId()); @@ -1702,7 +1788,8 @@ public void run() { false, false, true, DataNode.this, null, cachingStrategy); DatanodeInfo srcNode = new DatanodeInfo(bpReg); - new Sender(out).writeBlock(b, accessToken, clientname, targets, srcNode, + new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken, + clientname, targets, targetStorageTypes, srcNode, stage, 0, 0, 0, 0, blockSender.getChecksum(), cachingStrategy); // send data & checksum @@ -2403,7 +2490,8 @@ private void checkReadAccess(final ExtendedBlock block) throws IOException { * @param client client name */ void transferReplicaForPipelineRecovery(final ExtendedBlock b, - final DatanodeInfo[] targets, final String client) throws IOException { + final DatanodeInfo[] targets, final StorageType[] targetStorageTypes, + final String client) throws IOException { final long storedGS; final long visible; final BlockConstructionStage stage; @@ -2436,7 +2524,7 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b, b.setNumBytes(visible); if (targets.length > 0) { - new DataTransfer(targets, b, stage, client).run(); + new DataTransfer(targets, targetStorageTypes, b, stage, client).run(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 7b730905156..5ef6cc7ee22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -45,6 +45,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.hdfs.ExtendedBlockId; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -524,9 +525,11 @@ public void readBlock(final ExtendedBlock block, @Override public void writeBlock(final ExtendedBlock block, + final StorageType storageType, final Token blockToken, final String clientname, final DatanodeInfo[] targets, + final StorageType[] targetStorageTypes, final DatanodeInfo srcDataNode, final BlockConstructionStage stage, final int pipelineSize, @@ -590,12 +593,13 @@ public void writeBlock(final ExtendedBlock block, if (isDatanode || stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) { // open a block receiver - blockReceiver = new BlockReceiver(block, in, + blockReceiver = new BlockReceiver(block, storageType, in, peer.getRemoteAddressString(), peer.getLocalAddressString(), stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd, clientname, srcDataNode, datanode, requestedChecksum, cachingStrategy); + storageUuid = blockReceiver.getStorageUuid(); } else { storageUuid = datanode.data.recoverClose( @@ -636,10 +640,10 @@ public void 
writeBlock(final ExtendedBlock block, HdfsConstants.SMALL_BUFFER_SIZE)); mirrorIn = new DataInputStream(unbufMirrorIn); - new Sender(mirrorOut).writeBlock(originalBlock, blockToken, - clientname, targets, srcDataNode, stage, pipelineSize, - minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, - cachingStrategy); + new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], + blockToken, clientname, targets, targetStorageTypes, srcDataNode, + stage, pipelineSize, minBytesRcvd, maxBytesRcvd, + latestGenerationStamp, requestedChecksum, cachingStrategy); mirrorOut.flush(); @@ -754,7 +758,8 @@ public void writeBlock(final ExtendedBlock block, public void transferBlock(final ExtendedBlock blk, final Token blockToken, final String clientName, - final DatanodeInfo[] targets) throws IOException { + final DatanodeInfo[] targets, + final StorageType[] targetStorageTypes) throws IOException { checkAccess(socketOut, true, blk, blockToken, Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY); previousOpClientName = clientName; @@ -763,7 +768,8 @@ public void transferBlock(final ExtendedBlock blk, final DataOutputStream out = new DataOutputStream( getOutputStream()); try { - datanode.transferReplicaForPipelineRecovery(blk, targets, clientName); + datanode.transferReplicaForPipelineRecovery(blk, targets, + targetStorageTypes, clientName); writeResponse(Status.SUCCESS, null, out); } finally { IOUtils.closeStream(out); @@ -941,6 +947,7 @@ public void copyBlock(final ExtendedBlock block, @Override public void replaceBlock(final ExtendedBlock block, + final StorageType storageType, final Token blockToken, final String delHint, final DatanodeInfo proxySource) throws IOException { @@ -1026,8 +1033,8 @@ public void replaceBlock(final ExtendedBlock block, DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto( checksumInfo.getChecksum()); // open a block receiver and check if the block does not exist - blockReceiver = new BlockReceiver( - block, proxyReply, proxySock.getRemoteSocketAddress().toString(), + blockReceiver = new BlockReceiver(block, storageType, + proxyReply, proxySock.getRemoteSocketAddress().toString(), proxySock.getLocalSocketAddress().toString(), null, 0, 0, 0, "", null, datanode, remoteChecksum, CachingStrategy.newDropBehind()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index 8eb083a93f8..5e4f55e7339 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; @@ -176,8 +177,8 @@ public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff, * @return the meta info of the replica which is being written to * @throws IOException if an error occurs */ - public ReplicaInPipelineInterface createTemporary(ExtendedBlock b - ) throws IOException; + public ReplicaInPipelineInterface 
createTemporary(StorageType storageType, + ExtendedBlock b) throws IOException; /** * Creates a RBW replica and returns the meta info of the replica @@ -186,8 +187,8 @@ public ReplicaInPipelineInterface createTemporary(ExtendedBlock b * @return the meta info of the replica which is being written to * @throws IOException if an error occurs */ - public ReplicaInPipelineInterface createRbw(ExtendedBlock b - ) throws IOException; + public ReplicaInPipelineInterface createRbw(StorageType storageType, + ExtendedBlock b) throws IOException; /** * Recovers a RBW replica and returns the meta info of the replica diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 40d3e81ad62..b068c664fe3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -17,6 +17,28 @@ */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; +import java.io.File; +import java.io.FileDescriptor; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Executor; + +import javax.management.NotCompliantMBeanException; +import javax.management.ObjectName; +import javax.management.StandardMBean; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -24,12 +46,37 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.StorageType; -import org.apache.hadoop.hdfs.protocol.*; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; +import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.Storage; -import org.apache.hadoop.hdfs.server.datanode.*; -import org.apache.hadoop.hdfs.server.datanode.fsdataset.*; +import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; +import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.datanode.DataStorage; +import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; +import org.apache.hadoop.hdfs.server.datanode.Replica; +import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException; +import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten; +import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline; +import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; +import 
org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException; +import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery; +import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered; +import org.apache.hadoop.hdfs.server.datanode.StorageLocation; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; @@ -43,15 +90,6 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; -import javax.management.NotCompliantMBeanException; -import javax.management.ObjectName; -import javax.management.StandardMBean; -import java.io.*; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.util.*; -import java.util.concurrent.Executor; - /************************************************** * FSDataset manages a set of data blocks. Each block * has a unique name and an extent on disk. @@ -736,8 +774,8 @@ private void bumpReplicaGS(ReplicaInfo replicaInfo, } @Override // FsDatasetSpi - public synchronized ReplicaInPipeline createRbw(ExtendedBlock b) - throws IOException { + public synchronized ReplicaInPipeline createRbw(StorageType storageType, + ExtendedBlock b) throws IOException { ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId()); if (replicaInfo != null) { @@ -746,7 +784,7 @@ public synchronized ReplicaInPipeline createRbw(ExtendedBlock b) " and thus cannot be created."); } // create a new block - FsVolumeImpl v = volumes.getNextVolume(b.getNumBytes()); + FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes()); // create a rbw file to hold block in the designated volume File f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock()); ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(), @@ -874,8 +912,8 @@ public synchronized ReplicaInPipeline convertTemporaryToRbw( } @Override // FsDatasetSpi - public synchronized ReplicaInPipeline createTemporary(ExtendedBlock b) - throws IOException { + public synchronized ReplicaInPipeline createTemporary(StorageType storageType, + ExtendedBlock b) throws IOException { ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId()); if (replicaInfo != null) { throw new ReplicaAlreadyExistsException("Block " + b + @@ -883,7 +921,7 @@ public synchronized ReplicaInPipeline createTemporary(ExtendedBlock b) " and thus cannot be created."); } - FsVolumeImpl v = volumes.getNextVolume(b.getNumBytes()); + FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes()); // create a temporary file to hold block in the designated volume File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock()); ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(), diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java index 76b3ee8ba9a..59a5c9021cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java @@ -18,13 +18,17 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy; -import org.apache.hadoop.util.Time; import org.apache.hadoop.util.DiskChecker.DiskErrorException; +import org.apache.hadoop.util.Time; class FsVolumeList { /** @@ -52,11 +56,18 @@ int numberOfFailedVolumes() { * by a single thread and next volume is chosen with no concurrent * update to {@link #volumes}. * @param blockSize free space needed on the volume + * @param storageType the desired {@link StorageType} * @return next volume to store the block in. */ - // TODO should choose volume with storage type - synchronized FsVolumeImpl getNextVolume(long blockSize) throws IOException { - return blockChooser.chooseVolume(volumes, blockSize); + synchronized FsVolumeImpl getNextVolume(StorageType storageType, + long blockSize) throws IOException { + final List list = new ArrayList(volumes.size()); + for(FsVolumeImpl v : volumes) { + if (v.getStorageType() == storageType) { + list.add(v); + } + } + return blockChooser.chooseVolume(list, blockSize); } long getDfsUsed() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 9b7cb199583..9807c4f5b8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -86,6 +86,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT; + import static org.apache.hadoop.util.Time.now; import java.io.BufferedWriter; @@ -549,6 +552,8 @@ private void logAuditEvent(boolean succeeded, private final FSImage fsImage; + private boolean randomizeBlockLocationsPerBlock; + /** * Notify that loading of this FSDirectory is complete, and * it is imageLoaded for use @@ -861,6 +866,10 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { alwaysUseDelegationTokensForTests = conf.getBoolean( DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT); + + this.randomizeBlockLocationsPerBlock = 
conf.getBoolean( + DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK, + DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT); this.dtSecretManager = createDelegationTokenSecretManager(conf); this.dir = new FSDirectory(this, conf); @@ -1761,17 +1770,17 @@ LocatedBlocks getBlockLocations(String clientMachine, String src, LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true, true); if (blocks != null) { - blockManager.getDatanodeManager().sortLocatedBlocks( - clientMachine, blocks.getLocatedBlocks()); - + blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine, + blocks.getLocatedBlocks(), randomizeBlockLocationsPerBlock); + // lastBlock is not part of getLocatedBlocks(), might need to sort it too LocatedBlock lastBlock = blocks.getLastLocatedBlock(); if (lastBlock != null) { ArrayList lastBlockList = Lists.newArrayListWithCapacity(1); lastBlockList.add(lastBlock); - blockManager.getDatanodeManager().sortLocatedBlocks( - clientMachine, lastBlockList); + blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine, + lastBlockList, randomizeBlockLocationsPerBlock); } } return blocks; @@ -2580,11 +2589,13 @@ private void startFileInternal(FSPermissionChecker pc, String src, // Path is within an EZ and we have provided encryption parameters. // Make sure that the generated EDEK matches the settings of the EZ. String ezKeyName = dir.getKeyName(iip); - if (!ezKeyName.equals(edek.getKeyName())) { + if (!ezKeyName.equals(edek.getEncryptionKeyName())) { throw new RetryStartFileException(); } - feInfo = new FileEncryptionInfo(suite, edek.getEncryptedKey() - .getMaterial(), edek.getIv(), edek.getKeyVersionName()); + feInfo = new FileEncryptionInfo(suite, + edek.getEncryptedKeyVersion().getMaterial(), + edek.getEncryptedKeyIv(), + edek.getEncryptionKeyVersionName()); Preconditions.checkNotNull(feInfo); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java index 1798d664f93..f17d702f483 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; @@ -50,6 +51,7 @@ public class BlockCommand extends DatanodeCommand { final String poolId; final Block[] blocks; final DatanodeInfo[][] targets; + final StorageType[][] targetStorageTypes; final String[][] targetStorageIDs; /** @@ -62,17 +64,20 @@ public BlockCommand(int action, String poolId, this.poolId = poolId; blocks = new Block[blocktargetlist.size()]; targets = new DatanodeInfo[blocks.length][]; + targetStorageTypes = new StorageType[blocks.length][]; targetStorageIDs = new String[blocks.length][]; for(int i = 0; i < blocks.length; i++) { BlockTargetPair p = blocktargetlist.get(i); blocks[i] = p.block; targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets); + targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets); targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets); } } private static final 
DatanodeInfo[][] EMPTY_TARGET_DATANODES = {}; + private static final StorageType[][] EMPTY_TARGET_STORAGE_TYPES = {}; private static final String[][] EMPTY_TARGET_STORAGEIDS = {}; /** @@ -81,7 +86,7 @@ public BlockCommand(int action, String poolId, */ public BlockCommand(int action, String poolId, Block blocks[]) { this(action, poolId, blocks, EMPTY_TARGET_DATANODES, - EMPTY_TARGET_STORAGEIDS); + EMPTY_TARGET_STORAGE_TYPES, EMPTY_TARGET_STORAGEIDS); } /** @@ -89,11 +94,13 @@ public BlockCommand(int action, String poolId, Block blocks[]) { * @param blocks blocks related to the action */ public BlockCommand(int action, String poolId, Block[] blocks, - DatanodeInfo[][] targets, String[][] targetStorageIDs) { + DatanodeInfo[][] targets, StorageType[][] targetStorageTypes, + String[][] targetStorageIDs) { super(action); this.poolId = poolId; this.blocks = blocks; this.targets = targets; + this.targetStorageTypes = targetStorageTypes; this.targetStorageIDs = targetStorageIDs; } @@ -109,6 +116,10 @@ public DatanodeInfo[][] getTargets() { return targets; } + public StorageType[][] getTargetStorageTypes() { + return targetStorageTypes; + } + public String[][] getTargetStorageIDs() { return targetStorageIDs; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java index 8b7a9956c61..a42b1e31895 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java @@ -122,6 +122,11 @@ public static DFSZKFailoverController create(Configuration conf) { "HA is not enabled for this namenode."); } String nnId = HAUtil.getNameNodeId(localNNConf, nsId); + if (nnId == null) { + String msg = "Could not get the namenode ID of this node. 
" + + "You may run zkfc on the node other than namenode."; + throw new HadoopIllegalArgumentException(msg); + } NameNode.initializeGenericKeys(localNNConf, nsId, nnId); DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index d6cc3d37777..2afcf057f70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -113,6 +113,7 @@ message BlockCommandProto { repeated BlockProto blocks = 3; repeated DatanodeInfosProto targets = 4; repeated StorageUuidsProto targetStorageUuids = 5; + repeated StorageTypesProto targetStorageTypes = 6; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto index d9147138948..9b4ba339d23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto @@ -107,17 +107,21 @@ message OpWriteBlockProto { */ required ChecksumProto requestedChecksum = 9; optional CachingStrategyProto cachingStrategy = 10; + optional StorageTypeProto storageType = 11 [default = DISK]; + repeated StorageTypeProto targetStorageTypes = 12; } message OpTransferBlockProto { required ClientOperationHeaderProto header = 1; repeated DatanodeInfoProto targets = 2; + repeated StorageTypeProto targetStorageTypes = 3; } message OpReplaceBlockProto { required BaseHeaderProto header = 1; required string delHint = 2; required DatanodeInfoProto source = 3; + optional StorageTypeProto storageType = 4 [default = DISK]; } message OpCopyBlockProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index da53be1006e..2557b48ed30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -136,6 +136,13 @@ enum StorageTypeProto { SSD = 2; } +/** + * A list of storage types. + */ +message StorageTypesProto { + repeated StorageTypeProto storageTypes = 1; +} + /** * A list of storage IDs. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index f80b9f85e51..fea88166c24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2039,4 +2039,17 @@ + + dfs.namenode.randomize-block-locations-per-block + false + When fetching replica locations of a block, the replicas + are sorted based on network distance. This configuration parameter + determines whether the replicas at the same network distance are randomly + shuffled. By default, this is false, such that repeated requests for a block's + replicas always result in the same order. This potentially improves page cache + behavior. However, for some network topologies, it is desirable to shuffle this + order for better load balancing. 
+ + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm index 3a34dffb682..ff6a42c2592 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm @@ -416,8 +416,8 @@ HDFS High Availability Using the Quorum Journal Manager After all of the necessary configuration options have been set, you must start the JournalNode daemons on the set of machines where they will run. This - can be done by running the command "" and waiting - for the daemon to start on each of the relevant machines. + can be done by running the command "" and + waiting for the daemon to start on each of the relevant machines. Once the JournalNodes have been started, one must initially synchronize the two HA NameNodes' on-disk metadata. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsMultihoming.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsMultihoming.apt.vm new file mode 100644 index 00000000000..2be45671e28 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsMultihoming.apt.vm @@ -0,0 +1,145 @@ +~~ Licensed under the Apache License, Version 2.0 (the "License"); +~~ you may not use this file except in compliance with the License. +~~ You may obtain a copy of the License at +~~ +~~ http://www.apache.org/licenses/LICENSE-2.0 +~~ +~~ Unless required by applicable law or agreed to in writing, software +~~ distributed under the License is distributed on an "AS IS" BASIS, +~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +~~ See the License for the specific language governing permissions and +~~ limitations under the License. See accompanying LICENSE file. + + --- + Hadoop Distributed File System-${project.version} - Support for Multi-Homed Networks + --- + --- + ${maven.build.timestamp} + +HDFS Support for Multihomed Networks + + This document is targetted to cluster administrators deploying <<>> in + multihomed networks. Similar support for <<>>/<<>> is + work in progress and will be documented when available. + +%{toc|section=1|fromDepth=0} + +* Multihoming Background + + In multihomed networks the cluster nodes are connected to more than one + network interface. There could be multiple reasons for doing so. + + [[1]] <>: Security requirements may dictate that intra-cluster + traffic be confined to a different network than the network used to + transfer data in and out of the cluster. + + [[2]] <>: Intra-cluster traffic may use one or more high bandwidth + interconnects like Fiber Channel, Infiniband or 10GbE. + + [[3]] <>: The nodes may have multiple network adapters + connected to a single network to handle network adapter failure. + + + Note that NIC Bonding (also known as NIC Teaming or Link + Aggregation) is a related but separate topic. The following settings + are usually not applicable to a NIC bonding configuration which handles + multiplexing and failover transparently while presenting a single 'logical + network' to applications. + +* Fixing Hadoop Issues In Multihomed Environments + +** Ensuring HDFS Daemons Bind All Interfaces + + By default <<>> endpoints are specified as either hostnames or IP addresses. + In either case <<>> daemons will bind to a single IP address making + the daemons unreachable from other networks. 
+ + The solution is to have separate setting for server endpoints to force binding + the wildcard IP address <<>> i.e. <<<0.0.0.0>>>. Do NOT supply a port + number with any of these settings. + +---- + + dfs.namenode.rpc-bind-host + 0.0.0.0 + + The actual address the RPC server will bind to. If this optional address is + set, it overrides only the hostname portion of dfs.namenode.rpc-address. + It can also be specified per name node or name service for HA/Federation. + This is useful for making the name node listen on all interfaces by + setting it to 0.0.0.0. + + + + + dfs.namenode.servicerpc-bind-host + 0.0.0.0 + + The actual address the service RPC server will bind to. If this optional address is + set, it overrides only the hostname portion of dfs.namenode.servicerpc-address. + It can also be specified per name node or name service for HA/Federation. + This is useful for making the name node listen on all interfaces by + setting it to 0.0.0.0. + + + + + dfs.namenode.http-bind-host + 0.0.0.0 + + The actual adress the HTTP server will bind to. If this optional address + is set, it overrides only the hostname portion of dfs.namenode.http-address. + It can also be specified per name node or name service for HA/Federation. + This is useful for making the name node HTTP server listen on all + interfaces by setting it to 0.0.0.0. + + + + + dfs.namenode.https-bind-host + 0.0.0.0 + + The actual adress the HTTPS server will bind to. If this optional address + is set, it overrides only the hostname portion of dfs.namenode.https-address. + It can also be specified per name node or name service for HA/Federation. + This is useful for making the name node HTTPS server listen on all + interfaces by setting it to 0.0.0.0. + + +---- + +** Clients use Hostnames when connecting to DataNodes + + By default <<>> clients connect to DataNodes using the IP address + provided by the NameNode. Depending on the network configuration this + IP address may be unreachable by the clients. The fix is letting clients perform + their own DNS resolution of the DataNode hostname. The following setting + enables this behavior. + +---- + + dfs.client.use.datanode.hostname + true + Whether clients should use datanode hostnames when + connecting to datanodes. + + +---- + +** DataNodes use HostNames when connecting to other DataNodes + + Rarely, the NameNode-resolved IP address for a DataNode may be unreachable + from other DataNodes. The fix is to force DataNodes to perform their own + DNS resolution for inter-DataNode connections. The following setting enables + this behavior. + +---- + + dfs.datanode.use.datanode.hostname + true + Whether datanodes should use datanode hostnames when + connecting to other datanodes for data transfer. 
+ + +---- + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index bebb946d27c..949713e6f44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -380,7 +380,7 @@ public static boolean allBlockReplicasCorrupt(MiniDFSCluster cluster, */ public static void waitForReplication(MiniDFSCluster cluster, ExtendedBlock b, int racks, int replicas, int neededReplicas) - throws IOException, TimeoutException, InterruptedException { + throws TimeoutException, InterruptedException { int curRacks = 0; int curReplicas = 0; int curNeededReplicas = 0; @@ -414,7 +414,7 @@ public static void waitForReplication(MiniDFSCluster cluster, ExtendedBlock b, */ public static void waitCorruptReplicas(FileSystem fs, FSNamesystem ns, Path file, ExtendedBlock b, int corruptRepls) - throws IOException, TimeoutException, InterruptedException { + throws TimeoutException, InterruptedException { int count = 0; final int ATTEMPTS = 50; int repls = ns.getBlockManager().numCorruptReplicas(b.getLocalBlock()); @@ -839,7 +839,8 @@ public static BlockOpResponseProto transferRbw(final ExtendedBlock b, // send the request new Sender(out).transferBlock(b, new Token(), - dfsClient.clientName, new DatanodeInfo[]{datanodes[1]}); + dfsClient.clientName, new DatanodeInfo[]{datanodes[1]}, + new StorageType[]{StorageType.DEFAULT}); out.flush(); return BlockOpResponseProto.parseDelimitedFrom(in); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index 244603eac62..bcb68e9ce71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -125,17 +125,16 @@ private void sendRecvData(String testDescription, throw eof; } - LOG.info("Received: " +new String(retBuf)); - LOG.info("Expected: " + StringUtils.byteToHexString(recvBuf.toByteArray())); + String received = StringUtils.byteToHexString(retBuf); + String expected = StringUtils.byteToHexString(recvBuf.toByteArray()); + LOG.info("Received: " + received); + LOG.info("Expected: " + expected); if (eofExpected) { throw new IOException("Did not recieve IOException when an exception " + "is expected while reading from " + datanode); } - - byte[] needed = recvBuf.toByteArray(); - assertEquals(StringUtils.byteToHexString(needed), - StringUtils.byteToHexString(retBuf)); + assertEquals(expected, received); } finally { IOUtils.closeSocket(sock); } @@ -184,10 +183,7 @@ private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long n String description, Boolean eofExcepted) throws IOException { sendBuf.reset(); recvBuf.reset(); - sender.writeBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - new DatanodeInfo[1], null, stage, - 0, block.getNumBytes(), block.getNumBytes(), newGS, - DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy()); + writeBlock(block, stage, newGS, DEFAULT_CHECKSUM); if (eofExcepted) { sendResponse(Status.ERROR, null, null, recvOut); sendRecvData(description, true); @@ -343,10 +339,7 @@ public void testDataTransferProtocol() throws IOException { MiniDFSCluster cluster = new 
MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); try { cluster.waitActive(); - DFSClient dfsClient = new DFSClient( - new InetSocketAddress("localhost", cluster.getNameNodePort()), - conf); - datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0]; + datanode = cluster.getFileSystem().getDataNodeStats(DatanodeReportType.LIVE)[0]; dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr()); FileSystem fileSys = cluster.getFileSystem(); @@ -381,23 +374,14 @@ public void testDataTransferProtocol() throws IOException { DataChecksum badChecksum = Mockito.spy(DEFAULT_CHECKSUM); Mockito.doReturn(-1).when(badChecksum).getBytesPerChecksum(); - sender.writeBlock(new ExtendedBlock(poolId, newBlockId), - BlockTokenSecretManager.DUMMY_TOKEN, "cl", - new DatanodeInfo[1], null, - BlockConstructionStage.PIPELINE_SETUP_CREATE, - 0, 0L, 0L, 0L, - badChecksum, CachingStrategy.newDefaultStrategy()); + writeBlock(poolId, newBlockId, badChecksum); recvBuf.reset(); sendResponse(Status.ERROR, null, null, recvOut); sendRecvData("wrong bytesPerChecksum while writing", true); sendBuf.reset(); recvBuf.reset(); - sender.writeBlock(new ExtendedBlock(poolId, ++newBlockId), - BlockTokenSecretManager.DUMMY_TOKEN, "cl", - new DatanodeInfo[1], null, - BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L, - DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy()); + writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM); PacketHeader hdr = new PacketHeader( 4, // size of packet @@ -416,11 +400,7 @@ public void testDataTransferProtocol() throws IOException { // test for writing a valid zero size block sendBuf.reset(); recvBuf.reset(); - sender.writeBlock(new ExtendedBlock(poolId, ++newBlockId), - BlockTokenSecretManager.DUMMY_TOKEN, "cl", - new DatanodeInfo[1], null, - BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0L, 0L, 0L, - DEFAULT_CHECKSUM, CachingStrategy.newDefaultStrategy()); + writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM); hdr = new PacketHeader( 8, // size of packet @@ -532,4 +512,18 @@ public void testPacketHeader() throws IOException { assertTrue(hdr.sanityCheck(99)); assertFalse(hdr.sanityCheck(100)); } + + void writeBlock(String poolId, long blockId, DataChecksum checksum) throws IOException { + writeBlock(new ExtendedBlock(poolId, blockId), + BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, checksum); + } + + void writeBlock(ExtendedBlock block, BlockConstructionStage stage, + long newGS, DataChecksum checksum) throws IOException { + sender.writeBlock(block, StorageType.DEFAULT, + BlockTokenSecretManager.DUMMY_TOKEN, "cl", + new DatanodeInfo[1], new StorageType[1], null, stage, + 0, block.getNumBytes(), block.getNumBytes(), newGS, + checksum, CachingStrategy.newDefaultStrategy()); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index e527439bc28..6c8547ebf8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -550,8 +550,10 @@ public void testConvertBlockCommand() { dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo(); dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo(); String[][] storageIDs = {{"s00"}, {"s10", "s11"}}; + StorageType[][] storageTypes = {{StorageType.DEFAULT}, + {StorageType.DEFAULT, StorageType.DEFAULT}}; BlockCommand bc = new 
BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1", - blocks, dnInfos, storageIDs); + blocks, dnInfos, storageTypes, storageIDs); BlockCommandProto bcProto = PBHelper.convert(bc); BlockCommand bc2 = PBHelper.convert(bcProto); assertEquals(bc.getAction(), bc2.getAction()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java index d2f58a877eb..9ea6c5186a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -324,7 +325,7 @@ public void blockReport_02() throws IOException { public void blockReport_03() throws IOException { final String METHOD_NAME = GenericTestUtils.getMethodName(); Path filePath = new Path("/" + METHOD_NAME + ".dat"); - ArrayList blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath); + writeFile(METHOD_NAME, FILE_SIZE, filePath); // all blocks belong to the same file, hence same BP DataNode dn = cluster.getDataNodes().get(DN_N0); @@ -363,7 +364,7 @@ public void blockReport_04() throws IOException { // Create a bogus new block which will not be present on the namenode. ExtendedBlock b = new ExtendedBlock( poolId, rand.nextLong(), 1024L, rand.nextLong()); - dn.getFSDataset().createRbw(b); + dn.getFSDataset().createRbw(StorageType.DEFAULT, b); DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId); StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index baf046af847..e3db5350029 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -744,14 +744,14 @@ public synchronized ReplicaInPipelineInterface recoverRbw(ExtendedBlock b, } @Override // FsDatasetSpi - public synchronized ReplicaInPipelineInterface createRbw(ExtendedBlock b) - throws IOException { - return createTemporary(b); + public synchronized ReplicaInPipelineInterface createRbw( + StorageType storageType, ExtendedBlock b) throws IOException { + return createTemporary(storageType, b); } @Override // FsDatasetSpi - public synchronized ReplicaInPipelineInterface createTemporary(ExtendedBlock b) - throws IOException { + public synchronized ReplicaInPipelineInterface createTemporary( + StorageType storageType, ExtendedBlock b) throws IOException { if (isValidBlock(b)) { throw new ReplicaAlreadyExistsException("Block " + b + " is valid, and cannot be written to."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 4ff942b1f8e..a3622a465ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -531,7 +532,7 @@ public void testNoReplicaUnderRecovery() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); } - dn.data.createRbw(block); + dn.data.createRbw(StorageType.DEFAULT, block); try { dn.syncBlock(rBlock, initBlockRecords(dn)); fail("Sync should fail"); @@ -554,7 +555,8 @@ public void testNotMatchedReplicaID() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Running " + GenericTestUtils.getMethodName()); } - ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block); + ReplicaInPipelineInterface replicaInfo = dn.data.createRbw( + StorageType.DEFAULT, block); ReplicaOutputStreams streams = null; try { streams = replicaInfo.createStreams(true, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java index 02faa595e4c..478b6d1546b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; @@ -264,7 +265,8 @@ private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source, sock.setKeepAlive(true); // sendRequest DataOutputStream out = new DataOutputStream(sock.getOutputStream()); - new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN, + new Sender(out).replaceBlock(block, StorageType.DEFAULT, + BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(), sourceProxy); out.flush(); // receiveResponse diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java index f75bf2465ff..798b7b7c705 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import 
org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -147,9 +148,9 @@ public void testReplicationError() throws Exception { DataChecksum checksum = DataChecksum.newDataChecksum( DataChecksum.Type.CRC32, 512); - new Sender(out).writeBlock(block.getBlock(), + new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT, BlockTokenSecretManager.DUMMY_TOKEN, "", - new DatanodeInfo[0], null, + new DatanodeInfo[0], new StorageType[0], null, BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L, checksum, CachingStrategy.newDefaultStrategy()); out.flush(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java index d03e5ea0252..bd6c3de2266 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -65,7 +66,8 @@ int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId) ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); // we pass expected len as zero, - fsdataset should use the sizeof actual // data written - ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b); + ReplicaInPipelineInterface bInfo = fsdataset.createRbw( + StorageType.DEFAULT, b); ReplicaOutputStreams out = bInfo.createStreams(true, DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512)); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java index dfefc1e8437..b8246c31913 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; @@ -147,7 +148,7 @@ private ExtendedBlock[] setup(String bpid, FsDatasetImpl dataSet) throws IOExcep }; ReplicaMap replicasMap = dataSet.volumeMap; - FsVolumeImpl vol = dataSet.volumes.getNextVolume(0); + FsVolumeImpl vol = dataSet.volumes.getNextVolume(StorageType.DEFAULT, 0); ReplicaInfo replicaInfo = new FinalizedReplica( blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile()); replicasMap.add(bpid, replicaInfo); @@ -357,7 +358,7 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw } try { - dataSet.createRbw(blocks[FINALIZED]); + dataSet.createRbw(StorageType.DEFAULT, blocks[FINALIZED]); Assert.fail("Should not have created a replica that's already " + 
"finalized " + blocks[FINALIZED]); } catch (ReplicaAlreadyExistsException e) { @@ -375,7 +376,7 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw } try { - dataSet.createRbw(blocks[TEMPORARY]); + dataSet.createRbw(StorageType.DEFAULT, blocks[TEMPORARY]); Assert.fail("Should not have created a replica that had created as " + "temporary " + blocks[TEMPORARY]); } catch (ReplicaAlreadyExistsException e) { @@ -385,7 +386,7 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw 0L, blocks[RBW].getNumBytes()); // expect to be successful try { - dataSet.createRbw(blocks[RBW]); + dataSet.createRbw(StorageType.DEFAULT, blocks[RBW]); Assert.fail("Should not have created a replica that had created as RBW " + blocks[RBW]); } catch (ReplicaAlreadyExistsException e) { @@ -401,7 +402,7 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw } try { - dataSet.createRbw(blocks[RWR]); + dataSet.createRbw(StorageType.DEFAULT, blocks[RWR]); Assert.fail("Should not have created a replica that was waiting to be " + "recovered " + blocks[RWR]); } catch (ReplicaAlreadyExistsException e) { @@ -417,7 +418,7 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw } try { - dataSet.createRbw(blocks[RUR]); + dataSet.createRbw(StorageType.DEFAULT, blocks[RUR]); Assert.fail("Should not have created a replica that was under recovery " + blocks[RUR]); } catch (ReplicaAlreadyExistsException e) { @@ -434,45 +435,45 @@ private void testWriteToRbw(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throw e.getMessage().contains(ReplicaNotFoundException.NON_EXISTENT_REPLICA)); } - dataSet.createRbw(blocks[NON_EXISTENT]); + dataSet.createRbw(StorageType.DEFAULT, blocks[NON_EXISTENT]); } private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException { try { - dataSet.createTemporary(blocks[FINALIZED]); + dataSet.createTemporary(StorageType.DEFAULT, blocks[FINALIZED]); Assert.fail("Should not have created a temporary replica that was " + "finalized " + blocks[FINALIZED]); } catch (ReplicaAlreadyExistsException e) { } try { - dataSet.createTemporary(blocks[TEMPORARY]); + dataSet.createTemporary(StorageType.DEFAULT, blocks[TEMPORARY]); Assert.fail("Should not have created a replica that had created as" + "temporary " + blocks[TEMPORARY]); } catch (ReplicaAlreadyExistsException e) { } try { - dataSet.createTemporary(blocks[RBW]); + dataSet.createTemporary(StorageType.DEFAULT, blocks[RBW]); Assert.fail("Should not have created a replica that had created as RBW " + blocks[RBW]); } catch (ReplicaAlreadyExistsException e) { } try { - dataSet.createTemporary(blocks[RWR]); + dataSet.createTemporary(StorageType.DEFAULT, blocks[RWR]); Assert.fail("Should not have created a replica that was waiting to be " + "recovered " + blocks[RWR]); } catch (ReplicaAlreadyExistsException e) { } try { - dataSet.createTemporary(blocks[RUR]); + dataSet.createTemporary(StorageType.DEFAULT, blocks[RUR]); Assert.fail("Should not have created a replica that was under recovery " + blocks[RUR]); } catch (ReplicaAlreadyExistsException e) { } - dataSet.createTemporary(blocks[NON_EXISTENT]); + dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java index 
ea5bb7a91ed..4f110372be8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java @@ -18,32 +18,41 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; -import java.util.ArrayList; -import java.util.Random; -import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Random; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.junit.Test; +import org.apache.log4j.Level; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.BeforeClass; +import org.junit.Test; public class TestFavoredNodesEndToEnd { + { + ((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class)).getLogger().setLevel(Level.ALL); + } + private static MiniDFSCluster cluster; private static Configuration conf; private final static int NUM_DATA_NODES = 10; @@ -79,7 +88,7 @@ public void testFavoredNodesEndToEnd() throws Exception { InetSocketAddress datanode[] = getDatanodes(rand); Path p = new Path("/filename"+i); FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true, - 4096, (short)3, (long)4096, null, datanode); + 4096, (short)3, 4096L, null, datanode); out.write(SOME_BYTES); out.close(); BlockLocation[] locations = getBlockLocations(p); @@ -98,14 +107,13 @@ public void testWhenFavoredNodesNotPresent() throws Exception { //get some other nodes. 
In other words, the write to hdfs should not fail //and if we do getBlockLocations on the file, we should see one blklocation //and three hosts for that - Random rand = new Random(System.currentTimeMillis()); InetSocketAddress arbitraryAddrs[] = new InetSocketAddress[3]; for (int i = 0; i < 3; i++) { arbitraryAddrs[i] = getArbitraryLocalHostAddr(); } Path p = new Path("/filename-foo-bar"); FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true, - 4096, (short)3, (long)4096, null, arbitraryAddrs); + 4096, (short)3, 4096L, null, arbitraryAddrs); out.write(SOME_BYTES); out.close(); getBlockLocations(p); @@ -113,35 +121,41 @@ public void testWhenFavoredNodesNotPresent() throws Exception { @Test(timeout=180000) public void testWhenSomeNodesAreNotGood() throws Exception { + // 4 favored nodes + final InetSocketAddress addrs[] = new InetSocketAddress[4]; + final String[] hosts = new String[addrs.length]; + for (int i = 0; i < addrs.length; i++) { + addrs[i] = datanodes.get(i).getXferAddress(); + hosts[i] = addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort(); + } + //make some datanode not "good" so that even if the client prefers it, //the namenode would not give it as a replica to write to DatanodeInfo d = cluster.getNameNode().getNamesystem().getBlockManager() .getDatanodeManager().getDatanodeByXferAddr( - datanodes.get(0).getXferAddress().getAddress().getHostAddress(), - datanodes.get(0).getXferAddress().getPort()); + addrs[0].getAddress().getHostAddress(), addrs[0].getPort()); //set the decommission status to true so that //BlockPlacementPolicyDefault.isGoodTarget returns false for this dn d.setDecommissioned(); - InetSocketAddress addrs[] = new InetSocketAddress[3]; - for (int i = 0; i < 3; i++) { - addrs[i] = datanodes.get(i).getXferAddress(); - } Path p = new Path("/filename-foo-bar-baz"); + final short replication = (short)3; FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true, - 4096, (short)3, (long)4096, null, addrs); + 4096, replication, 4096L, null, addrs); out.write(SOME_BYTES); out.close(); //reset the state d.stopDecommission(); + BlockLocation[] locations = getBlockLocations(p); + Assert.assertEquals(replication, locations[0].getNames().length);; //also make sure that the datanode[0] is not in the list of hosts - String datanode0 = - datanodes.get(0).getXferAddress().getAddress().getHostAddress() - + ":" + datanodes.get(0).getXferAddress().getPort(); - for (int i = 0; i < 3; i++) { - if (locations[0].getNames()[i].equals(datanode0)) { - fail(datanode0 + " not supposed to be a replica for the block"); - } + for (int i = 0; i < replication; i++) { + final String loc = locations[0].getNames()[i]; + int j = 0; + for(; j < hosts.length && !loc.equals(hosts[j]); j++); + Assert.assertTrue("j=" + j, j > 0); + Assert.assertTrue("loc=" + loc + " not in host list " + + Arrays.asList(hosts) + ", j=" + j, j < hosts.length); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java index 2e6383cc267..faf946004ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java @@ -60,7 +60,14 @@ public void setupDatanodes() { DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"), DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"), 
DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"), - DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2") + DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2"), + DFSTestUtil.getDatanodeDescriptor("14.14.14.14", "/d4/r1"), + DFSTestUtil.getDatanodeDescriptor("15.15.15.15", "/d4/r1"), + DFSTestUtil.getDatanodeDescriptor("16.16.16.16", "/d4/r1"), + DFSTestUtil.getDatanodeDescriptor("17.17.17.17", "/d4/r1"), + DFSTestUtil.getDatanodeDescriptor("18.18.18.18", "/d4/r1"), + DFSTestUtil.getDatanodeDescriptor("19.19.19.19", "/d4/r1"), + DFSTestUtil.getDatanodeDescriptor("20.20.20.20", "/d4/r1"), }; for (int i = 0; i < dataNodes.length; i++) { cluster.add(dataNodes[i]); @@ -107,7 +114,7 @@ public void testCreateInvalidTopology() throws Exception { @Test public void testRacks() throws Exception { - assertEquals(cluster.getNumOfRacks(), 5); + assertEquals(cluster.getNumOfRacks(), 6); assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1])); assertFalse(cluster.isOnSameRack(dataNodes[1], dataNodes[2])); assertTrue(cluster.isOnSameRack(dataNodes[2], dataNodes[3])); @@ -133,7 +140,7 @@ public void testSortByDistance() throws Exception { testNodes[1] = dataNodes[2]; testNodes[2] = dataNodes[0]; cluster.sortByDistance(dataNodes[0], testNodes, - testNodes.length, 0xDEADBEEF); + testNodes.length, 0xDEADBEEF, false); assertTrue(testNodes[0] == dataNodes[0]); assertTrue(testNodes[1] == dataNodes[1]); assertTrue(testNodes[2] == dataNodes[2]); @@ -146,7 +153,7 @@ public void testSortByDistance() throws Exception { dtestNodes[3] = dataNodes[9]; dtestNodes[4] = dataNodes[10]; cluster.sortByDistance(dataNodes[8], dtestNodes, - dtestNodes.length - 2, 0xDEADBEEF); + dtestNodes.length - 2, 0xDEADBEEF, false); assertTrue(dtestNodes[0] == dataNodes[8]); assertTrue(dtestNodes[1] == dataNodes[11]); assertTrue(dtestNodes[2] == dataNodes[12]); @@ -158,7 +165,7 @@ public void testSortByDistance() throws Exception { testNodes[1] = dataNodes[3]; testNodes[2] = dataNodes[0]; cluster.sortByDistance(dataNodes[0], testNodes, - testNodes.length, 0xDEADBEEF); + testNodes.length, 0xDEADBEEF, false); assertTrue(testNodes[0] == dataNodes[0]); assertTrue(testNodes[1] == dataNodes[1]); assertTrue(testNodes[2] == dataNodes[3]); @@ -168,7 +175,7 @@ public void testSortByDistance() throws Exception { testNodes[1] = dataNodes[3]; testNodes[2] = dataNodes[1]; cluster.sortByDistance(dataNodes[0], testNodes, - testNodes.length, 0xDEADBEEF); + testNodes.length, 0xDEADBEEF, false); assertTrue(testNodes[0] == dataNodes[1]); assertTrue(testNodes[1] == dataNodes[3]); assertTrue(testNodes[2] == dataNodes[5]); @@ -178,7 +185,7 @@ public void testSortByDistance() throws Exception { testNodes[1] = dataNodes[5]; testNodes[2] = dataNodes[3]; cluster.sortByDistance(dataNodes[0], testNodes, - testNodes.length, 0xDEADBEEF); + testNodes.length, 0xDEADBEEF, false); assertTrue(testNodes[0] == dataNodes[1]); assertTrue(testNodes[1] == dataNodes[3]); assertTrue(testNodes[2] == dataNodes[5]); @@ -188,7 +195,7 @@ public void testSortByDistance() throws Exception { testNodes[1] = dataNodes[5]; testNodes[2] = dataNodes[3]; cluster.sortByDistance(dataNodes[0], testNodes, - testNodes.length, 0xDEAD); + testNodes.length, 0xDEAD, false); // sortByDistance does not take the "data center" layer into consideration // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here assertTrue(testNodes[0] == dataNodes[1]); @@ -204,7 +211,27 @@ public void testSortByDistance() throws Exception { testNodes[1] = dataNodes[6]; testNodes[2] = 
dataNodes[7]; cluster.sortByDistance(dataNodes[i], testNodes, - testNodes.length, 0xBEADED+i); + testNodes.length, 0xBEADED+i, false); + if (first == null) { + first = testNodes[0]; + } else { + if (first != testNodes[0]) { + foundRandom = true; + break; + } + } + } + assertTrue("Expected to find a different first location", foundRandom); + // Array of rack local nodes with randomizeBlockLocationsPerBlock set to + // true + // Expect random order of block locations for same block + first = null; + for (int i = 1; i <= 4; i++) { + testNodes[0] = dataNodes[13]; + testNodes[1] = dataNodes[14]; + testNodes[2] = dataNodes[15]; + cluster.sortByDistance(dataNodes[15 + i], testNodes, testNodes.length, + 0xBEADED, true); if (first == null) { first = testNodes[0]; } else { From ef9b6a45c437a56f9ebf198cba902e06e875f27c Mon Sep 17 00:00:00 2001 From: Haohui Mai Date: Thu, 24 Jul 2014 17:28:31 +0000 Subject: [PATCH 046/354] HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI. Contributed by Vinayakumar B. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613195 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../src/main/webapps/hdfs/dfshealth.html | 1 - .../hadoop-hdfs/src/main/webapps/hdfs/index.html | 13 +------------ .../src/main/webapps/secondary/index.html | 11 ----------- 4 files changed, 4 insertions(+), 24 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index bbeebd19843..217bf4df094 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -252,6 +252,9 @@ Trunk (Unreleased) HDFS-5794. Fix the inconsistency of layout version number of ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9) + HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI. + (Vinayakumar B via wheat 9) + Release 2.6.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index fadba070721..8fdf73ba19c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -66,7 +66,6 @@


Hadoop, 2014.

-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html index 99bb13b326c..aa62a372396 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html @@ -18,18 +18,7 @@ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> - + Hadoop Administration - - -

Hadoop Administration

- - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html index 97e0207e06f..f7ef858b9e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html @@ -21,15 +21,4 @@ Hadoop Administration - - -

Hadoop Administration

- - \ No newline at end of file From f2137d7c0e19176d5ad7e28c6abcfc03eac49ec3 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Thu, 24 Jul 2014 17:47:08 +0000 Subject: [PATCH 047/354] HADOOP-10894. Fix dead link in ToolRunner documentation. (Contributed by Akira Ajisaka) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613200 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/util/ToolRunner.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 491daf04e9f..5ea931c649e 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -819,6 +819,9 @@ Release 2.5.0 - UNRELEASED HADOOP-10890. TestDFVariations.testMount fails intermittently. (Yongjun Zhang via Arpit Agarwal) + HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka + via Arpit Agarwal) + Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java index 49581000ca3..16872d0891e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java @@ -30,7 +30,7 @@ *

ToolRunner can be used to run classes implementing * Tool interface. It works in conjunction with * {@link GenericOptionsParser} to parse the - * + * * generic hadoop command line arguments and modifies the * Configuration of the Tool. The * application-specific options are passed along without being modified. From a7855e1c3376fee23eb2ed61f9ae4ad3c9754722 Mon Sep 17 00:00:00 2001 From: Haohui Mai Date: Thu, 24 Jul 2014 17:59:45 +0000 Subject: [PATCH 048/354] HDFS-6723. New NN webUI no longer displays decommissioned state for dead node. Contributed by Ming Ma. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613220 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 217bf4df094..45109896397 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -983,6 +983,9 @@ Release 2.5.0 - UNRELEASED HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal via cmccabe) + HDFS-6723. New NN webUI no longer displays decommissioned state for dead node. + (Ming Ma via wheat9) + Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index 8fdf73ba19c..25895261982 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -282,7 +282,7 @@ {name} ({xferaddr}) {lastContact} - Dead{?decomissioned}, Decomissioned{/decomissioned} + Dead{?decommissioned}, Decommissioned{/decommissioned} - - - From 8c6e172a0ad8f06a4f9b70d61d9f3f7789405815 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Thu, 24 Jul 2014 18:28:00 +0000 Subject: [PATCH 049/354] HDFS-6715. Webhdfs wont fail over when it gets java.io.IOException: Namenode is in startup mode. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613237 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../web/resources/NamenodeWebHdfsMethods.java | 3 +- .../apache/hadoop/hdfs/web/TestWebHDFS.java | 39 +++++++++- .../hadoop/hdfs/web/TestWebHDFSForHA.java | 76 +++++++++++++++++-- 4 files changed, 112 insertions(+), 9 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 45109896397..376c272a576 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -367,6 +367,9 @@ Release 2.6.0 - UNRELEASED HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli) + HDFS-6715. Webhdfs wont fail over when it gets java.io.IOException: Namenode + is in startup mode. 
(jing9) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 92a58f9822e..d7235b38727 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -113,6 +113,7 @@ import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam; import org.apache.hadoop.hdfs.web.resources.XAttrValueParam; import org.apache.hadoop.io.Text; +import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException; import org.apache.hadoop.net.Node; @@ -190,7 +191,7 @@ private static NamenodeProtocols getRPCServer(NameNode namenode) throws IOException { final NamenodeProtocols np = namenode.getRpcServer(); if (np == null) { - throw new IOException("Namenode is in startup mode"); + throw new RetriableException("Namenode is in startup mode"); } return np; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index e9c74c6de30..14312110aa6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -39,14 +39,18 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.TestDFSClientRetries; +import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; -import org.apache.hadoop.hdfs.TestDFSClientRetries; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.Assert; import org.junit.Test; +import org.mockito.internal.util.reflection.Whitebox; /** Test WebHDFS */ public class TestWebHDFS { @@ -445,4 +449,37 @@ public void testWebHdfsRenameSnapshot() throws Exception { } } } + + /** + * Make sure a RetriableException is thrown when rpcServer is null in + * NamenodeWebHdfsMethods. 
+ */ + @Test + public void testRaceWhileNNStartup() throws Exception { + MiniDFSCluster cluster = null; + final Configuration conf = WebHdfsTestUtil.createConf(); + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); + cluster.waitActive(); + final NameNode namenode = cluster.getNameNode(); + final NamenodeProtocols rpcServer = namenode.getRpcServer(); + Whitebox.setInternalState(namenode, "rpcServer", null); + + final Path foo = new Path("/foo"); + final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, + WebHdfsFileSystem.SCHEME); + try { + webHdfs.mkdirs(foo); + fail("Expected RetriableException"); + } catch (RetriableException e) { + GenericTestUtils.assertExceptionContains("Namenode is in startup mode", + e); + } + Whitebox.setInternalState(namenode, "rpcServer", rpcServer); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java index 772e367f93c..0340b952259 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java @@ -18,6 +18,15 @@ package org.apache.hadoop.hdfs.web; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +import java.io.IOException; +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -29,18 +38,14 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.Token; import org.junit.Assert; import org.junit.Test; - -import java.io.IOException; -import java.net.URI; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; +import org.mockito.internal.util.reflection.Whitebox; public class TestWebHDFSForHA { private static final String LOGICAL_NAME = "minidfs"; @@ -182,4 +187,61 @@ public void testMultipleNamespacesConfigured() throws Exception { } } } + + /** + * Make sure the WebHdfsFileSystem will retry based on RetriableException when + * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up. 
+ */ + @Test (timeout=120000) + public void testRetryWhileNNStartup() throws Exception { + final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME); + MiniDFSCluster cluster = null; + final Map resultMap = new HashMap(); + + try { + cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo) + .numDataNodes(0).build(); + HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME); + cluster.waitActive(); + cluster.transitionToActive(0); + + final NameNode namenode = cluster.getNameNode(0); + final NamenodeProtocols rpcServer = namenode.getRpcServer(); + Whitebox.setInternalState(namenode, "rpcServer", null); + + new Thread() { + @Override + public void run() { + boolean result = false; + FileSystem fs = null; + try { + fs = FileSystem.get(WEBHDFS_URI, conf); + final Path dir = new Path("/test"); + result = fs.mkdirs(dir); + } catch (IOException e) { + result = false; + } finally { + IOUtils.cleanup(null, fs); + } + synchronized (TestWebHDFSForHA.this) { + resultMap.put("mkdirs", result); + TestWebHDFSForHA.this.notifyAll(); + } + } + }.start(); + + Thread.sleep(1000); + Whitebox.setInternalState(namenode, "rpcServer", rpcServer); + synchronized (this) { + while (!resultMap.containsKey("mkdirs")) { + this.wait(); + } + Assert.assertTrue(resultMap.get("mkdirs")); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } } From e171254d56bfff467a67a6cf9160595c941f50c0 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Thu, 24 Jul 2014 23:42:06 +0000 Subject: [PATCH 050/354] Name node cannot start if the path of a file under construction contains .snapshot. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613329 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/server/namenode/FSImageFormat.java | 11 ++ .../hadoop/hdfs/TestDFSUpgradeFromImage.java | 137 ++++++++++++++++++ .../test/resources/hadoop-0.23-reserved.tgz | Bin 0 -> 4558 bytes .../src/test/resources/hadoop-1-reserved.tgz | Bin 0 -> 2572 bytes 5 files changed, 151 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 376c272a576..498454916d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -908,6 +908,9 @@ Release 2.5.0 - UNRELEASED HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code when xattr doesn't exist. (Charles Lamb via umamahesh) + HDFS-6696. Name node cannot start if the path of a file under + construction contains ".snapshot". (wang) + BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS HDFS-6299. Protobuf for XAttr and client-side implementation. 
(Yi Liu via umamahesh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 49a035cfff0..5b6d269546b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -614,6 +614,16 @@ private void loadFullNameINodes(long numFiles, DataInput in, Counter counter) INodeDirectory parentINode = fsDir.rootDir; for (long i = 0; i < numFiles; i++) { pathComponents = FSImageSerialization.readPathComponents(in); + for (int j=0; j < pathComponents.length; j++) { + byte[] newComponent = renameReservedComponentOnUpgrade + (pathComponents[j], getLayoutVersion()); + if (!Arrays.equals(newComponent, pathComponents[j])) { + String oldPath = DFSUtil.byteArray2PathString(pathComponents); + pathComponents[j] = newComponent; + String newPath = DFSUtil.byteArray2PathString(pathComponents); + LOG.info("Renaming reserved path " + oldPath + " to " + newPath); + } + } final INode newNode = loadINode( pathComponents[pathComponents.length-1], false, in, counter); @@ -926,6 +936,7 @@ LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) { oldnode = namesystem.dir.getInode(cons.getId()).asFile(); inSnapshot = true; } else { + path = renameReservedPathsOnUpgrade(path, getLayoutVersion()); final INodesInPath iip = fsDir.getLastINodeInPath(path); oldnode = INodeFile.valueOf(iip.getINode(0), path); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index 1e1f668f210..f5dbdceaa17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -70,6 +70,9 @@ public class TestDFSUpgradeFromImage { private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt"; private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz"; private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz"; + private static final String HADOOP1_RESERVED_IMAGE = "hadoop-1-reserved.tgz"; + private static final String HADOOP023_RESERVED_IMAGE = + "hadoop-0.23-reserved.tgz"; private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz"; private static class ReferenceFileInfo { @@ -325,6 +328,140 @@ public void testUpgradeFromCorruptRel22Image() throws IOException { } } + /** + * Test upgrade from a branch-1.2 image with reserved paths + */ + @Test + public void testUpgradeFromRel1ReservedImage() throws Exception { + unpackStorage(HADOOP1_RESERVED_IMAGE); + MiniDFSCluster cluster = null; + // Try it once without setting the upgrade flag to ensure it fails + final Configuration conf = new Configuration(); + // Try it again with a custom rename string + try { + FSImageFormat.setRenameReservedPairs( + ".snapshot=.user-snapshot," + + ".reserved=.my-reserved"); + cluster = + new MiniDFSCluster.Builder(conf) + .format(false) + .startupOption(StartupOption.UPGRADE) + .numDataNodes(0).build(); + DistributedFileSystem dfs = cluster.getFileSystem(); + // Make sure the paths were renamed as expected + // Also check that paths are present after a restart, checks that the + // upgraded 
fsimage has the same state. + final String[] expected = new String[] { + "/.my-reserved", + "/.user-snapshot", + "/.user-snapshot/.user-snapshot", + "/.user-snapshot/open", + "/dir1", + "/dir1/.user-snapshot", + "/dir2", + "/dir2/.user-snapshot", + "/user", + "/user/andrew", + "/user/andrew/.user-snapshot", + }; + for (int i=0; i<2; i++) { + // Restart the second time through this loop + if (i==1) { + cluster.finalizeCluster(conf); + cluster.restartNameNode(true); + } + ArrayList toList = new ArrayList(); + toList.add(new Path("/")); + ArrayList found = new ArrayList(); + while (!toList.isEmpty()) { + Path p = toList.remove(0); + FileStatus[] statuses = dfs.listStatus(p); + for (FileStatus status: statuses) { + final String path = status.getPath().toUri().getPath(); + System.out.println("Found path " + path); + found.add(path); + if (status.isDirectory()) { + toList.add(status.getPath()); + } + } + } + for (String s: expected) { + assertTrue("Did not find expected path " + s, found.contains(s)); + } + assertEquals("Found an unexpected path while listing filesystem", + found.size(), expected.length); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + /** + * Test upgrade from a 0.23.11 image with reserved paths + */ + @Test + public void testUpgradeFromRel023ReservedImage() throws Exception { + unpackStorage(HADOOP023_RESERVED_IMAGE); + MiniDFSCluster cluster = null; + // Try it once without setting the upgrade flag to ensure it fails + final Configuration conf = new Configuration(); + // Try it again with a custom rename string + try { + FSImageFormat.setRenameReservedPairs( + ".snapshot=.user-snapshot," + + ".reserved=.my-reserved"); + cluster = + new MiniDFSCluster.Builder(conf) + .format(false) + .startupOption(StartupOption.UPGRADE) + .numDataNodes(0).build(); + DistributedFileSystem dfs = cluster.getFileSystem(); + // Make sure the paths were renamed as expected + // Also check that paths are present after a restart, checks that the + // upgraded fsimage has the same state. 
+ final String[] expected = new String[] { + "/.user-snapshot", + "/dir1", + "/dir1/.user-snapshot", + "/dir2", + "/dir2/.user-snapshot" + }; + for (int i=0; i<2; i++) { + // Restart the second time through this loop + if (i==1) { + cluster.finalizeCluster(conf); + cluster.restartNameNode(true); + } + ArrayList toList = new ArrayList(); + toList.add(new Path("/")); + ArrayList found = new ArrayList(); + while (!toList.isEmpty()) { + Path p = toList.remove(0); + FileStatus[] statuses = dfs.listStatus(p); + for (FileStatus status: statuses) { + final String path = status.getPath().toUri().getPath(); + System.out.println("Found path " + path); + found.add(path); + if (status.isDirectory()) { + toList.add(status.getPath()); + } + } + } + for (String s: expected) { + assertTrue("Did not find expected path " + s, found.contains(s)); + } + assertEquals("Found an unexpected path while listing filesystem", + found.size(), expected.length); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + /** * Test upgrade from 2.0 image with a variety of .snapshot and .reserved * paths to test renaming on upgrade diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz new file mode 100644 index 0000000000000000000000000000000000000000..0f53f2adb27e1a8379e4bc0d74a9c98700228559 GIT binary patch literal 4558 zcmeIzjZ=~b90zc^)~u~`xy*u;?bI}N;Zm9<$Zbsxbv2fyXv%4pQuBq2R1|%h;P<~ zeptpvWNe1L8OaWZ@A>5u*ZIeWy?kc0q`jGDp7_P~#A;tABY)SB3$pWoDe^z~n z@UbVq?de_xxdKiu$QgD2)*Q;zc(1z{x|-t`jrbZ}Ji4-`dV^-LC2`$uhT0F%^$dID z*<)_RPXUidWHW4v+ZG&T_G7+;yk~W|#m1N7@P|P40EE9&0~yjoq)r-1oAbGPC^Y69 z8*UBLHtxT8w^o18V|(T;gT>RwS5d_GrsdiZBgM9ybh2w)ue`X!X`xyt-rQ$n?F*Pj zOV^?=_{DSz@iURwnUG0LbjJ-8CLJ4BH~-3ZbK?xeVx1T!iP~+uER+OLLZFgBUsli| z$oIv4?Tdy*&cHr_=RuB&Nmxu9ZAnQ6S`$(H@2}jdNaHDFq-60XB?4c4RrVKg+Hh$+ zG29<_xad@|PgWUuL;pZDXLxP=g%B=}DkQfLiO%=YzRG?;<|j>etHx!hpD(9jwp_kD zaKdQNx_^aAR%hm<5XW0a|J|H7{tV%qFWpKK3SE6DoVv2sADd>W=)F~X!D#QcSngA(hYnuF&RDD?uM=ITa*`plqUukOJuK}kg|b^0H}}d(-S~nC(zD~ zB4Fd*=CbIYch=R4dGsZQaXhgON%sm(o-s$6KT9ao%7A;PPU7mW6tBOW)?UT#i0U0s z5BXN{>Wax?n0VL64BigXXDW8i_ap%KB|%3S?<|{en!a6?l^L(sQ>%;_m zLCNwwX1mBVRc@3xjna$Y07YK9J9w$lzb_qy%wqq1QPi}*v?-CMJkZmnOnW>MknJq# zG2N7P%GE5$hz)IESxXOy?0(g3Ek`FuhEy(jgjOm9v^_CQO_#zjWhA7PpqQ;9+R-^v6L)y0ibQ&z8N~|R#}8p9>HR|8l?ZOCO&F|uJD~&=HUH92 z;nvdzI5FVIt*zd~Sr(6}nT6OwB(fda-?>x<;=Y@GJKNnLK;BphoqAfs?ttbZ1PKt0 zhuiMlVpmC5)O%cKmn(rCa^z z!TydX=5Slq?Y>Kko7or1p$FZ{as$T(^VH7ntp>}`#KN`CWB45`g4a4DE0`5{_BCar z1P@nf8Zcp^d+_jYNRpk~`fCXSIQ2sj!9K{~h0QKk#Z^8?ZFcLc&Hndwge=7vnaJ*F zY*XmbMMY9*hIDb zTGLCRZ3uk#3v%j32*$+I<)L6c>gErz7Sj_;X6Q&#A?i+>-DDQjmUKOORujJyMG_Wf z9RbY(R-UaKwH+`WrEon~=^6` z*{t(|7K}iJSb$>(_t?T%U&Kzm{UGD4^4O^^r`T)f1^15)MPJx2)4Iq7a?CVt$f**Rs>nq|P3ENNKBsw56s7fr+-Q%qJLTq+)*8nTJK# z%EyJu5}OhlW+5gbxkV%j&QLTJuoxLWp`bWY`D}OlX?MH*1MB@KynlJY>5g_fg*@8M zerm9b_z8Bi-6+bVw?-thjbEaA;P}gh4_x4|3 z5-@|ZOsf(*j6Kl$`x~uDr$ws^RJ-@Pb^Y-pF%n386994p%`AQe zFRR5%y*m1Rg7$+@d_Xa%-wc~ThV)~$=-he$7t71_DUqebHsn5jHKbryxA9w>n_4Lo zig88{5}-583K2DaBrXQvS{;XN1?fUZggJBQ|GLXKt&jLI=Xy*9U{B9ex3AG5Y z$mN*?qfS#wos|%?UKi02A+hDAZte-|JolOlMOr%or`PsU%=W1L< zeNg~)zPBS2{qrveZ_%xnNBSyyrQ9yKR{TW0v$+$^y@VlYTd1V7Ryx!B=7W6H_L>r( zlo)vaOWIb>u4j^g%+|{Cs?X-*0v@zyS1|T>qslSrSx%08{L9-LV<{DTccQtv2FlzL z)R=C}ot=iRU+iuL`I9e}d7Glf0Inrns4NU|BcekKzSL#oh3<*HYCYd;M^mJ`{6?2yq0OYXuy<;W2c{YI5wiVx^ zZD7I%?ME$u)=1ec^; zTx^l{^ezi?E*;RoRPz+VVLB?>^_2J9aD2cLxoVZ_rAMFAzYkJAO)5KX_n{95J{q~NDI-Wd8mh9#HC>elkZavDdlYeMnI 
zmG!01lXf`wM;J!Rjuv&x|S(ec%FR_UvwgfT#vXJbTQ#6BE0p0;qA4p zz6{|bf1Kw9%Q!o4w(zU`tj}TQ#THHg`zE#w*$!P0mIxefVvO~gcJhKoW`Z5Sy2m*& z!j5|5RCOV1?ztR}iY=h?W;25J1pAE((@$TX_jgSzXY6ih)kRzH^yhncH&6N}#S~(s zu~WaBL5EDVD44ES;G{RqNN1dW#>%SUn1RpfC+Z|L%UQtoIC7U9+(3v(`P-tGQU75P zJR@5&@%o<@379>Bzjoxc4UjGl{RL8Fh%r399%lr3pDWF(Jt@mDbl2;77DfbEx8A5n z^ Date: Thu, 24 Jul 2014 23:49:22 +0000 Subject: [PATCH 051/354] HADOOP-10891. Add EncryptedKeyVersion factory method to KeyProviderCryptoExtension. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613332 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../key/KeyProviderCryptoExtension.java | 24 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 5ea931c649e..55914f83197 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -189,6 +189,9 @@ Trunk (Unreleased) HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey in the REST API. (asuresh via tucu) + HADOOP-10891. Add EncryptedKeyVersion factory method to + KeyProviderCryptoExtension. (wang) + BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java index 0ba73f1519d..227e19b4841 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java @@ -79,6 +79,30 @@ protected EncryptedKeyVersion(String keyName, this.encryptedKeyVersion = encryptedKeyVersion; } + /** + * Factory method to create a new EncryptedKeyVersion that can then be + * passed into {@link #decryptEncryptedKey}. Note that the fields of the + * returned EncryptedKeyVersion will only partially be populated; it is not + * necessarily suitable for operations besides decryption. + * + * @param encryptionKeyVersionName Version name of the encryption key used + * to encrypt the encrypted key. + * @param encryptedKeyIv Initialization vector of the encrypted + * key. The IV of the encryption key used to + * encrypt the encrypted key is derived from + * this IV. + * @param encryptedKeyMaterial Key material of the encrypted key. + * @return EncryptedKeyVersion suitable for decryption. + */ + public static EncryptedKeyVersion createForDecryption(String + encryptionKeyVersionName, byte[] encryptedKeyIv, + byte[] encryptedKeyMaterial) { + KeyVersion encryptedKeyVersion = new KeyVersion(null, null, + encryptedKeyMaterial); + return new EncryptedKeyVersion(null, encryptionKeyVersionName, + encryptedKeyIv, encryptedKeyVersion); + } + /** * @return Name of the encryption key used to encrypt the encrypted key. */ From 2bb650146ddb36830ea9c0d248fd3df1f6aa7534 Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Fri, 25 Jul 2014 07:02:53 +0000 Subject: [PATCH 052/354] HDFS-5919. 
FileJournalManager doesn't purge empty and corrupt inprogress edits files (vinayakumarb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613355 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../server/namenode/FileJournalManager.java | 31 +++++++++++++++++-- .../TestNNStorageRetentionManager.java | 11 +++++-- 3 files changed, 40 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 498454916d8..9fb5ba132e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -370,6 +370,9 @@ Release 2.6.0 - UNRELEASED HDFS-6715. Webhdfs wont fail over when it gets java.io.IOException: Namenode is in startup mode. (jing9) + HDFS-5919. FileJournalManager doesn't purge empty and corrupt inprogress edits + files (vinayakumarb) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index a41ff1390c5..362c316cc2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -71,6 +71,8 @@ public class FileJournalManager implements JournalManager { NameNodeFile.EDITS.getName() + "_(\\d+)-(\\d+)"); private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile( NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)"); + private static final Pattern EDITS_INPROGRESS_STALE_REGEX = Pattern.compile( + NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+).*(\\S+)"); private File currentInProgress = null; @@ -162,8 +164,7 @@ public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException { LOG.info("Purging logs older than " + minTxIdToKeep); File[] files = FileUtil.listFiles(sd.getCurrentDir()); - List editLogs = - FileJournalManager.matchEditLogs(files); + List editLogs = matchEditLogs(files, true); for (EditLogFile log : editLogs) { if (log.getFirstTxId() < minTxIdToKeep && log.getLastTxId() < minTxIdToKeep) { @@ -244,8 +245,13 @@ private void discardEditLogSegments(long startTxId) throws IOException { public static List matchEditLogs(File logDir) throws IOException { return matchEditLogs(FileUtil.listFiles(logDir)); } - + static List matchEditLogs(File[] filesInStorage) { + return matchEditLogs(filesInStorage, false); + } + + private static List matchEditLogs(File[] filesInStorage, + boolean forPurging) { List ret = Lists.newArrayList(); for (File f : filesInStorage) { String name = f.getName(); @@ -256,6 +262,7 @@ static List matchEditLogs(File[] filesInStorage) { long startTxId = Long.parseLong(editsMatch.group(1)); long endTxId = Long.parseLong(editsMatch.group(2)); ret.add(new EditLogFile(f, startTxId, endTxId)); + continue; } catch (NumberFormatException nfe) { LOG.error("Edits file " + f + " has improperly formatted " + "transaction ID"); @@ -270,12 +277,30 @@ static List matchEditLogs(File[] filesInStorage) { long startTxId = Long.parseLong(inProgressEditsMatch.group(1)); ret.add( new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID, true)); + continue; } catch (NumberFormatException nfe) { LOG.error("In-progress edits file " + f + " has improperly " + "formatted transaction ID"); // skip } } + if 
(forPurging) { + // Check for in-progress stale edits + Matcher staleInprogressEditsMatch = EDITS_INPROGRESS_STALE_REGEX + .matcher(name); + if (staleInprogressEditsMatch.matches()) { + try { + long startTxId = Long.valueOf(staleInprogressEditsMatch.group(1)); + ret.add(new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID, + true)); + continue; + } catch (NumberFormatException nfe) { + LOG.error("In-progress stale edits file " + f + " has improperly " + + "formatted transaction ID"); + // skip + } + } + } } return ret; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java index 8f08ef39f87..346d94962bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java @@ -212,18 +212,25 @@ public void testRetainExtraLogsLimitedSegments() throws IOException { tc.addImage("/foo1/current/" + getImageFileName(300), false); tc.addImage("/foo1/current/" + getImageFileName(400), false); + // Segments containing txns upto txId 250 are extra and should be purged. tc.addLog("/foo2/current/" + getFinalizedEditsFileName(1, 100), true); - // Without lowering the max segments to retain, we'd retain all segments - // going back to txid 150 (300 - 150). tc.addLog("/foo2/current/" + getFinalizedEditsFileName(101, 175), true); + tc.addLog("/foo2/current/" + getInProgressEditsFileName(176) + ".empty", + true); tc.addLog("/foo2/current/" + getFinalizedEditsFileName(176, 200), true); tc.addLog("/foo2/current/" + getFinalizedEditsFileName(201, 225), true); + tc.addLog("/foo2/current/" + getInProgressEditsFileName(226) + ".corrupt", + true); tc.addLog("/foo2/current/" + getFinalizedEditsFileName(226, 240), true); // Only retain 2 extra segments. The 301-350 and 351-400 segments are // considered required, not extra. tc.addLog("/foo2/current/" + getFinalizedEditsFileName(241, 275), false); tc.addLog("/foo2/current/" + getFinalizedEditsFileName(276, 300), false); + tc.addLog("/foo2/current/" + getInProgressEditsFileName(301) + ".empty", + false); tc.addLog("/foo2/current/" + getFinalizedEditsFileName(301, 350), false); + tc.addLog("/foo2/current/" + getInProgressEditsFileName(351) + ".corrupt", + false); tc.addLog("/foo2/current/" + getFinalizedEditsFileName(351, 400), false); tc.addLog("/foo2/current/" + getInProgressEditsFileName(401), false); runTest(tc); From 1e553858f930e43fac62986549a178cdcf39384c Mon Sep 17 00:00:00 2001 From: Karthik Kambatla Date: Fri, 25 Jul 2014 16:13:07 +0000 Subject: [PATCH 053/354] YARN-2214. FairScheduler: preemptContainerPreCheck() in FSParentQueue delays convergence towards fairness. 
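For the HDFS-5919 change above, the key detail is that EDITS_INPROGRESS_STALE_REGEX is only consulted during purging, after finalized and normal in-progress names have already been claimed (note the added continue statements), so any remaining name with an edits_inprogress_<txid> prefix plus a trailing suffix such as .empty or .corrupt is treated as a stale segment whose end transaction id is unknown. A standalone sketch of that name matching; the class is illustrative only, and the pattern literal mirrors the one in the patch with the edits_inprogress prefix written out:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class StaleEditsNameCheck {
      // Same shape as EDITS_INPROGRESS_STALE_REGEX in FileJournalManager.
      private static final Pattern STALE = Pattern.compile(
          "edits_inprogress_(\\d+).*(\\S+)");

      public static void main(String[] args) {
        for (String name : new String[] {
            "edits_inprogress_0000000000000000226.corrupt",
            "edits_inprogress_0000000000000000176.empty"}) {
          Matcher m = STALE.matcher(name);
          if (m.matches()) {
            // group(1) is the starting transaction id; the end txid is unknown,
            // which is why the patch records it as HdfsConstants.INVALID_TXID.
            System.out.println(name + " -> startTxId=" + Long.parseLong(m.group(1)));
          }
        }
      }
    }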
(Ashwin Shankar via kasha) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613459 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../scheduler/fair/FSLeafQueue.java | 19 ++++- .../scheduler/fair/FSParentQueue.java | 5 -- .../scheduler/fair/FSQueue.java | 13 ---- .../scheduler/fair/TestFairScheduler.java | 73 +++++++++++++++++++ 5 files changed, 91 insertions(+), 22 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 1712abeb9e4..99b60b1aeda 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -65,6 +65,9 @@ Release 2.6.0 - UNRELEASED YARN-1342. Recover container tokens upon nodemanager restart. (Jason Lowe via devaraj) + YARN-2214. FairScheduler: preemptContainerPreCheck() in FSParentQueue delays + convergence towards fairness. (Ashwin Shankar via kasha) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java index 8f957382e6a..3b3f6ce2296 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java @@ -224,16 +224,17 @@ public Resource assignContainer(FSSchedulerNode node) { @Override public RMContainer preemptContainer() { RMContainer toBePreempted = null; - if (LOG.isDebugEnabled()) { - LOG.debug("Queue " + getName() + " is going to preempt a container " + - "from its applications."); - } // If this queue is not over its fair share, reject if (!preemptContainerPreCheck()) { return toBePreempted; } + if (LOG.isDebugEnabled()) { + LOG.debug("Queue " + getName() + " is going to preempt a container " + + "from its applications."); + } + // Choose the app that is most over fair share Comparator comparator = policy.getComparator(); AppSchedulable candidateSched = null; @@ -328,4 +329,14 @@ public void recoverContainer(Resource clusterResource, SchedulerApplicationAttempt schedulerAttempt, RMContainer rmContainer) { // TODO Auto-generated method stub } + + /** + * Helper method to check if the queue should preempt containers + * + * @return true if check passes (can preempt) or false otherwise + */ + private boolean preemptContainerPreCheck() { + return parent.getPolicy().checkIfUsageOverFairShare(getResourceUsage(), + getFairShare()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java index 5ab60afbca2..9af72a511e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java @@ -164,11 +164,6 @@ public Resource assignContainer(FSSchedulerNode node) { public RMContainer preemptContainer() { RMContainer toBePreempted = null; - // If this queue is not over its fair share, reject - if (!preemptContainerPreCheck()) { - return toBePreempted; - } - // Find the childQueue which is most over fair share FSQueue candidateQueue = null; Comparator comparator = policy.getComparator(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index 716e1ee6874..1e94046100a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -187,17 +187,4 @@ protected boolean assignContainerPreCheck(FSSchedulerNode node) { } return true; } - - /** - * Helper method to check if the queue should preempt containers - * - * @return true if check passes (can preempt) or false otherwise - */ - protected boolean preemptContainerPreCheck() { - if (this == scheduler.getQueueManager().getRootQueue()) { - return true; - } - return parent.getPolicy() - .checkIfUsageOverFairShare(getResourceUsage(), getFairShare()); - } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index df157e75001..33ec3184a91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -1221,6 +1221,79 @@ public void testChoiceOfPreemptedContainers() throws Exception { scheduler.getSchedulerApp(app4).getPreemptionContainers().isEmpty()); } + @Test + public void testPreemptionIsNotDelayedToNextRound() throws Exception { + conf.setLong(FairSchedulerConfiguration.PREEMPTION_INTERVAL, 5000); + conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10000); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false"); + + MockClock clock = new MockClock(); + scheduler.setClock(clock); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println("8"); + out.println(""); + out.println(""); + out.println(""); + out.println(""); + out.println("2"); + out.println(""); + out.print("10"); + out.println(""); + out.close(); + + 
scheduler.init(conf); + scheduler.start(); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + // Add a node of 8G + RMNode node1 = MockNodes.newNodeInfo(1, + Resources.createResource(8 * 1024, 8), 1, "127.0.0.1"); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + // Run apps in queueA.A1 and queueB + ApplicationAttemptId app1 = createSchedulingRequest(1 * 1024, 1, + "queueA.queueA1", "user1", 7, 1); + // createSchedulingRequestExistingApplication(1 * 1024, 1, 2, app1); + ApplicationAttemptId app2 = createSchedulingRequest(1 * 1024, 1, "queueB", + "user2", 1, 1); + + scheduler.update(); + + NodeUpdateSchedulerEvent nodeUpdate1 = new NodeUpdateSchedulerEvent(node1); + for (int i = 0; i < 8; i++) { + scheduler.handle(nodeUpdate1); + } + + // verify if the apps got the containers they requested + assertEquals(7, scheduler.getSchedulerApp(app1).getLiveContainers().size()); + assertEquals(1, scheduler.getSchedulerApp(app2).getLiveContainers().size()); + + // Now submit an app in queueA.queueA2 + ApplicationAttemptId app3 = createSchedulingRequest(1 * 1024, 1, + "queueA.queueA2", "user3", 7, 1); + scheduler.update(); + + // Let 11 sec pass + clock.tick(11); + + scheduler.update(); + Resource toPreempt = scheduler.resToPreempt(scheduler.getQueueManager() + .getLeafQueue("queueA.queueA2", false), clock.getTime()); + assertEquals(2980, toPreempt.getMemory()); + + // verify if the 3 containers required by queueA2 are preempted in the same + // round + scheduler.preemptResources(toPreempt); + assertEquals(3, scheduler.getSchedulerApp(app1).getPreemptionContainers() + .size()); + } + @Test (timeout = 5000) /** * Tests the timing of decision to preempt tasks. From 77363b9d839e47bef2325b8682eabe00d4c83354 Mon Sep 17 00:00:00 2001 From: Karthik Kambatla Date: Fri, 25 Jul 2014 17:12:22 +0000 Subject: [PATCH 054/354] YARN-2335. Annotate all hadoop-sls APIs as @Private. 
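The YARN-2214 change above moves the pre-check from FSParentQueue into FSLeafQueue, so preemption is now gated on the leaf queue's own usage versus its own fair share rather than being rejected early at the parent. A toy illustration of why the parent-level check could delay convergence; the class, method, and numbers below are invented for illustration and are not FairScheduler APIs:

    public class FairSharePrecheckToy {
      // Simplified stand-in for "is this queue's usage over its fair share?"
      static boolean overFairShare(double usage, double fairShare) {
        return usage > fairShare;
      }

      public static void main(String[] args) {
        double parentUsage = 8, parentFairShare = 8;   // parent exactly at its share
        double leafUsage = 7, leafFairShare = 4;       // one child far over its share

        // Gating at the parent would return false here and skip preemption,
        // even though the leaf-level check correctly allows it.
        System.out.println("parent over share? "
            + overFairShare(parentUsage, parentFairShare));  // false
        System.out.println("leaf over share?   "
            + overFairShare(leafUsage, leafFairShare));      // true -> may preempt
      }
    }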
(Wei Yan via kasha) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613478 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/yarn/sls/RumenToSLSConverter.java | 4 ++++ .../main/java/org/apache/hadoop/yarn/sls/SLSRunner.java | 4 ++++ .../org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java | 4 ++++ .../apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java | 4 ++++ .../org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java | 5 +++++ .../apache/hadoop/yarn/sls/nodemanager/NMSimulator.java | 4 ++++ .../org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java | 8 +++++++- .../yarn/sls/scheduler/CapacitySchedulerMetrics.java | 5 +++++ .../hadoop/yarn/sls/scheduler/ContainerSimulator.java | 4 ++++ .../hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java | 4 ++++ .../hadoop/yarn/sls/scheduler/FifoSchedulerMetrics.java | 4 ++++ .../sls/scheduler/NodeUpdateSchedulerEventWrapper.java | 4 ++++ .../apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java | 4 ++++ .../yarn/sls/scheduler/ResourceSchedulerWrapper.java | 3 +++ .../hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java | 4 ++++ .../hadoop/yarn/sls/scheduler/SchedulerMetrics.java | 4 ++++ .../hadoop/yarn/sls/scheduler/SchedulerWrapper.java | 4 ++++ .../org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java | 6 ++++++ .../java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java | 4 ++++ .../java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java | 4 ++++ hadoop-yarn-project/CHANGES.txt | 2 ++ 21 files changed, 88 insertions(+), 1 deletion(-) diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java index 567963412b1..2d4b4ae5264 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java @@ -21,6 +21,8 @@ import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.Options; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.map.ObjectMapper; import org.codehaus.jackson.map.ObjectWriter; @@ -42,6 +44,8 @@ import java.util.TreeMap; import java.util.TreeSet; +@Private +@Unstable public class RumenToSLSConverter { private static final String EOL = System.getProperty("line.separator"); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java index 501d11e0ccd..9baa73626d5 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java @@ -32,6 +32,8 @@ import java.util.Random; import java.util.Arrays; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.tools.rumen.JobTraceReader; import org.apache.hadoop.tools.rumen.LoggedJob; @@ -66,6 +68,8 @@ import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.map.ObjectMapper; +@Private +@Unstable public class SLSRunner { // RM, Runner private ResourceManager rm; diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java index 67c09940120..5af4eaa2de4 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java @@ -29,6 +29,8 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; @@ -70,6 +72,8 @@ import org.apache.hadoop.yarn.sls.scheduler.TaskRunner; import org.apache.hadoop.yarn.sls.utils.SLSUtils; +@Private +@Unstable public abstract class AMSimulator extends TaskRunner.Task { // resource manager protected ResourceManager rm; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java index d789e1ca2aa..d24510ba6fd 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java @@ -27,6 +27,8 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; @@ -45,6 +47,8 @@ import org.apache.hadoop.yarn.sls.SLSRunner; import org.apache.log4j.Logger; +@Private +@Unstable public class MRAMSimulator extends AMSimulator { /* Vocabulary Used: diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java index 21785cc67d9..8fd5b3f770a 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java @@ -18,6 +18,11 @@ package org.apache.hadoop.yarn.sls.conf; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +@Private +@Unstable public class SLSConfiguration { // sls public static final String PREFIX = "yarn.sls."; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java index b0e2e37fa47..4112685e152 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java @@ -27,6 +27,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.DelayQueue; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import 
org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -54,6 +56,8 @@ import org.apache.hadoop.yarn.sls.scheduler.TaskRunner; import org.apache.hadoop.yarn.sls.utils.SLSUtils; +@Private +@Unstable public class NMSimulator extends TaskRunner.Task { // node resource private RMNode node; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java index 76671572ce8..1d573822d9b 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java @@ -21,6 +21,8 @@ import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.net.Node; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -36,6 +38,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode .UpdatedContainerInfo; +@Private +@Unstable public class NodeInfo { private static int NODE_ID = 0; @@ -43,6 +47,8 @@ public static NodeId newNodeID(String host, int port) { return NodeId.newInstance(host, port); } + @Private + @Unstable private static class FakeRMNodeImpl implements RMNode { private NodeId nodeId; private String hostName; @@ -164,7 +170,7 @@ public void setResourceOption(ResourceOption resourceOption) { perNode = resourceOption; } } - + public static RMNode newNodeInfo(String rackName, String hostName, final ResourceOption resourceOption, int port) { final NodeId nodeId = newNodeID(hostName, port); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java index 1d878537061..a73f48c4d7e 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java @@ -18,6 +18,11 @@ package org.apache.hadoop.yarn.sls.scheduler; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +@Private +@Unstable public class CapacitySchedulerMetrics extends SchedulerMetrics { public CapacitySchedulerMetrics() { diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ContainerSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ContainerSimulator.java index 27a50d3b00f..86229763e15 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ContainerSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ContainerSimulator.java @@ -21,9 +21,13 @@ import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; +@Private +@Unstable public class 
ContainerSimulator implements Delayed { // id private ContainerId id; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java index b706b3ee6d8..f427dcd557a 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.sls.scheduler; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair .AppSchedulable; @@ -28,6 +30,8 @@ import com.codahale.metrics.Gauge; import org.apache.hadoop.yarn.sls.SLSRunner; +@Private +@Unstable public class FairSchedulerMetrics extends SchedulerMetrics { private int totalMemoryMB = Integer.MAX_VALUE; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FifoSchedulerMetrics.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FifoSchedulerMetrics.java index 882b3e1215b..6ab2e1d0107 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FifoSchedulerMetrics.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FifoSchedulerMetrics.java @@ -18,12 +18,16 @@ package org.apache.hadoop.yarn.sls.scheduler; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo .FifoScheduler; import com.codahale.metrics.Gauge; +@Private +@Unstable public class FifoSchedulerMetrics extends SchedulerMetrics { public FifoSchedulerMetrics() { diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/NodeUpdateSchedulerEventWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/NodeUpdateSchedulerEventWrapper.java index 4bf93138053..12dfe8baa16 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/NodeUpdateSchedulerEventWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/NodeUpdateSchedulerEventWrapper.java @@ -18,9 +18,13 @@ package org.apache.hadoop.yarn.sls.scheduler; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event .NodeUpdateSchedulerEvent; +@Private +@Unstable public class NodeUpdateSchedulerEventWrapper extends NodeUpdateSchedulerEvent { public NodeUpdateSchedulerEventWrapper(NodeUpdateSchedulerEvent event) { diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java index bbe24c883c7..da9b56fd546 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java @@ -18,6 +18,8 @@ package 
org.apache.hadoop.yarn.sls.scheduler; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.net.Node; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -33,6 +35,8 @@ import java.util.Collections; import java.util.List; +@Private +@Unstable public class RMNodeWrapper implements RMNode { private RMNode node; private List updates; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java index 455808543b6..0bd0c87d2d0 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java @@ -36,6 +36,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configurable; @@ -92,6 +93,8 @@ import com.codahale.metrics.SlidingWindowReservoir; import com.codahale.metrics.Timer; +@Private +@Unstable final public class ResourceSchedulerWrapper extends AbstractYarnScheduler implements SchedulerWrapper, ResourceScheduler, Configurable { diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java index 6a84e5838c4..44a872198d6 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.yarn.sls.scheduler; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.yarn.sls.SLSRunner; import org.apache.hadoop.yarn.sls.conf.SLSConfiguration; @@ -100,6 +102,8 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +@Private +@Unstable public class SLSCapacityScheduler extends CapacityScheduler implements SchedulerWrapper,Configurable { private static final String EOL = System.getProperty("line.separator"); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java index 814d1334011..ecf516d7c98 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java @@ -21,6 +21,8 @@ import java.util.HashSet; import java.util.Set; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.server.resourcemanager.scheduler .ResourceScheduler; @@ 
-30,6 +32,8 @@ import com.codahale.metrics.Gauge; import com.codahale.metrics.MetricRegistry; +@Private +@Unstable public abstract class SchedulerMetrics { protected ResourceScheduler scheduler; protected Set trackedQueues; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerWrapper.java index 44629f5347f..524b8bf23e1 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerWrapper.java @@ -19,11 +19,15 @@ import java.util.Set; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import com.codahale.metrics.MetricRegistry; +@Private +@Unstable public interface SchedulerWrapper { public MetricRegistry getMetrics(); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java index efb573c9ec3..c936dd93180 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java @@ -25,9 +25,15 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.exceptions.YarnException; +@Private +@Unstable public class TaskRunner { + @Private + @Unstable public abstract static class Task implements Runnable, Delayed { private long start; private long end; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java index 70ab96d711c..f62f02471b9 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.yarn.sls.utils; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.tools.rumen.JobTraceReader; @@ -36,6 +38,8 @@ import java.util.List; import java.util.Iterator; +@Private +@Unstable public class SLSUtils { public static String[] getRackHostName(String hostname) { diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java index e6dd8467898..45301a18a54 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java @@ -30,6 +30,8 @@ import javax.servlet.http.HttpServletResponse; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import 
org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event .SchedulerEventType; import org.mortbay.jetty.Handler; @@ -49,6 +51,8 @@ import com.codahale.metrics.MetricRegistry; import org.mortbay.jetty.handler.ResourceHandler; +@Private +@Unstable public class SLSWebApp extends HttpServlet { private static final long serialVersionUID = 1905162041950251407L; private transient Server server; diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 99b60b1aeda..69ec1b25a11 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -439,6 +439,8 @@ Release 2.5.0 - UNRELEASED YARN-2319. Made the MiniKdc instance start/close before/after the class of TestRMWebServicesDelegationTokens. (Wenwu Peng via zjshen) + YARN-2335. Annotate all hadoop-sls APIs as @Private. (Wei Yan via kasha) + Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES From cc0464f4fe793d9894776e062193e6609b7c1689 Mon Sep 17 00:00:00 2001 From: Vinayakumar B Date: Fri, 25 Jul 2014 18:17:50 +0000 Subject: [PATCH 055/354] HDFS-6752. Avoid Address bind errors in TestDatanodeConfig#testMemlockLimit (vinayakumarb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613486 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java | 2 ++ 2 files changed, 5 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 9fb5ba132e1..502771a1160 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -373,6 +373,9 @@ Release 2.6.0 - UNRELEASED HDFS-5919. FileJournalManager doesn't purge empty and corrupt inprogress edits files (vinayakumarb) + HDFS-6752. Avoid Address bind errors in TestDatanodeConfig#testMemlockLimit + (vinayakumarb) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java index cfa14cca41b..9cdb763f032 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java @@ -53,6 +53,8 @@ public static void setUp() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, 0); conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "localhost:0"); + conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "localhost:0"); + conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "localhost:0"); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); } From b9e4be452349317619f8f7641e51f2493f499b29 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 25 Jul 2014 18:33:10 +0000 Subject: [PATCH 056/354] HDFS-6724. Decrypt EDEK before creating CryptoInputStream/CryptoOutputStream. 
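The HDFS-6752 fix above relies on the usual "port 0" convention: binding to localhost:0 asks the kernel for a free ephemeral port, so concurrent test JVMs cannot collide on a hard-coded port. A minimal sketch of that behavior using plain java.net, with no MiniDFSCluster involved:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class EphemeralPortSketch {
      public static void main(String[] args) throws IOException {
        try (ServerSocket socket = new ServerSocket()) {
          // Equivalent of configuring "localhost:0": the OS picks a free port.
          socket.bind(new InetSocketAddress("localhost", 0));
          System.out.println("bound to localhost:" + socket.getLocalPort());
        }
      }
    }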
(wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1613490 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 ++ .../org/apache/hadoop/hdfs/DFSClient.java | 44 ++++++++++++++++--- .../java/org/apache/hadoop/hdfs/DFSUtil.java | 38 ++++++++++++++++ .../hdfs/server/namenode/FSNamesystem.java | 42 +++--------------- .../hadoop/hdfs/TestEncryptionZones.java | 4 ++ 5 files changed, 90 insertions(+), 41 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index e91ce99acb8..99e2c12adaf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -59,6 +59,9 @@ fs-encryption (Unreleased) HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in EZManager#createEncryptionZone. (clamb) + HDFS-6724. Decrypt EDEK before creating + CryptoInputStream/CryptoOutputStream. (wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index a6f02fbfab5..a88a8b10118 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; +import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension + .EncryptedKeyVersion; import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT; @@ -76,6 +79,7 @@ import java.net.SocketAddress; import java.net.URI; import java.net.UnknownHostException; +import java.security.GeneralSecurityException; import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; @@ -100,6 +104,7 @@ import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.CryptoInputStream; import org.apache.hadoop.crypto.CryptoOutputStream; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockStorageLocation; import org.apache.hadoop.fs.CacheFlag; @@ -256,7 +261,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, private final CryptoCodec codec; @VisibleForTesting List cipherSuites; - + @VisibleForTesting + KeyProviderCryptoExtension provider; /** * DFSClient configuration */ @@ -591,7 +597,12 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, this.codec = CryptoCodec.getInstance(conf); this.cipherSuites = Lists.newArrayListWithCapacity(1); cipherSuites.add(codec.getCipherSuite()); - + provider = DFSUtil.createKeyProviderCryptoExtension(conf); + if (provider == null) { + LOG.info("No KeyProvider found."); + } else { + LOG.info("Found KeyProvider: " + provider.toString()); + } int numResponseToDrop = conf.getInt( DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT); @@ -1291,6 +1302,25 @@ public BlockStorageLocation[] getBlockStorageLocations( return volumeBlockLocations; } + /** + * Decrypts a EDEK by 
consulting the KeyProvider. + */ + private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo + feInfo) throws IOException { + if (provider == null) { + throw new IOException("No KeyProvider is configured, cannot access" + + " an encrypted file"); + } + EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption( + feInfo.getEzKeyVersionName(), feInfo.getIV(), + feInfo.getEncryptedDataEncryptionKey()); + try { + return provider.decryptEncryptedKey(ekv); + } catch (GeneralSecurityException e) { + throw new IOException(e); + } + } + /** * Wraps the stream in a CryptoInputStream if the underlying file is * encrypted. @@ -1300,13 +1330,14 @@ public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis) final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo(); if (feInfo != null) { // File is encrypted, wrap the stream in a crypto stream. + KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo); final CryptoInputStream cryptoIn = new CryptoInputStream(dfsis, CryptoCodec.getInstance(conf, - feInfo.getCipherSuite()), feInfo.getEncryptedDataEncryptionKey(), + feInfo.getCipherSuite()), decrypted.getMaterial(), feInfo.getIV()); return new HdfsDataInputStream(cryptoIn); } else { - // No key/IV pair so no encryption. + // No FileEncryptionInfo so no encryption. return new HdfsDataInputStream(dfsis); } } @@ -1329,12 +1360,13 @@ public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos, final FileEncryptionInfo feInfo = dfsos.getFileEncryptionInfo(); if (feInfo != null) { // File is encrypted, wrap the stream in a crypto stream. + KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo); final CryptoOutputStream cryptoOut = new CryptoOutputStream(dfsos, codec, - feInfo.getEncryptedDataEncryptionKey(), feInfo.getIV(), startPos); + decrypted.getMaterial(), feInfo.getIV(), startPos); return new HdfsDataOutputStream(cryptoOut, statistics, startPos); } else { - // No key/IV present so no encryption. + // No FileEncryptionInfo present so no encryption. return new HdfsDataOutputStream(dfsos, statistics, startPos); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 5e83575d733..10c6ece3c54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -68,6 +68,9 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; +import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -1695,4 +1698,39 @@ public static void assertAllResultsEqual(Collection objects) } } } + + /** + * Creates a new KeyProviderCryptoExtension by wrapping the + * KeyProvider specified in the given Configuration. + * + * @param conf Configuration specifying a single, non-transient KeyProvider. + * @return new KeyProviderCryptoExtension, or null if no provider was found. 
+ * @throws IOException if the KeyProvider is improperly specified in + * the Configuration + */ + public static KeyProviderCryptoExtension createKeyProviderCryptoExtension( + final Configuration conf) throws IOException { + final List providers = KeyProviderFactory.getProviders(conf); + if (providers == null || providers.size() == 0) { + return null; + } + if (providers.size() > 1) { + StringBuilder builder = new StringBuilder(); + builder.append("Found multiple KeyProviders but only one is permitted ["); + String prefix = " "; + for (KeyProvider kp: providers) { + builder.append(prefix + kp.toString()); + prefix = ", "; + } + builder.append("]"); + throw new IOException(builder.toString()); + } + KeyProviderCryptoExtension provider = KeyProviderCryptoExtension + .createKeyProviderCryptoExtension(providers.get(0)); + if (provider.isTransient()) { + throw new IOException("KeyProvider " + provider.toString() + + " was found but it is a transient provider."); + } + return provider; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 514ab072d90..60352faafb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -141,7 +141,6 @@ import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.ContentSummary; @@ -766,7 +765,12 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { */ FSNamesystem(Configuration conf, FSImage fsImage, boolean ignoreRetryCache) throws IOException { - initializeKeyProvider(conf); + provider = DFSUtil.createKeyProviderCryptoExtension(conf); + if (provider == null) { + LOG.info("No KeyProvider found."); + } else { + LOG.info("Found KeyProvider: " + provider.toString()); + } providerOptions = KeyProvider.options(conf); this.codec = CryptoCodec.getInstance(conf); if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, @@ -926,40 +930,8 @@ void addCacheEntry(byte[] clientId, int callId) { } } - private void initializeKeyProvider(final Configuration conf) { - try { - final List providers = KeyProviderFactory.getProviders(conf); - if (providers == null) { - return; - } - - if (providers.size() == 0) { - LOG.info("No KeyProviders found."); - return; - } - - if (providers.size() > 1) { - final String err = - "Multiple KeyProviders found. 
Only one is permitted."; - LOG.error(err); - throw new RuntimeException(err); - } - provider = KeyProviderCryptoExtension - .createKeyProviderCryptoExtension(providers.get(0)); - if (provider.isTransient()) { - final String err = - "A KeyProvider was found but it is a transient provider."; - LOG.error(err); - throw new RuntimeException(err); - } - LOG.info("Found KeyProvider: " + provider.toString()); - } catch (IOException e) { - LOG.error("Exception while initializing KeyProvider", e); - } - } - @VisibleForTesting - public KeyProvider getProvider() { + public KeyProviderCryptoExtension getProvider() { return provider; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 421396bdf38..a53e47e6e85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -90,6 +90,10 @@ public void setup() throws IOException { fcWrapper = new FileContextTestWrapper( FileContext.getFileContext(cluster.getURI(), conf)); dfsAdmin = new HdfsAdmin(cluster.getURI(), conf); + // Need to set the client's KeyProvider to the NN's for JKS, + // else the updates do not get flushed properly + fs.getClient().provider = cluster.getNameNode().getNamesystem() + .getProvider(); } @After From 79d214121b29104c14bf9d64c444bf75568cc4d4 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 25 Jul 2014 19:02:19 +0000 Subject: [PATCH 057/354] Fix up HDFS CHANGES.txt for 2.5.0 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613494 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 43 ++++++++++----------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 502771a1160..65f0665653a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -398,6 +398,15 @@ Release 2.5.0 - UNRELEASED HDFS-6406. Add capability for NFS gateway to reject connections from unprivileged ports. (atm) + HDFS-2006. Ability to support storing extended attributes per file. + + HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API. + (Akira Ajisaka via wheat9) + + HDFS-6278. Create HTML5-based UI for SNN. (wheat9) + + HDFS-6279. Create new index page for JN / DN. (wheat9) + IMPROVEMENTS HDFS-6007. Update documentation about short-circuit local reads (iwasakims @@ -415,9 +424,6 @@ Release 2.5.0 - UNRELEASED HDFS-6158. Clean up dead code for OfflineImageViewer. (wheat9) - HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API. - (Akira Ajisaka via wheat9) - HDFS-6164. Remove lsr in OfflineImageViewer. (wheat9) HDFS-6167. Relocate the non-public API classes in the hdfs.client package. @@ -445,10 +451,6 @@ Release 2.5.0 - UNRELEASED HDFS-6265. Prepare HDFS codebase for JUnit 4.11. (cnauroth) - HDFS-6278. Create HTML5-based UI for SNN. (wheat9) - - HDFS-6279. Create new index page for JN / DN. (wheat9) - HDFS-5693. Few NN metrics data points were collected via JMX when NN is under heavy load. (Ming Ma via jing9) @@ -820,9 +822,6 @@ Release 2.5.0 - UNRELEASED HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs. (Yi Liu via umamahesh) - HDFS-6375. 
Listing extended attributes with the search permission. - (Charles Lamb via wang) - HDFS-6539. test_native_mini_dfs is skipped in hadoop-hdfs/pom.xml (decstery via cmccabe) @@ -917,6 +916,18 @@ Release 2.5.0 - UNRELEASED HDFS-6696. Name node cannot start if the path of a file under construction contains ".snapshot". (wang) + HDFS-6312. WebHdfs HA failover is broken on secure clusters. + (daryn via tucu) + + HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes + from the tree and deleting them from the inode map (kihwal via cmccabe) + + HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal + via cmccabe) + + HDFS-6723. New NN webUI no longer displays decommissioned state for dead node. + (Ming Ma via wheat9) + BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh) @@ -986,18 +997,6 @@ Release 2.5.0 - UNRELEASED HDFS-6492. Support create-time xattrs and atomically setting multiple xattrs. (wang) - HDFS-6312. WebHdfs HA failover is broken on secure clusters. - (daryn via tucu) - - HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes - from the tree and deleting them from the inode map (kihwal via cmccabe) - - HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal - via cmccabe) - - HDFS-6723. New NN webUI no longer displays decommissioned state for dead node. - (Ming Ma via wheat9) - Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES From 10286e98ad3e07607b5a368b8f9b75ae99db1062 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 25 Jul 2014 19:06:05 +0000 Subject: [PATCH 058/354] Fix up Common CHANGES.txt for 2.5.0 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613498 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 54 +++++++++---------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 55914f83197..419d45c6831 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -503,6 +503,9 @@ Release 2.5.0 - UNRELEASED HADOOP-8943. Support multiple group mapping providers. (Kai Zheng via brandonli) + HADOOP-9361 Strictly define the expected behavior of filesystem APIs and + write tests to verify compliance (stevel) + IMPROVEMENTS HADOOP-10451. Remove unused field and imports from SaslRpcServer. @@ -597,9 +600,6 @@ Release 2.5.0 - UNRELEASED HADOOP-10747. Support configurable retries on SASL connection failures in RPC client. (cnauroth) - HADOOP-10674. Improve PureJavaCrc32 performance and use java.util.zip.CRC32 - for Java 7 and above. (szetszwo) - HADOOP-10754. Reenable several HA ZooKeeper-related tests on Windows. (cnauroth) @@ -611,9 +611,6 @@ Release 2.5.0 - UNRELEASED HADOOP-10767. Clean up unused code in Ls shell command. (cnauroth) - HADOOP-9361 Strictly define the expected behavior of filesystem APIs and - write tests to verify compliance (stevel) - HADOOP-9651 Filesystems to throw FileAlreadyExistsException in createFile(path, overwrite=false) when the file exists (stevel) @@ -626,6 +623,9 @@ Release 2.5.0 - UNRELEASED OPTIMIZATIONS + HADOOP-10674. Improve PureJavaCrc32 performance and use java.util.zip.CRC32 + for Java 7 and above. (szetszwo) + BUG FIXES HADOOP-10378. Typo in help printed by hdfs dfs -help. 
@@ -780,27 +780,6 @@ Release 2.5.0 - UNRELEASED HADOOP-10801 dead link in site.xml (Akira AJISAKA via stevel) - BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS - - HADOOP-10520. Extended attributes definition and FileSystem APIs for - extended attributes. (Yi Liu via wang) - - HADOOP-10546. Javadoc and other small fixes for extended attributes in - hadoop-common. (Charles Lamb via wang) - - HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang) - - HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh) - - HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh) - - HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display.(Yi Liu via umamahesh) - - HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh) - - HADOOP-10561. Copy command with preserve option should handle Xattrs. - (Yi Liu via cnauroth) - HADOOP-10590. ServiceAuthorizationManager is not threadsafe. (Benoy Antony via vinayakumarb) HADOOP-10711. Cleanup some extra dependencies from hadoop-auth. (rkanter via tucu) @@ -825,6 +804,27 @@ Release 2.5.0 - UNRELEASED HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka via Arpit Agarwal) + BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS + + HADOOP-10520. Extended attributes definition and FileSystem APIs for + extended attributes. (Yi Liu via wang) + + HADOOP-10546. Javadoc and other small fixes for extended attributes in + hadoop-common. (Charles Lamb via wang) + + HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang) + + HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh) + + HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh) + + HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display.(Yi Liu via umamahesh) + + HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh) + + HADOOP-10561. Copy command with preserve option should handle Xattrs. + (Yi Liu via cnauroth) + Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES From d4fec3493351c619a0278929ae2d5c8cd67cbfbe Mon Sep 17 00:00:00 2001 From: Jian He Date: Fri, 25 Jul 2014 20:42:37 +0000 Subject: [PATCH 059/354] YARN-2211. Persist AMRMToken master key in RMStateStore for RM recovery. 
Contributed by Xuan Gong git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613515 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../yarn/client/ProtocolHATestBase.java | 1 + .../TestApplicationMasterServiceOnHA.java | 6 +- .../pom.xml | 31 +++++ .../RMSecretManagerService.java | 6 +- .../resourcemanager/ResourceManager.java | 3 + .../recovery/FileSystemRMStateStore.java | 55 +++++++- .../recovery/MemoryRMStateStore.java | 15 +++ .../recovery/NullRMStateStore.java | 7 + .../recovery/RMStateStore.java | 20 ++- .../recovery/ZKRMStateStore.java | 45 ++++++- .../records/AMRMTokenSecretManagerState.java | 76 +++++++++++ .../pb/AMRMTokenSecretManagerStatePBImpl.java | 126 ++++++++++++++++++ .../security/AMRMTokenSecretManager.java | 48 ++++++- ...yarn_server_resourcemanager_recovery.proto | 30 +++++ .../server/resourcemanager/TestRMRestart.java | 9 +- .../recovery/RMStateStoreTestBase.java | 71 +++++++++- .../recovery/TestFSRMStateStore.java | 2 +- .../recovery/TestZKRMStateStore.java | 1 + .../rmapp/TestRMAppTransitions.java | 2 +- .../attempt/TestRMAppAttemptTransitions.java | 3 +- .../scheduler/capacity/TestUtils.java | 5 +- .../security/TestAMRMTokens.java | 4 +- 23 files changed, 535 insertions(+), 34 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/AMRMTokenSecretManagerState.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/AMRMTokenSecretManagerStatePBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 69ec1b25a11..e6fbea9350e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -68,6 +68,9 @@ Release 2.6.0 - UNRELEASED YARN-2214. FairScheduler: preemptContainerPreCheck() in FSParentQueue delays convergence towards fairness. (Ashwin Shankar via kasha) + YARN-2211. Persist AMRMToken master key in RMStateStore for RM recovery. 
+ (Xuan Gong via jianhe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java index 15bfa28d20d..72cb1b1684c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java @@ -267,6 +267,7 @@ public void run() { protected void startHACluster(int numOfNMs, boolean overrideClientRMService, boolean overrideRTS, boolean overrideApplicationMasterService) throws Exception { + conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true); conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false); cluster = new MiniYARNClusterForHATesting(TestRMFailover.class.getName(), 2, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceOnHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceOnHA.java index 4771ccba9db..0b42ac3c6b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceOnHA.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceOnHA.java @@ -54,11 +54,9 @@ public void initiate() throws Exception { amClient = ClientRMProxy .createRMProxy(this.conf, ApplicationMasterProtocol.class); - AMRMTokenIdentifier id = - new AMRMTokenIdentifier(attemptId); Token appToken = - new Token(id, this.cluster.getResourceManager() - .getRMContext().getAMRMTokenSecretManager()); + this.cluster.getResourceManager().getRMContext() + .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId); appToken.setService(new Text("appToken service")); UserGroupInformation.setLoginUser(UserGroupInformation .createRemoteUser(UserGroupInformation.getCurrentUser() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index c2a94ead159..0f89bbe38a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -244,6 +244,37 @@ + + + org.apache.hadoop + hadoop-maven-plugins + + + compile-protoc + generate-sources + + protoc + + + ${protobuf.version} + ${protoc.path} + + ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto + ${basedir}/../../hadoop-yarn-api/src/main/proto + ${basedir}/../hadoop-yarn-server-common/src/main/proto + ${basedir}/src/main/proto + + + ${basedir}/src/main/proto + + yarn_server_resourcemanager_recovery.proto + + + ${project.build.directory}/generated-sources/java + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMSecretManagerService.java index 9fdde6589a3..d0d7d16a276 100644 
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMSecretManagerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMSecretManagerService.java @@ -60,7 +60,7 @@ public RMSecretManagerService(Configuration conf, RMContextImpl rmContext) { clientToAMSecretManager = createClientToAMTokenSecretManager(); rmContext.setClientToAMTokenSecretManager(clientToAMSecretManager); - amRmTokenSecretManager = createAMRMTokenSecretManager(conf); + amRmTokenSecretManager = createAMRMTokenSecretManager(conf, this.rmContext); rmContext.setAMRMTokenSecretManager(amRmTokenSecretManager); rmDTSecretManager = @@ -115,8 +115,8 @@ protected NMTokenSecretManagerInRM createNMTokenSecretManager( } protected AMRMTokenSecretManager createAMRMTokenSecretManager( - Configuration conf) { - return new AMRMTokenSecretManager(conf); + Configuration conf, RMContext rmContext) { + return new AMRMTokenSecretManager(conf, rmContext); } protected ClientToAMTokenSecretManagerInRM createClientToAMTokenSecretManager() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 409ba4893b9..7dceda249da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -1026,6 +1026,9 @@ public void recover(RMState state) throws Exception { // recover RMdelegationTokenSecretManager rmContext.getRMDelegationTokenSecretManager().recover(state); + // recover AMRMTokenSecretManager + rmContext.getAMRMTokenSecretManager().recover(state); + // recover applications rmAppManager.recover(state); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index 0e605a9b07d..243c7a19912 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -22,6 +22,7 @@ import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; +import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -43,16 +44,18 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; - import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Epoch; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.AMRMTokenSecretManagerStatePBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.EpochPBImpl; @@ -76,6 +79,8 @@ public class FileSystemRMStateStore extends RMStateStore { protected static final String ROOT_DIR_NAME = "FSRMStateRoot"; protected static final RMStateVersion CURRENT_VERSION_INFO = RMStateVersion .newInstance(1, 1); + protected static final String AMRMTOKEN_SECRET_MANAGER_NODE = + "AMRMTokenSecretManagerNode"; protected FileSystem fs; @@ -89,6 +94,7 @@ public class FileSystemRMStateStore extends RMStateStore { @VisibleForTesting Path fsWorkingPath; + Path amrmTokenSecretManagerRoot; @Override public synchronized void initInternal(Configuration conf) throws Exception{ @@ -96,6 +102,8 @@ public synchronized void initInternal(Configuration conf) rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); rmDTSecretManagerRoot = new Path(rootDirPath, RM_DT_SECRET_MANAGER_ROOT); rmAppRoot = new Path(rootDirPath, RM_APP_ROOT); + amrmTokenSecretManagerRoot = + new Path(rootDirPath, AMRMTOKEN_SECRET_MANAGER_ROOT); } @Override @@ -113,6 +121,7 @@ protected synchronized void startInternal() throws Exception { fs = fsWorkingPath.getFileSystem(conf); fs.mkdirs(rmDTSecretManagerRoot); fs.mkdirs(rmAppRoot); + fs.mkdirs(amrmTokenSecretManagerRoot); } @Override @@ -180,9 +189,32 @@ public synchronized RMState loadState() throws Exception { loadRMDTSecretManagerState(rmState); // recover RM applications loadRMAppState(rmState); + // recover AMRMTokenSecretManager + loadAMRMTokenSecretManagerState(rmState); return rmState; } + private void loadAMRMTokenSecretManagerState(RMState rmState) + throws Exception { + checkAndResumeUpdateOperation(amrmTokenSecretManagerRoot); + Path amrmTokenSecretManagerStateDataDir = + new Path(amrmTokenSecretManagerRoot, AMRMTOKEN_SECRET_MANAGER_NODE); + FileStatus status; + try { + status = fs.getFileStatus(amrmTokenSecretManagerStateDataDir); + assert status.isFile(); + } catch (FileNotFoundException ex) { + return; + } + byte[] data = readFile(amrmTokenSecretManagerStateDataDir, status.getLen()); + AMRMTokenSecretManagerStatePBImpl stateData = + new AMRMTokenSecretManagerStatePBImpl( + AMRMTokenSecretManagerStateProto.parseFrom(data)); + 
rmState.amrmTokenSecretManagerState = + AMRMTokenSecretManagerState.newInstance( + stateData.getCurrentMasterKey(), stateData.getNextMasterKey()); + } + private void loadRMAppState(RMState rmState) throws Exception { try { List attempts = @@ -597,4 +629,25 @@ Path getNodePath(Path root, String nodeName) { return new Path(root, nodeName); } + @Override + public synchronized void storeOrUpdateAMRMTokenSecretManagerState( + AMRMTokenSecretManagerState amrmTokenSecretManagerState, + boolean isUpdate){ + Path nodeCreatePath = + getNodePath(amrmTokenSecretManagerRoot, AMRMTOKEN_SECRET_MANAGER_NODE); + AMRMTokenSecretManagerState data = + AMRMTokenSecretManagerState.newInstance(amrmTokenSecretManagerState); + byte[] stateData = data.getProto().toByteArray(); + try { + if (isUpdate) { + updateFile(nodeCreatePath, stateData); + } else { + writeFile(nodeCreatePath, stateData); + } + } catch (Exception ex) { + LOG.info("Error storing info for AMRMTokenSecretManager", ex); + notifyStoreOperationFailed(ex); + } + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java index 05cbb09630f..369f89a545e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java @@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; @@ -72,6 +73,10 @@ public synchronized RMState loadState() throws Exception { state.rmSecretManagerState.getTokenState()); returnState.rmSecretManagerState.dtSequenceNumber = state.rmSecretManagerState.dtSequenceNumber; + returnState.amrmTokenSecretManagerState = + state.amrmTokenSecretManagerState == null ? 
null + : AMRMTokenSecretManagerState + .newInstance(state.amrmTokenSecretManagerState); return returnState; } @@ -267,6 +272,16 @@ protected RMStateVersion getCurrentVersion() { return null; } + @Override + public void storeOrUpdateAMRMTokenSecretManagerState( + AMRMTokenSecretManagerState amrmTokenSecretManagerState, + boolean isUpdate) { + if (amrmTokenSecretManagerState != null) { + state.amrmTokenSecretManagerState = AMRMTokenSecretManagerState + .newInstance(amrmTokenSecretManagerState); + } + } + @Override public void deleteStore() throws Exception { } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java index 690f0bef94d..ea7087176c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java @@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; @@ -138,6 +139,12 @@ protected RMStateVersion getCurrentVersion() { return null; } + @Override + public void storeOrUpdateAMRMTokenSecretManagerState( + AMRMTokenSecretManagerState state, boolean isUpdate) { + //DO Nothing + } + @Override public void deleteStore() throws Exception { // Do nothing diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index 5b75b429697..e2c4e7e47fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -45,16 +45,14 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEvent; import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEventType; +import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNewSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; @@ -85,6 +83,8 @@ public abstract class RMStateStore extends AbstractService { protected static final String DELEGATION_TOKEN_PREFIX = "RMDelegationToken_"; protected static final String DELEGATION_TOKEN_SEQUENCE_NUMBER_PREFIX = "RMDTSequenceNumber_"; + protected static final String AMRMTOKEN_SECRET_MANAGER_ROOT = + "AMRMTokenSecretManagerRoot"; protected static final String VERSION_NODE = "RMVersionNode"; protected static final String EPOCH_NODE = "EpochNode"; @@ -412,6 +412,8 @@ public static class RMState { RMDTSecretManagerState rmSecretManagerState = new RMDTSecretManagerState(); + AMRMTokenSecretManagerState amrmTokenSecretManagerState = null; + public Map getApplicationState() { return appState; } @@ -419,6 +421,10 @@ public Map getApplicationState() { public RMDTSecretManagerState getRMDTSecretManagerState() { return rmSecretManagerState; } + + public AMRMTokenSecretManagerState getAMRMTokenSecretManagerState() { + return amrmTokenSecretManagerState; + } } private Dispatcher rmDispatcher; @@ -713,6 +719,14 @@ public synchronized void removeRMDTMasterKey(DelegationKey delegationKey) { protected abstract void removeRMDTMasterKeyState(DelegationKey delegationKey) throws Exception; + /** + * Blocking API Derived classes must implement this method to store or update + * the state of AMRMToken Master Key + */ + public abstract void storeOrUpdateAMRMTokenSecretManagerState( + AMRMTokenSecretManagerState amrmTokenSecretManagerState, + boolean isUpdate); + /** * Non-blocking API * ResourceManager services call this to remove an application from the state diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index 68b4632d3b5..5644ad9e34a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -44,18 +44,19 @@ import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.RMZKUtils; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; - import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Epoch; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.AMRMTokenSecretManagerStatePBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.EpochPBImpl; @@ -128,6 +129,9 @@ public class ZKRMStateStore extends RMStateStore { * | |----- Key_1 * | |----- Key_2 * .... + * |--- AMRMTOKEN_SECRET_MANAGER_ROOT + * |----- currentMasterKey + * |----- nextMasterKey * */ private String zkRootNodePath; @@ -136,6 +140,7 @@ public class ZKRMStateStore extends RMStateStore { private String dtMasterKeysRootPath; private String delegationTokensRootPath; private String dtSequenceNumberPath; + private String amrmTokenSecretManagerRoot; @VisibleForTesting protected String znodeWorkingPath; @@ -255,6 +260,8 @@ public synchronized void initInternal(Configuration conf) throws Exception { RM_DELEGATION_TOKENS_ROOT_ZNODE_NAME); dtSequenceNumberPath = getNodePath(rmDTSecretManagerRoot, RM_DT_SEQUENTIAL_NUMBER_ZNODE_NAME); + amrmTokenSecretManagerRoot = + getNodePath(zkRootNodePath, AMRMTOKEN_SECRET_MANAGER_ROOT); } @Override @@ -275,6 +282,7 @@ public synchronized void startInternal() throws Exception { createRootDir(dtMasterKeysRootPath); createRootDir(delegationTokensRootPath); createRootDir(dtSequenceNumberPath); + createRootDir(amrmTokenSecretManagerRoot); } private void createRootDir(final String rootPath) throws Exception { @@ -427,9 +435,27 @@ public synchronized RMState loadState() throws Exception { loadRMDTSecretManagerState(rmState); // recover RM applications loadRMAppState(rmState); + // recover AMRMTokenSecretManager + loadAMRMTokenSecretManagerState(rmState); return rmState; } + private void loadAMRMTokenSecretManagerState(RMState rmState) + throws Exception { + byte[] data = getDataWithRetries(amrmTokenSecretManagerRoot, true); + if (data == null) { + LOG.warn("There is no data saved"); + return; + } + AMRMTokenSecretManagerStatePBImpl stateData = + new AMRMTokenSecretManagerStatePBImpl( + AMRMTokenSecretManagerStateProto.parseFrom(data)); + rmState.amrmTokenSecretManagerState = + AMRMTokenSecretManagerState.newInstance( + stateData.getCurrentMasterKey(), stateData.getNextMasterKey()); + + } + private synchronized void loadRMDTSecretManagerState(RMState rmState) throws Exception { loadRMDelegationKeyState(rmState); @@ -1112,4 +1138,19 @@ protected synchronized ZooKeeper 
getNewZooKeeper() return zk; } + @Override + public synchronized void storeOrUpdateAMRMTokenSecretManagerState( + AMRMTokenSecretManagerState amrmTokenSecretManagerState, + boolean isUpdate) { + AMRMTokenSecretManagerState data = + AMRMTokenSecretManagerState.newInstance(amrmTokenSecretManagerState); + byte[] stateData = data.getProto().toByteArray(); + try { + setDataWithRetries(amrmTokenSecretManagerRoot, stateData, -1); + } catch (Exception ex) { + LOG.info("Error storing info for AMRMTokenSecretManager", ex); + notifyStoreOperationFailed(ex); + } + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/AMRMTokenSecretManagerState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/AMRMTokenSecretManagerState.java new file mode 100644 index 00000000000..89b4ff0fc7a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/AMRMTokenSecretManagerState.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.recovery.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; +import org.apache.hadoop.yarn.util.Records; + +/** + * Contains all the state data that needs to be stored persistently + * for {@link AMRMTokenSecretManager} + */ +@Public +@Unstable +public abstract class AMRMTokenSecretManagerState { + public static AMRMTokenSecretManagerState newInstance( + MasterKey currentMasterKey, MasterKey nextMasterKey) { + AMRMTokenSecretManagerState data = + Records.newRecord(AMRMTokenSecretManagerState.class); + data.setCurrentMasterKey(currentMasterKey); + data.setNextMasterKey(nextMasterKey); + return data; + } + + public static AMRMTokenSecretManagerState newInstance( + AMRMTokenSecretManagerState state) { + AMRMTokenSecretManagerState data = + Records.newRecord(AMRMTokenSecretManagerState.class); + data.setCurrentMasterKey(state.getCurrentMasterKey()); + data.setNextMasterKey(state.getNextMasterKey()); + return data; + } + + /** + * {@link AMRMTokenSecretManager} current Master key + */ + @Public + @Unstable + public abstract MasterKey getCurrentMasterKey(); + + @Public + @Unstable + public abstract void setCurrentMasterKey(MasterKey currentMasterKey); + + /** + * {@link AMRMTokenSecretManager} next Master key + */ + @Public + @Unstable + public abstract MasterKey getNextMasterKey(); + + @Public + @Unstable + public abstract void setNextMasterKey(MasterKey nextMasterKey); + + public abstract AMRMTokenSecretManagerStateProto getProto(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/AMRMTokenSecretManagerStatePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/AMRMTokenSecretManagerStatePBImpl.java new file mode 100644 index 00000000000..6ce0c546ada --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/AMRMTokenSecretManagerStatePBImpl.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProtoOrBuilder; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; + +public class AMRMTokenSecretManagerStatePBImpl extends AMRMTokenSecretManagerState{ + AMRMTokenSecretManagerStateProto proto = + AMRMTokenSecretManagerStateProto.getDefaultInstance(); + AMRMTokenSecretManagerStateProto.Builder builder = null; + boolean viaProto = false; + + private MasterKey currentMasterKey = null; + private MasterKey nextMasterKey = null; + + public AMRMTokenSecretManagerStatePBImpl() { + builder = AMRMTokenSecretManagerStateProto.newBuilder(); + } + + public AMRMTokenSecretManagerStatePBImpl(AMRMTokenSecretManagerStateProto proto) { + this.proto = proto; + viaProto = true; + } + + public AMRMTokenSecretManagerStateProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToBuilder() { + if (this.currentMasterKey != null) { + builder.setCurrentMasterKey(convertToProtoFormat(this.currentMasterKey)); + } + if (this.nextMasterKey != null) { + builder.setNextMasterKey(convertToProtoFormat(this.nextMasterKey)); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = AMRMTokenSecretManagerStateProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public MasterKey getCurrentMasterKey() { + AMRMTokenSecretManagerStateProtoOrBuilder p = viaProto ? proto : builder; + if (this.currentMasterKey != null) { + return this.currentMasterKey; + } + if (!p.hasCurrentMasterKey()) { + return null; + } + this.currentMasterKey = convertFromProtoFormat(p.getCurrentMasterKey()); + return this.currentMasterKey; + } + + @Override + public void setCurrentMasterKey(MasterKey currentMasterKey) { + maybeInitBuilder(); + if (currentMasterKey == null) + builder.clearCurrentMasterKey(); + this.currentMasterKey = currentMasterKey; + } + + @Override + public MasterKey getNextMasterKey() { + AMRMTokenSecretManagerStateProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.nextMasterKey != null) { + return this.nextMasterKey; + } + if (!p.hasNextMasterKey()) { + return null; + } + this.nextMasterKey = convertFromProtoFormat(p.getNextMasterKey()); + return this.nextMasterKey; + } + + @Override + public void setNextMasterKey(MasterKey nextMasterKey) { + maybeInitBuilder(); + if (nextMasterKey == null) + builder.clearNextMasterKey(); + this.nextMasterKey = nextMasterKey; + } + + private MasterKeyProto convertToProtoFormat(MasterKey t) { + return ((MasterKeyPBImpl) t).getProto(); + } + + private MasterKeyPBImpl convertFromProtoFormat(MasterKeyProto p) { + return new MasterKeyPBImpl(p); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java index c498b529bf6..a3132bc19e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java @@ -38,6 +38,10 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.security.MasterKeyData; import com.google.common.annotations.VisibleForTesting; @@ -66,6 +70,7 @@ public class AMRMTokenSecretManager extends private final Timer timer; private final long rollingInterval; private final long activationDelay; + private RMContext rmContext; private final Set appAttemptSet = new HashSet(); @@ -73,7 +78,8 @@ public class AMRMTokenSecretManager extends /** * Create an {@link AMRMTokenSecretManager} */ - public AMRMTokenSecretManager(Configuration conf) { + public AMRMTokenSecretManager(Configuration conf, RMContext rmContext) { + this.rmContext = rmContext; this.timer = new Timer(); this.rollingInterval = conf @@ -98,6 +104,11 @@ public AMRMTokenSecretManager(Configuration conf) { public void start() { if (this.currentMasterKey == null) { this.currentMasterKey = createNewMasterKey(); + AMRMTokenSecretManagerState state = + AMRMTokenSecretManagerState.newInstance( + this.currentMasterKey.getMasterKey(), null); + rmContext.getStateStore().storeOrUpdateAMRMTokenSecretManagerState(state, + false); } this.timer.scheduleAtFixedRate(new MasterKeyRoller(), rollingInterval, rollingInterval); @@ -130,6 +141,12 @@ void rollMasterKey() { try { LOG.info("Rolling master-key for amrm-tokens"); this.nextMasterKey = createNewMasterKey(); + AMRMTokenSecretManagerState state = + AMRMTokenSecretManagerState.newInstance( + this.currentMasterKey.getMasterKey(), + this.nextMasterKey.getMasterKey()); + rmContext.getStateStore().storeOrUpdateAMRMTokenSecretManagerState(state, + true); this.timer.schedule(new NextKeyActivator(), 
this.activationDelay); } finally { this.writeLock.unlock(); @@ -225,8 +242,8 @@ public byte[] retrievePassword(AMRMTokenIdentifier identifier) LOG.debug("Trying to retrieve password for " + applicationAttemptId); } if (!appAttemptSet.contains(applicationAttemptId)) { - throw new InvalidToken("Password not found for ApplicationAttempt " - + applicationAttemptId); + throw new InvalidToken(applicationAttemptId + + " not found in AMRMTokenSecretManager."); } if (identifier.getKeyId() == this.currentMasterKey.getMasterKey() .getKeyId()) { @@ -238,9 +255,7 @@ public byte[] retrievePassword(AMRMTokenIdentifier identifier) return createPassword(identifier.getBytes(), this.nextMasterKey.getSecretKey()); } - throw new InvalidToken("Given AMRMToken for application : " - + applicationAttemptId.toString() - + " seems to have been generated illegally."); + throw new InvalidToken("Invalid AMRMToken from " + applicationAttemptId); } finally { this.readLock.unlock(); } @@ -291,4 +306,25 @@ protected byte[] createPassword(AMRMTokenIdentifier identifier) { this.readLock.unlock(); } } + + public void recover(RMState state) { + if (state.getAMRMTokenSecretManagerState() != null) { + // recover the current master key + MasterKey currentKey = + state.getAMRMTokenSecretManagerState().getCurrentMasterKey(); + this.currentMasterKey = + new MasterKeyData(currentKey, createSecretKey(currentKey.getBytes() + .array())); + + // recover the next master key if not null + MasterKey nextKey = + state.getAMRMTokenSecretManagerState().getNextMasterKey(); + if (nextKey != null) { + this.nextMasterKey = + new MasterKeyData(nextKey, createSecretKey(nextKey.getBytes() + .array())); + this.timer.schedule(new NextKeyActivator(), this.activationDelay); + } + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto new file mode 100644 index 00000000000..ae56b9fd346 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +option java_package = "org.apache.hadoop.yarn.proto"; +option java_outer_classname = "YarnServerResourceManagerRecoveryProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +package hadoop.yarn; + +import "yarn_server_common_protos.proto"; + +message AMRMTokenSecretManagerStateProto { + optional MasterKeyProto current_master_key = 1; + optional MasterKeyProto next_master_key = 2; +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java index 8966af7e2a5..dc3e9f18178 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java @@ -1250,11 +1250,10 @@ public void testAppAttemptTokensRestoredOnRMRestart() throws Exception { .getEncoded()); // assert AMRMTokenSecretManager also knows about the AMRMToken password - // TODO: fix this on YARN-2211 -// Token amrmToken = loadedAttempt1.getAMRMToken(); -// Assert.assertArrayEquals(amrmToken.getPassword(), -// rm2.getRMContext().getAMRMTokenSecretManager().retrievePassword( -// amrmToken.decodeIdentifier())); + Token amrmToken = loadedAttempt1.getAMRMToken(); + Assert.assertArrayEquals(amrmToken.getPassword(), + rm2.getRMContext().getAMRMTokenSecretManager().retrievePassword( + amrmToken.decodeIdentifier())); rm1.stop(); rm2.stop(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java index 04f034818c0..a61f23f5a71 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java @@ -55,10 +55,12 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMDTSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; @@ -176,8 +178,12 @@ void testRMAppStateStore(RMStateStoreHelper stateStoreHelper) TestDispatcher dispatcher = new TestDispatcher(); store.setRMDispatcher(dispatcher); - AMRMTokenSecretManager appTokenMgr = spy( - new AMRMTokenSecretManager(conf)); + RMContext rmContext = mock(RMContext.class); + when(rmContext.getStateStore()).thenReturn(store); + + AMRMTokenSecretManager appTokenMgr = + spy(new AMRMTokenSecretManager(conf, rmContext)); + MasterKeyData masterKeyData = appTokenMgr.createNewMasterKey(); when(appTokenMgr.getMasterKey()).thenReturn(masterKeyData); @@ -576,4 +582,65 @@ protected void modifyRMDelegationTokenState() throws Exception { } + public void testAMRMTokenSecretManagerStateStore( + RMStateStoreHelper stateStoreHelper) throws Exception { + System.out.println("Start testing"); + RMStateStore store = stateStoreHelper.getRMStateStore(); + TestDispatcher dispatcher = new TestDispatcher(); + store.setRMDispatcher(dispatcher); + + RMContext rmContext = mock(RMContext.class); + when(rmContext.getStateStore()).thenReturn(store); + Configuration conf = new YarnConfiguration(); + AMRMTokenSecretManager appTokenMgr = + new AMRMTokenSecretManager(conf, rmContext); + + //create and save the first masterkey + MasterKeyData firstMasterKeyData = appTokenMgr.createNewMasterKey(); + + AMRMTokenSecretManagerState state1 = + AMRMTokenSecretManagerState.newInstance( + firstMasterKeyData.getMasterKey(), null); + rmContext.getStateStore().storeOrUpdateAMRMTokenSecretManagerState(state1, + false); + + // load state + store = stateStoreHelper.getRMStateStore(); + store.setRMDispatcher(dispatcher); + RMState state = store.loadState(); + Assert.assertNotNull(state.getAMRMTokenSecretManagerState()); + Assert.assertEquals(firstMasterKeyData.getMasterKey(), state + .getAMRMTokenSecretManagerState().getCurrentMasterKey()); + Assert.assertNull(state + .getAMRMTokenSecretManagerState().getNextMasterKey()); + + //create and save the second masterkey + MasterKeyData secondMasterKeyData = appTokenMgr.createNewMasterKey(); + AMRMTokenSecretManagerState state2 = + AMRMTokenSecretManagerState + .newInstance(firstMasterKeyData.getMasterKey(), + secondMasterKeyData.getMasterKey()); + rmContext.getStateStore().storeOrUpdateAMRMTokenSecretManagerState(state2, + true); + + // load state + store = stateStoreHelper.getRMStateStore(); + store.setRMDispatcher(dispatcher); + RMState state_2 = store.loadState(); + Assert.assertNotNull(state_2.getAMRMTokenSecretManagerState()); + Assert.assertEquals(firstMasterKeyData.getMasterKey(), state_2 + .getAMRMTokenSecretManagerState().getCurrentMasterKey()); + Assert.assertEquals(secondMasterKeyData.getMasterKey(), state_2 + .getAMRMTokenSecretManagerState().getNextMasterKey()); + + // re-create the masterKeyData based on the recovered masterkey + // should have the same secretKey + appTokenMgr.recover(state_2); + Assert.assertEquals(appTokenMgr.getCurrnetMasterKeyData().getSecretKey(), + firstMasterKeyData.getSecretKey()); + Assert.assertEquals(appTokenMgr.getNextMasterKeyData().getSecretKey(), + secondMasterKeyData.getSecretKey()); + + store.close(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java index ea90c3d76f3..f5b3e8a8a67 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java @@ -38,7 +38,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; @@ -161,6 +160,7 @@ public void testFSRMStateStore() throws Exception { testEpoch(fsTester); testAppDeletion(fsTester); testDeleteStore(fsTester); + testAMRMTokenSecretManagerStateStore(fsTester); } finally { cluster.shutdown(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java index e56f7757967..1dee533ac05 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java @@ -123,6 +123,7 @@ public void testZKRMStateStoreRealZK() throws Exception { testEpoch(zkTester); testAppDeletion(zkTester); testDeleteStore(zkTester); + testAMRMTokenSecretManagerStateStore(zkTester); } private Configuration createHARMConf( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index 0fd3c3c5c99..9ea51b120fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -193,7 +193,7 @@ public void setUp() throws Exception { this.rmContext = new RMContextImpl(rmDispatcher, containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, - null, new AMRMTokenSecretManager(conf), + null, new AMRMTokenSecretManager(conf, this.rmContext), new RMContainerTokenSecretManager(conf), new 
NMTokenSecretManagerInRM(conf), new ClientToAMTokenSecretManagerInRM(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index 1de35fcfa98..01a6973e69c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -134,7 +134,8 @@ public class TestRMAppAttemptTransitions { private RMAppAttempt applicationAttempt; private Configuration conf = new Configuration(); - private AMRMTokenSecretManager amRMTokenManager = spy(new AMRMTokenSecretManager(conf)); + private AMRMTokenSecretManager amRMTokenManager = + spy(new AMRMTokenSecretManager(conf, rmContext)); private ClientToAMTokenSecretManagerInRM clientToAMTokenManager = spy(new ClientToAMTokenSecretManagerInRM()); private NMTokenSecretManagerInRM nmTokenManager = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index db28dcaa558..e5486617bb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -86,13 +86,12 @@ public EventHandler getEventHandler() { Configuration conf = new Configuration(); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); - RMContext rmContext = + RMContextImpl rmContext = new RMContextImpl(nullDispatcher, cae, null, null, null, - new AMRMTokenSecretManager(conf), + new AMRMTokenSecretManager(conf, null), new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), new ClientToAMTokenSecretManagerInRM(), writer); - return rmContext; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java index b11aadd7912..14385c4c69b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java @@ -184,8 +184,8 @@ public void testTokenExpiry() throws Exception { // 
The exception will still have the earlier appAttemptId as it picks it // up from the token. Assert.assertTrue(t.getCause().getMessage().contains( - "Password not found for ApplicationAttempt " + - applicationAttemptId.toString())); + applicationAttemptId.toString() + + " not found in AMRMTokenSecretManager.")); } } finally { From 57d8f829d930091d82ec50b2ff7d327d0301e9d6 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Fri, 25 Jul 2014 22:12:37 +0000 Subject: [PATCH 060/354] HDFS-6755. There is an unnecessary sleep in the code path where DFSOutputStream#close gives up its attempt to contact the namenode (mitdesai21 via cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613522 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++++ .../src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 65f0665653a..2b6f1c6750f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -317,6 +317,10 @@ Release 2.6.0 - UNRELEASED HDFS-6701. Make seed optional in NetworkTopology#sortByDistance. (Ashwin Shankar via wang) + HDFS-6755. There is an unnecessary sleep in the code path where + DFSOutputStream#close gives up its attempt to contact the namenode + (mitdesai21 via cmccabe) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index a7cb92fa269..debf83ca1ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -2136,12 +2136,12 @@ private void completeFile(ExtendedBlock last) throws IOException { throw new IOException(msg); } try { - Thread.sleep(localTimeout); if (retries == 0) { throw new IOException("Unable to close file because the last block" + " does not have enough number of replicas."); } retries--; + Thread.sleep(localTimeout); localTimeout *= 2; if (Time.now() - localstart > 5000) { DFSClient.LOG.info("Could not complete " + src + " retrying..."); From e85a3fecc68b48a3dc9af5daa466a24f3b39545b Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Fri, 25 Jul 2014 23:56:47 +0000 Subject: [PATCH 061/354] HDFS-6750. 
The DataNode should use its shared memory segment to mark short-circuit replicas that have been unlinked as stale (cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613537 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../server/datanode/ShortCircuitRegistry.java | 29 +++++- .../fsdataset/impl/FsDatasetImpl.java | 8 ++ .../hdfs/shortcircuit/DfsClientShm.java | 21 ++-- .../shortcircuit/DfsClientShmManager.java | 10 +- .../hdfs/shortcircuit/ShortCircuitShm.java | 10 +- .../shortcircuit/TestShortCircuitCache.java | 98 +++++++++++++++++++ 7 files changed, 163 insertions(+), 16 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2b6f1c6750f..d3748106f8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -321,6 +321,9 @@ Release 2.6.0 - UNRELEASED DFSOutputStream#close gives up its attempt to contact the namenode (mitdesai21 via cmccabe) + HDFS-6750. The DataNode should use its shared memory segment to mark + short-circuit replicas that have been unlinked as stale (cmccabe) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java index 9dba6a2085d..a252a17855a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java @@ -74,7 +74,7 @@ * DN also marks the block's slots as "unanchorable" to prevent additional * clients from initiating these operations in the future. * - * The counterpart fo this class on the client is {@link DfsClientShmManager}. + * The counterpart of this class on the client is {@link DfsClientShmManager}. */ public class ShortCircuitRegistry { public static final Log LOG = LogFactory.getLog(ShortCircuitRegistry.class); @@ -217,7 +217,32 @@ public synchronized boolean processBlockMunlockRequest( } return allowMunlock; } - + + /** + * Invalidate any slot associated with a blockId that we are invalidating + * (deleting) from this DataNode. When a slot is invalid, the DFSClient will + * not use the corresponding replica for new read or mmap operations (although + * existing, ongoing read or mmap operations will complete.) + * + * @param blockId The block ID. + */ + public synchronized void processBlockInvalidation(ExtendedBlockId blockId) { + if (!enabled) return; + final Set affectedSlots = slots.get(blockId); + if (!affectedSlots.isEmpty()) { + final StringBuilder bld = new StringBuilder(); + String prefix = ""; + bld.append("Block ").append(blockId).append(" has been invalidated. "). 
+ append("Marking short-circuit slots as invalid: "); + for (Slot slot : affectedSlots) { + slot.makeInvalid(); + bld.append(prefix).append(slot.toString()); + prefix = ", "; + } + LOG.info(bld.toString()); + } + } + public static class NewShmInfo implements Closeable { public final ShmId shmId; public final FileInputStream stream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index b068c664fe3..e8a06aec8ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -44,6 +44,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; @@ -1232,8 +1233,15 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException { } volumeMap.remove(bpid, invalidBlks[i]); } + + // If a DFSClient has the replica in its cache of short-circuit file + // descriptors (and the client is using ShortCircuitShm), invalidate it. + datanode.getShortCircuitRegistry().processBlockInvalidation( + new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid)); + // If the block is cached, start uncaching it. cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId()); + // Delete the block asynchronously to make sure we can do it fast enough. // It's ok to unlink the block file before the uncache operation // finishes. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java index 1c9a2e5a742..81cc68da072 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java @@ -32,11 +32,16 @@ * DfsClientShm is a subclass of ShortCircuitShm which is used by the * DfsClient. * When the UNIX domain socket associated with this shared memory segment - * closes unexpectedly, we mark the slots inside this segment as stale. - * ShortCircuitReplica objects that contain stale slots are themselves stale, + * closes unexpectedly, we mark the slots inside this segment as disconnected. + * ShortCircuitReplica objects that contain disconnected slots are stale, * and will not be used to service new reads or mmap operations. * However, in-progress read or mmap operations will continue to proceed. * Once the last slot is deallocated, the segment can be safely munmapped. + * + * Slots may also become stale because the associated replica has been deleted + * on the DataNode. In this case, the DataNode will clear the 'valid' bit. + * The client will then see these slots as stale (see + * #{ShortCircuitReplica#isStale}). */ public class DfsClientShm extends ShortCircuitShm implements DomainSocketWatcher.Handler { @@ -58,7 +63,7 @@ public class DfsClientShm extends ShortCircuitShm * * {@link DfsClientShm#handle} sets this to true. 
*/ - private boolean stale = false; + private boolean disconnected = false; DfsClientShm(ShmId shmId, FileInputStream stream, EndpointShmManager manager, DomainPeer peer) throws IOException { @@ -76,14 +81,14 @@ public DomainPeer getPeer() { } /** - * Determine if the shared memory segment is stale. + * Determine if the shared memory segment is disconnected from the DataNode. * * This must be called with the DfsClientShmManager lock held. * * @return True if the shared memory segment is stale. */ - public synchronized boolean isStale() { - return stale; + public synchronized boolean isDisconnected() { + return disconnected; } /** @@ -97,8 +102,8 @@ public synchronized boolean isStale() { public boolean handle(DomainSocket sock) { manager.unregisterShm(getShmId()); synchronized (this) { - Preconditions.checkState(!stale); - stale = true; + Preconditions.checkState(!disconnected); + disconnected = true; boolean hadSlots = false; for (Iterator iter = slotIterator(); iter.hasNext(); ) { Slot slot = iter.next(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java index ca9e8e6e0a5..6dbaf84d269 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java @@ -271,12 +271,12 @@ Slot allocSlot(DomainPeer peer, MutableBoolean usedPeer, loading = false; finishedLoading.signalAll(); } - if (shm.isStale()) { + if (shm.isDisconnected()) { // If the peer closed immediately after the shared memory segment // was created, the DomainSocketWatcher callback might already have - // fired and marked the shm as stale. In this case, we obviously - // don't want to add the SharedMemorySegment to our list of valid - // not-full segments. + // fired and marked the shm as disconnected. In this case, we + // obviously don't want to add the SharedMemorySegment to our list + // of valid not-full segments. if (LOG.isDebugEnabled()) { LOG.debug(this + ": the UNIX domain socket associated with " + "this short-circuit memory closed before we could make " + @@ -299,7 +299,7 @@ Slot allocSlot(DomainPeer peer, MutableBoolean usedPeer, void freeSlot(Slot slot) { DfsClientShm shm = (DfsClientShm)slot.getShm(); shm.unregisterSlot(slot.getSlotIdx()); - if (shm.isStale()) { + if (shm.isDisconnected()) { // Stale shared memory segments should not be tracked here. Preconditions.checkState(!full.containsKey(shm.getShmId())); Preconditions.checkState(!notFull.containsKey(shm.getShmId())); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java index d860c8b174c..7b89d0a978d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java @@ -306,6 +306,13 @@ public int getSlotIdx() { (slotAddress - baseAddress) / BYTES_PER_SLOT); } + /** + * Clear the slot. 
+ */ + void clear() { + unsafe.putLongVolatile(null, this.slotAddress, 0); + } + private boolean isSet(long flag) { long prev = unsafe.getLongVolatile(null, this.slotAddress); return (prev & flag) != 0; @@ -535,6 +542,7 @@ synchronized public final Slot allocAndRegisterSlot( } allocatedSlots.set(idx, true); Slot slot = new Slot(calculateSlotAddress(idx), blockId); + slot.clear(); slot.makeValid(); slots[idx] = slot; if (LOG.isTraceEnabled()) { @@ -583,7 +591,7 @@ synchronized public final Slot registerSlot(int slotIdx, Slot slot = new Slot(calculateSlotAddress(slotIdx), blockId); if (!slot.isValid()) { throw new InvalidRequestException(this + ": slot " + slotIdx + - " has not been allocated."); + " is not marked as valid."); } slots[slotIdx] = slot; allocatedSlots.set(slotIdx, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java index a2d2bf830f3..ca30e029942 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY; import static org.hamcrest.CoreMatchers.equalTo; import java.io.DataOutputStream; @@ -30,7 +31,9 @@ import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; +import java.util.Arrays; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import org.apache.commons.lang.mutable.MutableBoolean; @@ -462,6 +465,7 @@ public void visit(HashMap info) } }, 10, 60000); cluster.shutdown(); + sockDir.close(); } @Test(timeout=60000) @@ -516,4 +520,98 @@ public void visit(int numOutstandingMmaps, }); cluster.shutdown(); } + + /** + * Test unlinking a file whose blocks we are caching in the DFSClient. + * The DataNode will notify the DFSClient that the replica is stale via the + * ShortCircuitShm. + */ + @Test(timeout=60000) + public void testUnlinkingReplicasInFileDescriptorCache() throws Exception { + BlockReaderTestUtil.enableShortCircuitShmTracing(); + TemporarySocketDirectory sockDir = new TemporarySocketDirectory(); + Configuration conf = createShortCircuitConf( + "testUnlinkingReplicasInFileDescriptorCache", sockDir); + // We don't want the CacheCleaner to time out short-circuit shared memory + // segments during the test, so set the timeout really high. + conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY, + 1000000000L); + MiniDFSCluster cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + DistributedFileSystem fs = cluster.getFileSystem(); + final ShortCircuitCache cache = + fs.getClient().getClientContext().getShortCircuitCache(); + cache.getDfsClientShmManager().visit(new Visitor() { + @Override + public void visit(HashMap info) + throws IOException { + // The ClientShmManager starts off empty. 
+ Assert.assertEquals(0, info.size()); + } + }); + final Path TEST_PATH = new Path("/test_file"); + final int TEST_FILE_LEN = 8193; + final int SEED = 0xFADE0; + DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LEN, + (short)1, SEED); + byte contents[] = DFSTestUtil.readFileBuffer(fs, TEST_PATH); + byte expected[] = DFSTestUtil. + calculateFileContentsFromSeed(SEED, TEST_FILE_LEN); + Assert.assertTrue(Arrays.equals(contents, expected)); + // Loading this file brought the ShortCircuitReplica into our local + // replica cache. + final DatanodeInfo datanode = + new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId()); + cache.getDfsClientShmManager().visit(new Visitor() { + @Override + public void visit(HashMap info) + throws IOException { + Assert.assertTrue(info.get(datanode).full.isEmpty()); + Assert.assertFalse(info.get(datanode).disabled); + Assert.assertEquals(1, info.get(datanode).notFull.values().size()); + DfsClientShm shm = + info.get(datanode).notFull.values().iterator().next(); + Assert.assertFalse(shm.isDisconnected()); + } + }); + // Remove the file whose blocks we just read. + fs.delete(TEST_PATH, false); + + // Wait for the replica to be purged from the DFSClient's cache. + GenericTestUtils.waitFor(new Supplier() { + MutableBoolean done = new MutableBoolean(true); + @Override + public Boolean get() { + try { + done.setValue(true); + cache.getDfsClientShmManager().visit(new Visitor() { + @Override + public void visit(HashMap info) throws IOException { + Assert.assertTrue(info.get(datanode).full.isEmpty()); + Assert.assertFalse(info.get(datanode).disabled); + Assert.assertEquals(1, + info.get(datanode).notFull.values().size()); + DfsClientShm shm = info.get(datanode).notFull.values(). + iterator().next(); + // Check that all slots have been invalidated. + for (Iterator iter = shm.slotIterator(); + iter.hasNext(); ) { + Slot slot = iter.next(); + if (slot.isValid()) { + done.setValue(false); + } + } + } + }); + } catch (IOException e) { + LOG.error("error running visitor", e); + } + return done.booleanValue(); + } + }, 10, 60000); + cluster.shutdown(); + sockDir.close(); + } } From 9cfde45b45d6ca0f9665510d687dcd19f4ae3ef3 Mon Sep 17 00:00:00 2001 From: Karthik Kambatla Date: Sat, 26 Jul 2014 00:48:42 +0000 Subject: [PATCH 062/354] HADOOP-10896. Update compatibility doc to capture visibility of un-annotated classes/ methods. (kasha) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613543 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/site/apt/Compatibility.apt.vm | 9 ++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 419d45c6831..34bd95103cd 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -621,6 +621,9 @@ Release 2.5.0 - UNRELEASED HADOOP-10782. Fix typo in DataChecksum class. (Jingguo Yao via suresh) + HADOOP-10896. Update compatibility doc to capture visibility of + un-annotated classes/ methods. (kasha) + OPTIMIZATIONS HADOOP-10674. 
Improve PureJavaCrc32 performance and use java.util.zip.CRC32 diff --git a/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm index ecf6e75f3bc..98d1f57166f 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm @@ -72,10 +72,13 @@ Apache Hadoop Compatibility * Private-Stable APIs can change across major releases, but not within a major release. + * Classes not annotated are implicitly "Private". Class members not + annotated inherit the annotations of the enclosing class. + * Note: APIs generated from the proto files need to be compatible for -rolling-upgrades. See the section on wire-compatibility for more details. The -compatibility policies for APIs and wire-communication need to go -hand-in-hand to address this. + rolling-upgrades. See the section on wire-compatibility for more details. + The compatibility policies for APIs and wire-communication need to go + hand-in-hand to address this. ** Semantic compatibility From b0c2c856192e71145c012504511fad172436131b Mon Sep 17 00:00:00 2001 From: Karthik Kambatla Date: Sat, 26 Jul 2014 01:29:25 +0000 Subject: [PATCH 063/354] YARN-1726. ResourceSchedulerWrapper broken due to AbstractYarnScheduler. (Wei Yan via kasha) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613547 13f79535-47bb-0310-9956-ffa450edef68 --- .../yarn/sls/appmaster/AMSimulator.java | 69 +++++++++---------- .../yarn/sls/appmaster/MRAMSimulator.java | 5 +- .../yarn/sls/nodemanager/NMSimulator.java | 68 ++++++++++-------- .../scheduler/ResourceSchedulerWrapper.java | 40 +++++++---- .../sls/scheduler/SLSCapacityScheduler.java | 22 +++--- .../hadoop/yarn/sls/scheduler/TaskRunner.java | 16 ++--- .../apache/hadoop/yarn/sls/TestSLSRunner.java | 30 +++++++- hadoop-yarn-project/CHANGES.txt | 3 + 8 files changed, 149 insertions(+), 104 deletions(-) diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java index 5af4eaa2de4..2272e3ed911 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java @@ -63,6 +63,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Logger; @@ -133,8 +135,7 @@ public void init(int id, int heartbeatInterval, * register with RM */ @Override - public void firstStep() - throws YarnException, IOException, InterruptedException { + public void firstStep() throws Exception { simulateStartTimeMS = System.currentTimeMillis() - SLSRunner.getRunner().getStartTimeMS(); @@ -149,8 +150,7 @@ public void firstStep() } @Override - public void middleStep() - throws InterruptedException, YarnException, IOException { + public void middleStep() throws Exception { // process responses in the queue processResponseQueue(); @@ -162,7 +162,7 @@ public void middleStep() } @Override - public void lastStep() { + 
public void lastStep() throws Exception { LOG.info(MessageFormat.format("Application {0} is shutting down.", appId)); // unregister tracking if (isTracked) { @@ -173,26 +173,19 @@ public void lastStep() { .newRecordInstance(FinishApplicationMasterRequest.class); finishAMRequest.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED); - try { - UserGroupInformation ugi = - UserGroupInformation.createRemoteUser(appAttemptId.toString()); - Token token = - rm.getRMContext().getRMApps().get(appAttemptId.getApplicationId()) - .getRMAppAttempt(appAttemptId).getAMRMToken(); - ugi.addTokenIdentifier(token.decodeIdentifier()); - ugi.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - rm.getApplicationMasterService() - .finishApplicationMaster(finishAMRequest); - return null; - } - }); - } catch (IOException e) { - e.printStackTrace(); - } catch (InterruptedException e) { - e.printStackTrace(); - } + UserGroupInformation ugi = + UserGroupInformation.createRemoteUser(appAttemptId.toString()); + Token token = rm.getRMContext().getRMApps().get(appId) + .getRMAppAttempt(appAttemptId).getAMRMToken(); + ugi.addTokenIdentifier(token.decodeIdentifier()); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + rm.getApplicationMasterService() + .finishApplicationMaster(finishAMRequest); + return null; + } + }); simulateFinishTimeMS = System.currentTimeMillis() - SLSRunner.getRunner().getStartTimeMS(); @@ -230,11 +223,9 @@ protected AllocateRequest createAllocateRequest(List ask) { return createAllocateRequest(ask, new ArrayList()); } - protected abstract void processResponseQueue() - throws InterruptedException, YarnException, IOException; + protected abstract void processResponseQueue() throws Exception; - protected abstract void sendContainerRequest() - throws YarnException, IOException, InterruptedException; + protected abstract void sendContainerRequest() throws Exception; protected abstract void checkStop(); @@ -280,11 +271,18 @@ public Object run() throws YarnException { // waiting until application ACCEPTED RMApp app = rm.getRMContext().getRMApps().get(appId); while(app.getState() != RMAppState.ACCEPTED) { - Thread.sleep(50); + Thread.sleep(10); } - appAttemptId = rm.getRMContext().getRMApps().get(appId) - .getCurrentAppAttempt().getAppAttemptId(); + // Waiting until application attempt reach LAUNCHED + // "Unmanaged AM must register after AM attempt reaches LAUNCHED state" + this.appAttemptId = rm.getRMContext().getRMApps().get(appId) + .getCurrentAppAttempt().getAppAttemptId(); + RMAppAttempt rmAppAttempt = rm.getRMContext().getRMApps().get(appId) + .getCurrentAppAttempt(); + while (rmAppAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) { + Thread.sleep(10); + } } private void registerAM() @@ -297,10 +295,9 @@ private void registerAM() amRegisterRequest.setTrackingUrl("localhost:1000"); UserGroupInformation ugi = - UserGroupInformation.createRemoteUser(appAttemptId.toString()); - Token token = - rm.getRMContext().getRMApps().get(appAttemptId.getApplicationId()) - .getRMAppAttempt(appAttemptId).getAMRMToken(); + UserGroupInformation.createRemoteUser(appAttemptId.toString()); + Token token = rm.getRMContext().getRMApps().get(appId) + .getRMAppAttempt(appAttemptId).getAMRMToken(); ugi.addTokenIdentifier(token.decodeIdentifier()); ugi.doAs( diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java index d24510ba6fd..fb702059ade 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java @@ -145,8 +145,7 @@ public void init(int id, int heartbeatInterval, } @Override - public void firstStep() - throws YarnException, IOException, InterruptedException { + public void firstStep() throws Exception { super.firstStep(); requestAMContainer(); @@ -390,7 +389,7 @@ protected void checkStop() { } @Override - public void lastStep() { + public void lastStep() throws Exception { super.lastStep(); // clear data structures diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java index 4112685e152..0947ba8a18b 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java @@ -27,6 +27,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.DelayQueue; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -107,12 +108,12 @@ public void init(String nodeIdStr, int memory, int cores, } @Override - public void firstStep() throws YarnException, IOException { + public void firstStep() { // do nothing } @Override - public void middleStep() { + public void middleStep() throws Exception { // we check the lifetime for each running containers ContainerSimulator cs = null; synchronized(completedContainerList) { @@ -136,37 +137,31 @@ public void middleStep() { ns.setResponseId(RESPONSE_ID ++); ns.setNodeHealthStatus(NodeHealthStatus.newInstance(true, "", 0)); beatRequest.setNodeStatus(ns); - try { - NodeHeartbeatResponse beatResponse = - rm.getResourceTrackerService().nodeHeartbeat(beatRequest); - if (! beatResponse.getContainersToCleanup().isEmpty()) { - // remove from queue - synchronized(releasedContainerList) { - for (ContainerId containerId : beatResponse.getContainersToCleanup()){ - if (amContainerList.contains(containerId)) { - // AM container (not killed?, only release) - synchronized(amContainerList) { - amContainerList.remove(containerId); - } - LOG.debug(MessageFormat.format("NodeManager {0} releases " + - "an AM ({1}).", node.getNodeID(), containerId)); - } else { - cs = runningContainers.remove(containerId); - containerQueue.remove(cs); - releasedContainerList.add(containerId); - LOG.debug(MessageFormat.format("NodeManager {0} releases a " + - "container ({1}).", node.getNodeID(), containerId)); + NodeHeartbeatResponse beatResponse = + rm.getResourceTrackerService().nodeHeartbeat(beatRequest); + if (! 
beatResponse.getContainersToCleanup().isEmpty()) { + // remove from queue + synchronized(releasedContainerList) { + for (ContainerId containerId : beatResponse.getContainersToCleanup()){ + if (amContainerList.contains(containerId)) { + // AM container (not killed?, only release) + synchronized(amContainerList) { + amContainerList.remove(containerId); } + LOG.debug(MessageFormat.format("NodeManager {0} releases " + + "an AM ({1}).", node.getNodeID(), containerId)); + } else { + cs = runningContainers.remove(containerId); + containerQueue.remove(cs); + releasedContainerList.add(containerId); + LOG.debug(MessageFormat.format("NodeManager {0} releases a " + + "container ({1}).", node.getNodeID(), containerId)); } } } - if (beatResponse.getNodeAction() == NodeAction.SHUTDOWN) { - lastStep(); - } - } catch (YarnException e) { - e.printStackTrace(); - } catch (IOException e) { - e.printStackTrace(); + } + if (beatResponse.getNodeAction() == NodeAction.SHUTDOWN) { + lastStep(); } } @@ -262,4 +257,19 @@ public void cleanupContainer(ContainerId containerId) { completedContainerList.add(containerId); } } + + @VisibleForTesting + Map getRunningContainers() { + return runningContainers; + } + + @VisibleForTesting + List getAMContainers() { + return amContainerList; + } + + @VisibleForTesting + List getCompletedContainers() { + return completedContainerList; + } } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java index 0bd0c87d2d0..6ccae98bd86 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java @@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; @@ -101,7 +102,6 @@ final public class ResourceSchedulerWrapper private static final String EOL = System.getProperty("line.separator"); private static final int SAMPLING_SIZE = 60; private ScheduledExecutorService pool; - private RMContext rmContext; // counters for scheduler allocate/handle operations private Counter schedulerAllocateCounter; private Counter schedulerHandleCounter; @@ -576,7 +576,7 @@ private void registerContainerAppNumMetrics() { new Gauge() { @Override public Integer getValue() { - if(scheduler == null || scheduler.getRootQueueMetrics() == null) { + if (scheduler == null || scheduler.getRootQueueMetrics() == null) { return 0; } else { return scheduler.getRootQueueMetrics().getAppsRunning(); @@ -723,17 +723,18 @@ public void run() { public void addAMRuntime(ApplicationId appId, long traceStartTimeMS, long traceEndTimeMS, long simulateStartTimeMS, long simulateEndTimeMS) { - - try { - // write job runtime information - StringBuilder sb = new StringBuilder(); - sb.append(appId).append(",").append(traceStartTimeMS).append(",") - 
.append(traceEndTimeMS).append(",").append(simulateStartTimeMS) - .append(",").append(simulateEndTimeMS); - jobRuntimeLogBW.write(sb.toString() + EOL); - jobRuntimeLogBW.flush(); - } catch (IOException e) { - e.printStackTrace(); + if (metricsON) { + try { + // write job runtime information + StringBuilder sb = new StringBuilder(); + sb.append(appId).append(",").append(traceStartTimeMS).append(",") + .append(traceEndTimeMS).append(",").append(simulateStartTimeMS) + .append(",").append(simulateEndTimeMS); + jobRuntimeLogBW.write(sb.toString() + EOL); + jobRuntimeLogBW.flush(); + } catch (IOException e) { + e.printStackTrace(); + } } } @@ -919,4 +920,17 @@ public String moveApplication(ApplicationId appId, String newQueue) public Resource getClusterResource() { return null; } + + @Override + public synchronized List getTransferredContainers( + ApplicationAttemptId currentAttempt) { + return new ArrayList(); + } + + @Override + public Map> + getSchedulerApplications() { + return new HashMap>(); + } } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java index 44a872198d6..06addfb28fd 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java @@ -729,16 +729,18 @@ public void addAMRuntime(ApplicationId appId, long traceStartTimeMS, long traceEndTimeMS, long simulateStartTimeMS, long simulateEndTimeMS) { - try { - // write job runtime information - StringBuilder sb = new StringBuilder(); - sb.append(appId).append(",").append(traceStartTimeMS).append(",") - .append(traceEndTimeMS).append(",").append(simulateStartTimeMS) - .append(",").append(simulateEndTimeMS); - jobRuntimeLogBW.write(sb.toString() + EOL); - jobRuntimeLogBW.flush(); - } catch (IOException e) { - e.printStackTrace(); + if (metricsON) { + try { + // write job runtime information + StringBuilder sb = new StringBuilder(); + sb.append(appId).append(",").append(traceStartTimeMS).append(",") + .append(traceEndTimeMS).append(",").append(simulateStartTimeMS) + .append(",").append(simulateEndTimeMS); + jobRuntimeLogBW.write(sb.toString() + EOL); + jobRuntimeLogBW.flush(); + } catch (IOException e) { + e.printStackTrace(); + } } } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java index c936dd93180..d35290428c7 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java @@ -99,12 +99,10 @@ public final void run() { } else { lastStep(); } - } catch (YarnException e) { - e.printStackTrace(); - } catch (IOException e) { - e.printStackTrace(); - } catch (InterruptedException e) { + } catch (Exception e) { e.printStackTrace(); + Thread.getDefaultUncaughtExceptionHandler() + .uncaughtException(Thread.currentThread(), e); } } @@ -124,13 +122,11 @@ public int compareTo(Delayed o) { } - public abstract void firstStep() - throws YarnException, IOException, InterruptedException; + public abstract void firstStep() throws Exception; - public abstract void middleStep() - throws YarnException, InterruptedException, IOException; + public abstract void 
middleStep() throws Exception; - public abstract void lastStep() throws YarnException; + public abstract void lastStep() throws Exception; public void setEndTime(long et) { endTime = et; diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java index b05972734f2..9da8ef34a20 100644 --- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java +++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java @@ -18,10 +18,13 @@ package org.apache.hadoop.yarn.sls; -import org.apache.commons.io.FileUtils; +import org.junit.Assert; import org.junit.Test; import java.io.File; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.UUID; public class TestSLSRunner { @@ -30,6 +33,15 @@ public class TestSLSRunner { @SuppressWarnings("all") public void testSimulatorRunning() throws Exception { File tempDir = new File("target", UUID.randomUUID().toString()); + final List exceptionList = + Collections.synchronizedList(new ArrayList()); + + Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { + @Override + public void uncaughtException(Thread t, Throwable e) { + exceptionList.add(e); + } + }); // start the simulator File slsOutputDir = new File(tempDir.getAbsolutePath() + "/slsoutput/"); @@ -38,8 +50,20 @@ public void testSimulatorRunning() throws Exception { "-output", slsOutputDir.getAbsolutePath()}; SLSRunner.main(args); - // wait for 45 seconds before stop - Thread.sleep(45 * 1000); + // wait for 20 seconds before stop + int count = 20; + while (count >= 0) { + Thread.sleep(1000); + + if (! exceptionList.isEmpty()) { + SLSRunner.getRunner().stop(); + Assert.fail("TestSLSRunner catched exception from child thread " + + "(TaskRunner.Task): " + exceptionList.get(0).getMessage()); + break; + } + count--; + } + SLSRunner.getRunner().stop(); } diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index e6fbea9350e..3d3429a674e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -444,6 +444,9 @@ Release 2.5.0 - UNRELEASED YARN-2335. Annotate all hadoop-sls APIs as @Private. (Wei Yan via kasha) + YARN-1726. ResourceSchedulerWrapper broken due to AbstractYarnScheduler. + (Wei Yan via kasha) + Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES From 5d4677b57b05c1690ab0a825869df9a4f6c173bc Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Sat, 26 Jul 2014 01:51:35 +0000 Subject: [PATCH 064/354] YARN-1796. container-executor shouldn't require o-r permissions. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613548 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 ++ .../container-executor/impl/container-executor.c | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 3d3429a674e..fb164f8d303 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -106,6 +106,8 @@ Release 2.6.0 - UNRELEASED YARN-2147. client lacks delegation token exception details when application submit fails (Chen He via jlowe) + YARN-1796. 
container-executor shouldn't require o-r permissions (atm) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c index 9387ba4f1e0..16ede961edc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c @@ -111,16 +111,16 @@ int check_executor_permissions(char *executable_file) { return -1; } - // check others do not have read/write/execute permissions - if ((filestat.st_mode & S_IROTH) == S_IROTH || (filestat.st_mode & S_IWOTH) - == S_IWOTH || (filestat.st_mode & S_IXOTH) == S_IXOTH) { + // check others do not have write/execute permissions + if ((filestat.st_mode & S_IWOTH) == S_IWOTH || + (filestat.st_mode & S_IXOTH) == S_IXOTH) { fprintf(LOGFILE, - "The container-executor binary should not have read or write or" - " execute for others.\n"); + "The container-executor binary should not have write or execute " + "for others.\n"); return -1; } - // Binary should be setuid/setgid executable + // Binary should be setuid executable if ((filestat.st_mode & S_ISUID) == 0) { fprintf(LOGFILE, "The container-executor binary should be set setuid.\n"); return -1; From 4625792c20d8e297a34fe9599d9b474a6b1cc77a Mon Sep 17 00:00:00 2001 From: Karthik Kambatla Date: Sat, 26 Jul 2014 02:03:18 +0000 Subject: [PATCH 065/354] YARN-1726. Add missing files. ResourceSchedulerWrapper broken due to AbstractYarnScheduler. (Wei Yan via kasha) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613552 13f79535-47bb-0310-9956-ffa450edef68 --- .../yarn/sls/appmaster/TestAMSimulator.java | 86 +++++++++++++++ .../yarn/sls/nodemanager/TestNMSimulator.java | 100 ++++++++++++++++++ 2 files changed, 186 insertions(+) create mode 100644 hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java create mode 100644 hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/nodemanager/TestNMSimulator.java diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java new file mode 100644 index 00000000000..83482c33686 --- /dev/null +++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.sls.appmaster; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.sls.conf.SLSConfiguration; +import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class TestAMSimulator { + private ResourceManager rm; + private YarnConfiguration conf; + + @Before + public void setup() { + conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_SCHEDULER, + "org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper"); + conf.set(SLSConfiguration.RM_SCHEDULER, + "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler"); + conf.setBoolean(SLSConfiguration.METRICS_SWITCH, false); + rm = new ResourceManager(); + rm.init(conf); + rm.start(); + } + + class MockAMSimulator extends AMSimulator { + @Override + protected void processResponseQueue() + throws InterruptedException, YarnException, IOException { + } + + @Override + protected void sendContainerRequest() + throws YarnException, IOException, InterruptedException { + } + + @Override + protected void checkStop() { + } + } + + @Test + public void testAMSimulator() throws Exception { + // Register one app + MockAMSimulator app = new MockAMSimulator(); + List containers = new ArrayList(); + app.init(1, 1000, containers, rm, null, 0, 1000000l, "user1", "default", + false, "app1"); + app.firstStep(); + Assert.assertEquals(1, rm.getRMContext().getRMApps().size()); + Assert.assertNotNull(rm.getRMContext().getRMApps().get(app.appId)); + + // Finish this app + app.lastStep(); + } + + @After + public void tearDown() { + rm.stop(); + } +} \ No newline at end of file diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/nodemanager/TestNMSimulator.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/nodemanager/TestNMSimulator.java new file mode 100644 index 00000000000..84be2313cf4 --- /dev/null +++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/nodemanager/TestNMSimulator.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.sls.nodemanager; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.apache.hadoop.yarn.sls.conf.SLSConfiguration; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class TestNMSimulator { + private final int GB = 1024; + private ResourceManager rm; + private YarnConfiguration conf; + + @Before + public void setup() { + conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_SCHEDULER, + "org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper"); + conf.set(SLSConfiguration.RM_SCHEDULER, + "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler"); + conf.setBoolean(SLSConfiguration.METRICS_SWITCH, false); + rm = new ResourceManager(); + rm.init(conf); + rm.start(); + } + + @Test + public void testNMSimulator() throws Exception { + // Register one node + NMSimulator node1 = new NMSimulator(); + node1.init("rack1/node1", GB * 10, 10, 0, 1000, rm); + node1.middleStep(); + + Assert.assertEquals(1, rm.getResourceScheduler().getNumClusterNodes()); + Assert.assertEquals(GB * 10, + rm.getResourceScheduler().getRootQueueMetrics().getAvailableMB()); + Assert.assertEquals(10, + rm.getResourceScheduler().getRootQueueMetrics() + .getAvailableVirtualCores()); + + // Allocate one container on node1 + ContainerId cId1 = newContainerId(1, 1, 1); + Container container1 = Container.newInstance(cId1, null, null, + Resources.createResource(GB, 1), null, null); + node1.addNewContainer(container1, 100000l); + Assert.assertTrue("Node1 should have one running container.", + node1.getRunningContainers().containsKey(cId1)); + + // Allocate one AM container on node1 + ContainerId cId2 = newContainerId(2, 1, 1); + Container container2 = Container.newInstance(cId2, null, null, + Resources.createResource(GB, 1), null, null); + node1.addNewContainer(container2, -1l); + Assert.assertTrue("Node1 should have one running AM container", + node1.getAMContainers().contains(cId2)); + + // Remove containers + node1.cleanupContainer(cId1); + Assert.assertTrue("Container1 should be removed from Node1.", + node1.getCompletedContainers().contains(cId1)); + node1.cleanupContainer(cId2); + Assert.assertFalse("Container2 should be removed from Node1.", + node1.getAMContainers().contains(cId2)); + } + + private ContainerId newContainerId(int appId, int appAttemptId, int cId) { + return BuilderUtils.newContainerId( + BuilderUtils.newApplicationAttemptId( + BuilderUtils.newApplicationId(System.currentTimeMillis(), appId), + appAttemptId), cId); + } + + @After + public void tearDown() throws Exception { + rm.stop(); + } +} From 9bce3eca42b40f9b1f9fc23fc4418ed23b7beda5 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Sat, 26 Jul 2014 04:18:28 +0000 Subject: [PATCH 066/354] HDFS-6749. FSNamesystem methods should call resolvePath. Contributed by Charles Lamb. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613561 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop/hdfs/server/namenode/FSNamesystem.java | 8 ++++++++ .../hadoop/hdfs/server/namenode/TestINodeFile.java | 14 ++++++++++++++ 3 files changed, 25 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d3748106f8c..eb524b209df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -383,6 +383,9 @@ Release 2.6.0 - UNRELEASED HDFS-6752. Avoid Address bind errors in TestDatanodeConfig#testMemlockLimit (vinayakumarb) + HDFS-6749. FSNamesystem methods should call resolvePath. + (Charles Lamb via cnauroth) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6ce6a70ce2b..a705f17dd27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3720,8 +3720,10 @@ boolean isFileClosed(String src) StandbyException, IOException { FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { + src = FSDirectory.resolvePath(src, pathComponents, dir); checkOperation(OperationCategory.READ); if (isPermissionEnabled) { checkTraverse(pc, src); @@ -8183,9 +8185,11 @@ AclStatus getAclStatus(String src) throws IOException { nnConf.checkAclsConfigFlag(); FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { checkOperation(OperationCategory.READ); + src = FSDirectory.resolvePath(src, pathComponents, dir); if (isPermissionEnabled) { checkPermission(pc, src, false, null, null, null, null); } @@ -8288,8 +8292,10 @@ List getXAttrs(String src, List xAttrs) throws IOException { } } checkOperation(OperationCategory.READ); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { + src = FSDirectory.resolvePath(src, pathComponents, dir); checkOperation(OperationCategory.READ); if (isPermissionEnabled) { checkPathAccess(pc, src, FsAction.READ); @@ -8333,8 +8339,10 @@ List listXAttrs(String src) throws IOException { nnConf.checkXAttrsConfigFlag(); final FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { + src = FSDirectory.resolvePath(src, pathComponents, dir); checkOperation(OperationCategory.READ); if (isPermissionEnabled) { /* To access xattr names, you need EXECUTE in the owning directory. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 3cee355840c..704bc1669d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -521,6 +521,7 @@ public void testInodeIdBasedPaths() throws Exception { Configuration conf = new Configuration(); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); @@ -568,6 +569,19 @@ public void testInodeIdBasedPaths() throws Exception { // ClientProtocol#getPreferredBlockSize assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString())); + + /* + * HDFS-6749 added missing calls to FSDirectory.resolvePath in the + * following four methods. The calls below ensure that + * /.reserved/.inodes paths work properly. No need to check return + * values as these methods are tested elsewhere. + */ + { + fs.isFileClosed(testFileInodePath); + fs.getAclStatus(testFileInodePath); + fs.getXAttrs(testFileInodePath); + fs.listXAttrs(testFileInodePath); + } // symbolic link related tests From 549bcc2c02983086ee6694982d5f3503f5f4517f Mon Sep 17 00:00:00 2001 From: Zhijie Shen Date: Sun, 27 Jul 2014 01:37:51 +0000 Subject: [PATCH 067/354] MAPREDUCE-6002. Made MR task avoid reporting error to AM when the task process is shutting down. Contributed by Wangda Tan. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613743 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../hadoop/mapred/LocalContainerLauncher.java | 18 ++++++++++----- .../org/apache/hadoop/mapred/YarnChild.java | 22 +++++++++++++------ .../java/org/apache/hadoop/mapred/Task.java | 6 +++++ 4 files changed, 36 insertions(+), 13 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index c8a83bf64e5..5760cef3060 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -325,6 +325,9 @@ Release 2.5.0 - UNRELEASED MAPREDUCE-5952. LocalContainerLauncher#renameMapOutputForReduce incorrectly assumes a single dir for mapOutIndex. (Gera Shegalov via kasha) + MAPREDUCE-6002. Made MR task avoid reporting error to AM when the task process + is shutting down. 
(Wangda Tan via zjshen) + Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java index c7898ed966f..218ac835d27 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java @@ -31,6 +31,7 @@ import java.util.concurrent.LinkedBlockingQueue; import com.google.common.annotations.VisibleForTesting; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FSError; @@ -57,6 +58,7 @@ import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -406,7 +408,9 @@ private void runSubtask(org.apache.hadoop.mapred.Task task, } catch (FSError e) { LOG.fatal("FSError from child", e); // umbilical: MRAppMaster creates (taskAttemptListener), passes to us - umbilical.fsError(classicAttemptID, e.getMessage()); + if (!ShutdownHookManager.get().isShutdownInProgress()) { + umbilical.fsError(classicAttemptID, e.getMessage()); + } throw new RuntimeException(); } catch (Exception exception) { @@ -429,11 +433,13 @@ private void runSubtask(org.apache.hadoop.mapred.Task task, } catch (Throwable throwable) { LOG.fatal("Error running local (uberized) 'child' : " + StringUtils.stringifyException(throwable)); - Throwable tCause = throwable.getCause(); - String cause = (tCause == null) - ? throwable.getMessage() - : StringUtils.stringifyException(tCause); - umbilical.fatalError(classicAttemptID, cause); + if (!ShutdownHookManager.get().isShutdownInProgress()) { + Throwable tCause = throwable.getCause(); + String cause = + (tCause == null) ? 
throwable.getMessage() : StringUtils + .stringifyException(tCause); + umbilical.fatalError(classicAttemptID, cause); + } throw new RuntimeException(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java index 9212bfd154a..4ba1991ed9b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java @@ -56,6 +56,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DiskChecker.DiskErrorException; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants; @@ -176,7 +177,9 @@ public Object run() throws Exception { }); } catch (FSError e) { LOG.fatal("FSError from child", e); - umbilical.fsError(taskid, e.getMessage()); + if (!ShutdownHookManager.get().isShutdownInProgress()) { + umbilical.fsError(taskid, e.getMessage()); + } } catch (Exception exception) { LOG.warn("Exception running child : " + StringUtils.stringifyException(exception)); @@ -201,17 +204,22 @@ public Object run() throws Exception { } // Report back any failures, for diagnostic purposes if (taskid != null) { - umbilical.fatalError(taskid, StringUtils.stringifyException(exception)); + if (!ShutdownHookManager.get().isShutdownInProgress()) { + umbilical.fatalError(taskid, + StringUtils.stringifyException(exception)); + } } } catch (Throwable throwable) { LOG.fatal("Error running child : " + StringUtils.stringifyException(throwable)); if (taskid != null) { - Throwable tCause = throwable.getCause(); - String cause = tCause == null - ? throwable.getMessage() - : StringUtils.stringifyException(tCause); - umbilical.fatalError(taskid, cause); + if (!ShutdownHookManager.get().isShutdownInProgress()) { + Throwable tCause = throwable.getCause(); + String cause = + tCause == null ? 
throwable.getMessage() : StringUtils + .stringifyException(tCause); + umbilical.fatalError(taskid, cause); + } } } finally { RPC.stopProxy(umbilical); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java index 4815f191f7e..3a4c513f3f1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java @@ -66,6 +66,7 @@ import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; @@ -322,6 +323,11 @@ protected void setWriteSkipRecs(boolean writeSkipRecs) { protected void reportFatalError(TaskAttemptID id, Throwable throwable, String logMsg) { LOG.fatal(logMsg); + + if (ShutdownHookManager.get().isShutdownInProgress()) { + return; + } + Throwable tCause = throwable.getCause(); String cause = tCause == null ? StringUtils.stringifyException(throwable) From d6532d3a77abe5a9b1760c1e26a899d49aa661dc Mon Sep 17 00:00:00 2001 From: Zhijie Shen Date: Sun, 27 Jul 2014 17:55:06 +0000 Subject: [PATCH 068/354] YARN-2247. Made RM web services authenticate users via kerberos and delegation token. Contributed by Varun Vasudev. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613821 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../hadoop/yarn/conf/YarnConfiguration.java | 11 + .../src/main/resources/yarn-default.xml | 9 + .../security/http/RMAuthenticationFilter.java | 63 ++++ .../RMAuthenticationFilterInitializer.java | 121 ++++++ .../resourcemanager/ResourceManager.java | 61 +++ .../security/RMAuthenticationHandler.java | 157 ++++++++ .../resourcemanager/webapp/RMWebServices.java | 24 +- ...ServicesDelegationTokenAuthentication.java | 354 ++++++++++++++++++ .../webapp/TestRMWebappAuthentication.java | 272 ++++++++++++++ .../src/site/apt/ResourceManagerRest.apt.vm | 21 ++ 11 files changed, 1095 insertions(+), 1 deletion(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMAuthenticationHandler.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebappAuthentication.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index fb164f8d303..c0405106a01 100644 --- 
a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -147,6 +147,9 @@ Release 2.5.0 - UNRELEASED YARN-2233. Implemented ResourceManager web-services to create, renew and cancel delegation tokens. (Varun Vasudev via vinodkv) + YARN-2247. Made RM web services authenticate users via kerberos and delegation + token. (Varun Vasudev via zjshen) + IMPROVEMENTS YARN-1479. Invalid NaN values in Hadoop REST API JSON response (Chen He via diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 59e108a6fb1..ab6b20e574e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -263,6 +263,17 @@ public class YarnConfiguration extends Configuration { public static final String RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY = RM_PREFIX + "webapp.spnego-keytab-file"; + /** + * Flag to enable override of the default kerberos authentication filter with + * the RM authentication filter to allow authentication using delegation + * tokens(fallback to kerberos if the tokens are missing). Only applicable + * when the http authentication type is kerberos. + */ + public static final String RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER = RM_PREFIX + + "webapp.delegation-token-auth-filter.enabled"; + public static final boolean DEFAULT_RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER = + true; + /** How long to wait until a container is considered dead.*/ public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = RM_PREFIX + "rm.container-allocation.expiry-interval-ms"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 8bc49e69769..edc2f8cab61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -194,6 +194,15 @@ /etc/krb5.keytab + + Flag to enable override of the default kerberos authentication + filter with the RM authentication filter to allow authentication using + delegation tokens(fallback to kerberos if the tokens are missing). Only + applicable when the http authentication type is kerberos. + yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled + true + + How long to wait until a node manager is considered dead. yarn.nm.liveness-monitor.expiry-interval-ms diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java new file mode 100644 index 00000000000..651b5b0c762 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilter.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.security.http; + +import java.util.Properties; + +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; + +@Private +@Unstable +public class RMAuthenticationFilter extends AuthenticationFilter { + + public static final String AUTH_HANDLER_PROPERTY = + "yarn.resourcemanager.authentication-handler"; + + public RMAuthenticationFilter() { + } + + @Override + protected Properties getConfiguration(String configPrefix, + FilterConfig filterConfig) throws ServletException { + + // In yarn-site.xml, we can simply set type to "kerberos". However, we need + // to replace the name here to use the customized Kerberos + DT service + // instead of the standard Kerberos handler. + + Properties properties = super.getConfiguration(configPrefix, filterConfig); + String yarnAuthHandler = properties.getProperty(AUTH_HANDLER_PROPERTY); + if (yarnAuthHandler == null || yarnAuthHandler.isEmpty()) { + // if http auth type is simple, the default authentication filter + // will handle it, else throw an exception + if (!properties.getProperty(AUTH_TYPE).equals("simple")) { + throw new ServletException("Authentication handler class is empty"); + } + } + if (properties.getProperty(AUTH_TYPE).equalsIgnoreCase("kerberos")) { + properties.setProperty(AUTH_TYPE, yarnAuthHandler); + } + return properties; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java new file mode 100644 index 00000000000..2227833e7cf --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.security.http; + +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.FilterContainer; +import org.apache.hadoop.http.FilterInitializer; +import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; + +@Unstable +public class RMAuthenticationFilterInitializer extends FilterInitializer { + + String configPrefix; + String signatureSecretFileProperty; + String kerberosPrincipalProperty; + String cookiePath; + + public RMAuthenticationFilterInitializer() { + this.configPrefix = "hadoop.http.authentication."; + this.signatureSecretFileProperty = + AuthenticationFilter.SIGNATURE_SECRET + ".file"; + this.kerberosPrincipalProperty = KerberosAuthenticationHandler.PRINCIPAL; + this.cookiePath = "/"; + } + + protected Map createFilterConfig(Configuration conf) { + Map filterConfig = new HashMap(); + + // setting the cookie path to root '/' so it is used for all resources. + filterConfig.put(AuthenticationFilter.COOKIE_PATH, cookiePath); + + for (Map.Entry entry : conf) { + String name = entry.getKey(); + if (name.startsWith(configPrefix)) { + String value = conf.get(name); + name = name.substring(configPrefix.length()); + filterConfig.put(name, value); + } + } + + String signatureSecretFile = filterConfig.get(signatureSecretFileProperty); + if (signatureSecretFile != null) { + Reader reader = null; + try { + StringBuilder secret = new StringBuilder(); + reader = + new InputStreamReader(new FileInputStream(signatureSecretFile), + "UTF-8"); + int c = reader.read(); + while (c > -1) { + secret.append((char) c); + c = reader.read(); + } + filterConfig.put(AuthenticationFilter.SIGNATURE_SECRET, + secret.toString()); + } catch (IOException ex) { + // if running in non-secure mode, this filter only gets added + // because the user has not setup his own filter so just generate + // a random secret. 
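+        // (in that case AuthenticationFilter itself falls back to a randomly
+        // generated signature secret, so signed authentication cookies will
+        // not stay valid across an RM restart)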
in secure mode, the user needs to setup security + if (UserGroupInformation.isSecurityEnabled()) { + throw new RuntimeException( + "Could not read HTTP signature secret file: " + signatureSecretFile); + } + } finally { + IOUtils.closeQuietly(reader); + } + } + + // Resolve _HOST into bind address + String bindAddress = conf.get(HttpServer2.BIND_ADDRESS); + String principal = filterConfig.get(kerberosPrincipalProperty); + if (principal != null) { + try { + principal = SecurityUtil.getServerPrincipal(principal, bindAddress); + } catch (IOException ex) { + throw new RuntimeException( + "Could not resolve Kerberos principal name: " + ex.toString(), ex); + } + filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL, principal); + } + return filterConfig; + } + + @Override + public void initFilter(FilterContainer container, Configuration conf) { + + Map filterConfig = createFilterConfig(conf); + container.addFilter("YARNAuthenticationFilter", + RMAuthenticationFilter.class.getName(), filterConfig); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 7dceda249da..4b5d94875ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -32,11 +32,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.http.lib.StaticUserWebFilter; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.security.Groups; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.CompositeService; @@ -88,8 +90,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMAuthenticationHandler; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; +import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilter; +import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer; import org.apache.hadoop.yarn.server.webproxy.AppReportFetcher; import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils; import org.apache.hadoop.yarn.server.webproxy.WebAppProxy; @@ -789,6 +794,62 @@ public void handle(RMNodeEvent event) { } protected void startWepApp() { + + // Use the customized yarn filter instead of the standard kerberos filter to + // allow users to authenticate using 
delegation tokens + // 3 conditions need to be satisfied - + // 1. security is enabled + // 2. http auth type is set to kerberos + // 3. "yarn.resourcemanager.webapp.use-yarn-filter" override is set to true + + Configuration conf = getConfig(); + boolean useYarnAuthenticationFilter = + conf.getBoolean( + YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER, + YarnConfiguration.DEFAULT_RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER); + String authPrefix = "hadoop.http.authentication."; + String authTypeKey = authPrefix + "type"; + String initializers = conf.get("hadoop.http.filter.initializers"); + if (UserGroupInformation.isSecurityEnabled() + && useYarnAuthenticationFilter + && conf.get(authTypeKey, "").equalsIgnoreCase( + KerberosAuthenticationHandler.TYPE)) { + LOG.info("Using RM authentication filter(kerberos/delegation-token)" + + " for RM webapp authentication"); + RMAuthenticationHandler + .setSecretManager(getClientRMService().rmDTSecretManager); + String yarnAuthKey = + authPrefix + RMAuthenticationFilter.AUTH_HANDLER_PROPERTY; + conf.setStrings(yarnAuthKey, RMAuthenticationHandler.class.getName()); + + initializers = + initializers == null || initializers.isEmpty() ? "" : "," + + initializers; + if (!initializers.contains(RMAuthenticationFilterInitializer.class + .getName())) { + conf.set("hadoop.http.filter.initializers", + RMAuthenticationFilterInitializer.class.getName() + initializers); + } + } + + // if security is not enabled and the default filter initializer has been + // set, set the initializer to include the + // RMAuthenticationFilterInitializer which in turn will set up the simple + // auth filter. + + if (!UserGroupInformation.isSecurityEnabled()) { + if (initializers == null || initializers.isEmpty()) { + conf.set("hadoop.http.filter.initializers", + RMAuthenticationFilterInitializer.class.getName()); + conf.set(authTypeKey, "simple"); + } else if (initializers.equals(StaticUserWebFilter.class.getName())) { + conf.set("hadoop.http.filter.initializers", + RMAuthenticationFilterInitializer.class.getName() + "," + + initializers); + conf.set(authTypeKey, "simple"); + } + } + Builder builder = WebApps .$for("cluster", ApplicationMasterService.class, masterService, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMAuthenticationHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMAuthenticationHandler.java new file mode 100644 index 00000000000..798c479c287 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/RMAuthenticationHandler.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.security; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.authentication.server.AuthenticationToken; +import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; + +public class RMAuthenticationHandler extends KerberosAuthenticationHandler { + + public static final String TYPE = "kerberos-dt"; + public static final String HEADER = "Hadoop-YARN-Auth-Delegation-Token"; + + static RMDelegationTokenSecretManager secretManager; + static boolean secretManagerInitialized = false; + + public RMAuthenticationHandler() { + super(); + } + + /** + * Returns authentication type of the handler. + * + * @return kerberos-dt + */ + @Override + public String getType() { + return TYPE; + } + + @Override + public boolean managementOperation(AuthenticationToken token, + HttpServletRequest request, HttpServletResponse response) { + return true; + } + + /** + * Authenticates a request looking for the delegation header and + * verifying it is a valid token. If the header is missing, it delegates the + * authentication to the {@link KerberosAuthenticationHandler} unless it is + * disabled. + * + * @param request + * the HTTP client request. + * @param response + * the HTTP client response. + * + * @return the authentication token for the authenticated request. + * @throws IOException + * thrown if an IO error occurred. + * @throws AuthenticationException + * thrown if the authentication failed. + */ + @Override + public AuthenticationToken authenticate(HttpServletRequest request, + HttpServletResponse response) throws IOException, AuthenticationException { + + AuthenticationToken token; + String delegationParam = this.getEncodedDelegationTokenFromRequest(request); + if (delegationParam != null) { + Token dt = + new Token(); + ; + dt.decodeFromUrlString(delegationParam); + UserGroupInformation ugi = this.verifyToken(dt); + if (ugi == null) { + throw new AuthenticationException("Invalid token"); + } + final String shortName = ugi.getShortUserName(); + token = new AuthenticationToken(shortName, ugi.getUserName(), getType()); + } else { + token = super.authenticate(request, response); + if (token != null) { + // create a token with auth type set correctly + token = + new AuthenticationToken(token.getUserName(), token.getName(), + super.getType()); + } + } + return token; + } + + /** + * Verifies a delegation token. + * + * @param token + * delegation token to verify. + * @return the UGI for the token; null if the verification fails + * @throws IOException + * thrown if the token could not be verified. 
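+   * <p>
+   * Implementation note: the identifier bytes of the token are deserialized
+   * into an {@link RMDelegationTokenIdentifier} and checked against the
+   * {@link RMDelegationTokenSecretManager} injected via
+   * {@code setSecretManager}; any failure during verification is mapped to
+   * a {@code null} return, which {@code authenticate} treats as an invalid
+   * token.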
+ */ + protected UserGroupInformation verifyToken( + Token token) throws IOException { + if (secretManagerInitialized == false) { + throw new IllegalStateException("Secret manager not initialized"); + } + ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier()); + DataInputStream dis = new DataInputStream(buf); + RMDelegationTokenIdentifier id = secretManager.createIdentifier(); + try { + id.readFields(dis); + secretManager.verifyToken(id, token.getPassword()); + } catch (Throwable t) { + return null; + } finally { + dis.close(); + } + return id.getUser(); + } + + /** + * Extract encoded delegation token from request + * + * @param req + * HTTPServletRequest object + * + * @return String containing the encoded token; null if encoded token not + * found + * + */ + protected String getEncodedDelegationTokenFromRequest(HttpServletRequest req) { + String header = req.getHeader(HEADER); + return header; + } + + public static void setSecretManager(RMDelegationTokenSecretManager manager) { + secretManager = manager; + secretManagerInitialized = true; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 0493efdb7d4..a8ec19260ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -55,6 +55,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; @@ -680,6 +681,11 @@ public Response updateAppState(AppState targetState, throw new AuthorizationException(msg); } + if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) { + String msg = "The default static user cannot carry out this operation."; + return Response.status(Status.FORBIDDEN).entity(msg).build(); + } + String userName = callerUGI.getUserName(); RMApp app = null; try { @@ -800,6 +806,13 @@ private UserGroupInformation getCallerUserGroupInformation( return callerUGI; } + private boolean isStaticUser(UserGroupInformation callerUGI) { + String staticUser = + conf.get(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER, + CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER); + return staticUser.equals(callerUGI.getUserName()); + } + /** * Generates a new ApplicationId which is then sent to the client * @@ -822,6 +835,10 @@ public Response createNewApplication(@Context HttpServletRequest hsr) throw new AuthorizationException("Unable to obtain user name, " + "user not authenticated"); } + if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) { + String msg = "The default static user cannot carry out this operation."; + return Response.status(Status.FORBIDDEN).entity(msg).build(); + } NewApplication appId = createNewApplication(); return Response.status(Status.OK).entity(appId).build(); @@ -859,6 +876,11 @@ public 
Response submitApplication(ApplicationSubmissionContextInfo newApp, + "user not authenticated"); } + if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) { + String msg = "The default static user cannot carry out this operation."; + return Response.status(Status.FORBIDDEN).entity(msg).build(); + } + ApplicationSubmissionContext appContext = createAppSubmissionContext(newApp); final SubmitApplicationRequest req = @@ -975,7 +997,7 @@ protected Resource createAppSubmissionContextResource( * * @param newApp * the information provided by the user - * @return + * @return created context * @throws BadRequestException * @throws IOException */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java new file mode 100644 index 00000000000..34a914a7541 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java @@ -0,0 +1,354 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.StringWriter; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.concurrent.Callable; + +import javax.ws.rs.core.MediaType; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.Marshaller; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.KerberosTestUtils; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.codehaus.jettison.json.JSONObject; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.sun.jersey.api.client.ClientResponse.Status; + +public class TestRMWebServicesDelegationTokenAuthentication { + + private static final File testRootDir = new File("target", + TestRMWebServicesDelegationTokenAuthentication.class.getName() + "-root"); + private static File httpSpnegoKeytabFile = new File( + KerberosTestUtils.getKeytabFile()); + + private static String httpSpnegoPrincipal = KerberosTestUtils + .getServerPrincipal(); + + private static boolean miniKDCStarted = false; + private static MiniKdc testMiniKDC; + private static MockRM rm; + + // use published header name + final static String DelegationTokenHeader = + "Hadoop-YARN-Auth-Delegation-Token"; + + @BeforeClass + public static void setUp() { + try { + testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir); + setupKDC(); + setupAndStartRM(); + } catch (Exception e) { + assertTrue("Couldn't create MiniKDC", false); + } + } + + @AfterClass + public static void tearDown() { + if (testMiniKDC != null) { + testMiniKDC.stop(); + } + if (rm != null) { + rm.stop(); + } + } + + public TestRMWebServicesDelegationTokenAuthentication() throws Exception { + super(); + } + + private static void setupAndStartRM() throws Exception { + Configuration rmconf = new Configuration(); + rmconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); + rmconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, + ResourceScheduler.class); + rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); + String httpPrefix = "hadoop.http.authentication."; + rmconf.setStrings(httpPrefix + "type", "kerberos"); + rmconf.set(httpPrefix + KerberosAuthenticationHandler.PRINCIPAL, + httpSpnegoPrincipal); + rmconf.set(httpPrefix + KerberosAuthenticationHandler.KEYTAB, + 
httpSpnegoKeytabFile.getAbsolutePath()); + // use any file for signature secret + rmconf.set(httpPrefix + AuthenticationFilter.SIGNATURE_SECRET + ".file", + httpSpnegoKeytabFile.getAbsolutePath()); + rmconf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + rmconf.setBoolean(YarnConfiguration.RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER, + true); + rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY, + httpSpnegoPrincipal); + rmconf.set(YarnConfiguration.RM_KEYTAB, + httpSpnegoKeytabFile.getAbsolutePath()); + rmconf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY, + httpSpnegoKeytabFile.getAbsolutePath()); + rmconf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY, + httpSpnegoPrincipal); + rmconf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY, + httpSpnegoKeytabFile.getAbsolutePath()); + rmconf.setBoolean("mockrm.webapp.enabled", true); + UserGroupInformation.setConfiguration(rmconf); + rm = new MockRM(rmconf); + rm.start(); + + } + + private static void setupKDC() throws Exception { + if (miniKDCStarted == false) { + testMiniKDC.start(); + getKdc().createPrincipal(httpSpnegoKeytabFile, "HTTP/localhost", + "client", UserGroupInformation.getLoginUser().getShortUserName()); + miniKDCStarted = true; + } + } + + private static MiniKdc getKdc() { + return testMiniKDC; + } + + // Test that you can authenticate with only delegation tokens + // 1. Get a delegation token using Kerberos auth(this ends up + // testing the fallback authenticator) + // 2. Submit an app without kerberos or delegation-token + // - we should get an UNAUTHORIZED response + // 3. Submit same app with delegation-token + // - we should get OK response + // - confirm owner of the app is the user whose + // delegation-token we used + + @Test + public void testDelegationTokenAuth() throws Exception { + final String token = getDelegationToken("test"); + + ApplicationSubmissionContextInfo app = + new ApplicationSubmissionContextInfo(); + String appid = "application_123_0"; + app.setApplicationId(appid); + String requestBody = getMarshalledAppInfo(app); + + URL url = new URL("http://localhost:8088/ws/v1/cluster/apps"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + setupConn(conn, "POST", "application/xml", requestBody); + + // this should fail with unauthorized because only + // auth is kerberos or delegation token + try { + conn.getInputStream(); + fail("we should not be here"); + } catch (IOException e) { + assertEquals(Status.UNAUTHORIZED.getStatusCode(), conn.getResponseCode()); + } + + conn = (HttpURLConnection) url.openConnection(); + conn.setRequestProperty(DelegationTokenHeader, token); + setupConn(conn, "POST", MediaType.APPLICATION_XML, requestBody); + + // this should not fail + conn.getInputStream(); + boolean appExists = + rm.getRMContext().getRMApps() + .containsKey(ConverterUtils.toApplicationId(appid)); + assertTrue(appExists); + RMApp actualApp = + rm.getRMContext().getRMApps() + .get(ConverterUtils.toApplicationId(appid)); + String owner = actualApp.getUser(); + assertEquals("client", owner); + + return; + } + + // Test to make sure that cancelled delegation tokens + // are rejected + @Test + public void testCancelledDelegationToken() throws Exception { + String token = getDelegationToken("client"); + cancelDelegationToken(token); + ApplicationSubmissionContextInfo app = + new ApplicationSubmissionContextInfo(); + String appid = "application_123_0"; + app.setApplicationId(appid); + String requestBody = getMarshalledAppInfo(app); + + URL 
url = new URL("http://localhost:8088/ws/v1/cluster/apps"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestProperty(DelegationTokenHeader, token); + setupConn(conn, "POST", MediaType.APPLICATION_XML, requestBody); + + // this should fail with unauthorized because only + // auth is kerberos or delegation token + try { + conn.getInputStream(); + fail("Authentication should fail with expired delegation tokens"); + } catch (IOException e) { + assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode()); + } + return; + } + + // Test to make sure that we can't do delegation token + // functions using just delegation token auth + @Test + public void testDelegationTokenOps() throws Exception { + String token = getDelegationToken("client"); + String createRequest = "{\"renewer\":\"test\"}"; + String renewRequest = "{\"token\": \"" + token + "\"}"; + + // first test create and renew + String[] requests = { createRequest, renewRequest }; + for (String requestBody : requests) { + URL url = new URL("http://localhost:8088/ws/v1/cluster/delegation-token"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestProperty(DelegationTokenHeader, token); + setupConn(conn, "POST", MediaType.APPLICATION_JSON, requestBody); + try { + conn.getInputStream(); + fail("Creation/Renewing delegation tokens should not be " + + "allowed with token auth"); + } catch (IOException e) { + assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode()); + } + } + + // test cancel + URL url = new URL("http://localhost:8088/ws/v1/cluster/delegation-token"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestProperty(DelegationTokenHeader, token); + conn.setRequestProperty(RMWebServices.DELEGATION_TOKEN_HEADER, token); + setupConn(conn, "DELETE", null, null); + try { + conn.getInputStream(); + fail("Cancelling delegation tokens should not be allowed with token auth"); + } catch (IOException e) { + assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode()); + } + return; + } + + private String getDelegationToken(final String renewer) throws Exception { + String token = KerberosTestUtils.doAsClient(new Callable() { + @Override + public String call() throws Exception { + String ret = null; + String body = "{\"renewer\":\"" + renewer + "\"}"; + URL url = + new URL("http://localhost:8088/ws/v1/cluster/delegation-token"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + setupConn(conn, "POST", MediaType.APPLICATION_JSON, body); + InputStream response = conn.getInputStream(); + assertEquals(Status.OK.getStatusCode(), conn.getResponseCode()); + BufferedReader reader = null; + try { + reader = new BufferedReader(new InputStreamReader(response, "UTF8")); + for (String line; (line = reader.readLine()) != null;) { + JSONObject obj = new JSONObject(line); + if (obj.has("token")) { + reader.close(); + response.close(); + ret = obj.getString("token"); + break; + } + } + } finally { + IOUtils.closeQuietly(reader); + IOUtils.closeQuietly(response); + } + return ret; + } + }); + return token; + } + + private void cancelDelegationToken(final String tokenString) throws Exception { + + KerberosTestUtils.doAsClient(new Callable() { + @Override + public Void call() throws Exception { + URL url = + new URL("http://localhost:8088/ws/v1/cluster/delegation-token"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestProperty(RMWebServices.DELEGATION_TOKEN_HEADER, + 
tokenString); + setupConn(conn, "DELETE", null, null); + InputStream response = conn.getInputStream(); + assertEquals(Status.OK.getStatusCode(), conn.getResponseCode()); + response.close(); + return null; + } + }); + return; + } + + static String getMarshalledAppInfo(ApplicationSubmissionContextInfo appInfo) + throws Exception { + + StringWriter writer = new StringWriter(); + JAXBContext context = + JAXBContext.newInstance(ApplicationSubmissionContextInfo.class); + Marshaller m = context.createMarshaller(); + m.marshal(appInfo, writer); + return writer.toString(); + } + + static void setupConn(HttpURLConnection conn, String method, + String contentType, String body) throws Exception { + conn.setRequestMethod(method); + conn.setDoOutput(true); + conn.setRequestProperty("Accept-Charset", "UTF8"); + if (contentType != null && !contentType.isEmpty()) { + conn.setRequestProperty("Content-Type", contentType + ";charset=UTF8"); + if (body != null && !body.isEmpty()) { + OutputStream stream = conn.getOutputStream(); + stream.write(body.getBytes("UTF8")); + stream.close(); + } + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebappAuthentication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebappAuthentication.java new file mode 100644 index 00000000000..2f6a02287c4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebappAuthentication.java @@ -0,0 +1,272 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.File; +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Arrays; +import java.util.Collection; + +import javax.ws.rs.core.MediaType; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.KerberosTestUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.sun.jersey.api.client.ClientResponse.Status; + +/* Just a simple test class to ensure that the RM handles the static web user + * correctly for secure and un-secure modes + * + */ +@RunWith(Parameterized.class) +public class TestRMWebappAuthentication { + + private static MockRM rm; + private static Configuration simpleConf; + private static Configuration kerberosConf; + + private static final File testRootDir = new File("target", + TestRMWebServicesDelegationTokenAuthentication.class.getName() + "-root"); + private static File httpSpnegoKeytabFile = new File( + KerberosTestUtils.getKeytabFile()); + + private static boolean miniKDCStarted = false; + private static MiniKdc testMiniKDC; + + static { + simpleConf = new Configuration(); + simpleConf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); + simpleConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, + ResourceScheduler.class); + simpleConf.setBoolean("mockrm.webapp.enabled", true); + kerberosConf = new Configuration(); + kerberosConf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); + kerberosConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, + ResourceScheduler.class); + kerberosConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); + kerberosConf.set( + CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + kerberosConf.set(YarnConfiguration.RM_KEYTAB, + httpSpnegoKeytabFile.getAbsolutePath()); + kerberosConf.setBoolean("mockrm.webapp.enabled", true); + } + + @Parameters + public static Collection params() { + return Arrays.asList(new Object[][] { { 1, simpleConf }, + { 2, kerberosConf } }); + } + + public TestRMWebappAuthentication(int run, Configuration conf) { + super(); + setupAndStartRM(conf); + } + + @BeforeClass + public static void setUp() { + try { + testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir); + setupKDC(); + } catch (Exception e) { + assertTrue("Couldn't create MiniKDC", false); + } + } + + @AfterClass + public static void 
tearDown() { + if (testMiniKDC != null) { + testMiniKDC.stop(); + } + } + + private static void setupKDC() throws Exception { + if (!miniKDCStarted) { + testMiniKDC.start(); + getKdc().createPrincipal(httpSpnegoKeytabFile, "HTTP/localhost", + "client", UserGroupInformation.getLoginUser().getShortUserName()); + miniKDCStarted = true; + } + } + + private static MiniKdc getKdc() { + return testMiniKDC; + } + + private static void setupAndStartRM(Configuration conf) { + UserGroupInformation.setConfiguration(conf); + rm = new MockRM(conf); + } + + // ensure that in a non-secure cluster users can access + // the web pages as earlier and submit apps as anonymous + // user or by identifying themselves + @Test + public void testSimpleAuth() throws Exception { + + rm.start(); + + // ensure users can access web pages + // this should work for secure and non-secure clusters + URL url = new URL("http://localhost:8088/cluster"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + try { + conn.getInputStream(); + assertEquals(Status.OK.getStatusCode(), conn.getResponseCode()); + } catch (Exception e) { + fail("Fetching url failed"); + } + + if (UserGroupInformation.isSecurityEnabled()) { + testAnonymousKerberosUser(); + } else { + testAnonymousSimpleUser(); + } + + rm.stop(); + } + + private void testAnonymousKerberosUser() throws Exception { + + ApplicationSubmissionContextInfo app = + new ApplicationSubmissionContextInfo(); + String appid = "application_123_0"; + app.setApplicationId(appid); + String requestBody = + TestRMWebServicesDelegationTokenAuthentication + .getMarshalledAppInfo(app); + + URL url = + new URL("http://localhost:8088/ws/v1/cluster/apps/new-application"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST", + "application/xml", requestBody); + + try { + conn.getInputStream(); + fail("Anonymous users should not be allowed to get new application ids in secure mode."); + } catch (IOException ie) { + assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode()); + } + + url = new URL("http://localhost:8088/ws/v1/cluster/apps"); + conn = (HttpURLConnection) url.openConnection(); + TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST", + "application/xml", requestBody); + + try { + conn.getInputStream(); + fail("Anonymous users should not be allowed to submit apps in secure mode."); + } catch (IOException ie) { + assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode()); + } + + requestBody = "{ \"state\": \"KILLED\"}"; + url = + new URL( + "http://localhost:8088/ws/v1/cluster/apps/application_123_0/state"); + conn = (HttpURLConnection) url.openConnection(); + TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "PUT", + "application/json", requestBody); + + try { + conn.getInputStream(); + fail("Anonymous users should not be allowed to kill apps in secure mode."); + } catch (IOException ie) { + assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode()); + } + } + + private void testAnonymousSimpleUser() throws Exception { + + ApplicationSubmissionContextInfo app = + new ApplicationSubmissionContextInfo(); + String appid = "application_123_0"; + app.setApplicationId(appid); + String requestBody = + TestRMWebServicesDelegationTokenAuthentication + .getMarshalledAppInfo(app); + + URL url = new URL("http://localhost:8088/ws/v1/cluster/apps"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + 
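+    // Anonymous POST in simple (non-secure) mode: the submission is expected
+    // to be ACCEPTED and the app owner defaults to the configured static web
+    // user (hadoop.http.static.user, "dr.who" unless overridden). A second
+    // submission further below with ?user.name=client should instead be
+    // owned by "client".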
TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST", + "application/xml", requestBody); + + conn.getInputStream(); + assertEquals(Status.ACCEPTED.getStatusCode(), conn.getResponseCode()); + boolean appExists = + rm.getRMContext().getRMApps() + .containsKey(ConverterUtils.toApplicationId(appid)); + assertTrue(appExists); + RMApp actualApp = + rm.getRMContext().getRMApps() + .get(ConverterUtils.toApplicationId(appid)); + String owner = actualApp.getUser(); + assertEquals( + rm.getConfig().get(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER, + CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER), owner); + + appid = "application_123_1"; + app.setApplicationId(appid); + requestBody = + TestRMWebServicesDelegationTokenAuthentication + .getMarshalledAppInfo(app); + url = new URL("http://localhost:8088/ws/v1/cluster/apps?user.name=client"); + conn = (HttpURLConnection) url.openConnection(); + TestRMWebServicesDelegationTokenAuthentication.setupConn(conn, "POST", + MediaType.APPLICATION_XML, requestBody); + + conn.getInputStream(); + appExists = + rm.getRMContext().getRMApps() + .containsKey(ConverterUtils.toApplicationId(appid)); + assertTrue(appExists); + actualApp = + rm.getRMContext().getRMApps() + .get(ConverterUtils.toApplicationId(appid)); + owner = actualApp.getUser(); + assertEquals("client", owner); + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm index 1952e11176d..9609ba39de9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm @@ -2912,3 +2912,24 @@ Accept: application/xml +---+ No response body. + +** Authentication using delegation tokens + + This feature is in the alpha mode and may change in the future. + + You can use delegation tokens to authenticate yourself when using YARN RM webservices. However, this requires setting the right configurations. The conditions for this are: + + * Hadoop is setup in secure mode with the authentication type set to kerberos. + + * Hadoop HTTP authentication is setup with the authentication type set to kerberos + + Once setup, delegation tokens can be fetched using the web services listed above and used as shown in an example below: + ++---+ + PUT http:///ws/v1/cluster/apps/application_1399397633663_0003/state + Hadoop-YARN-Auth-Delegation-Token: MgASY2xpZW50QEVYQU1QTEUuQ09NDHRlc3QtcmVuZXdlcgCKAUbjqcHHigFHB7ZFxwQCFKWD3znCkDSy6SQIjRCLDydxbxvgE1JNX0RFTEVHQVRJT05fVE9LRU4A + Content-Type: application/json; charset=UTF8 + { + "state":"KILLED" + } ++---+ From f49c2a1b625948119afd5a867db2101c4b2cfb0e Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Mon, 28 Jul 2014 18:53:47 +0000 Subject: [PATCH 069/354] HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config. 
Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614125 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../src/site/apt/HdfsNfsGateway.apt.vm | 60 +++++++------------ 2 files changed, 24 insertions(+), 39 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index eb524b209df..c4930e62217 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -938,6 +938,9 @@ Release 2.5.0 - UNRELEASED HDFS-6723. New NN webUI no longer displays decommissioned state for dead node. (Ming Ma via wheat9) + HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config + (brandonli) + BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm index 54544cff46f..863ba39a739 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm @@ -44,10 +44,13 @@ HDFS NFS Gateway * {Configuration} - The user running the NFS-gateway must be able to proxy all the users using the NFS mounts. - For instance, if user 'nfsserver' is running the gateway, and users belonging to the groups 'nfs-users1' - and 'nfs-users2' use the NFS mounts, then in core-site.xml of the namenode, the following must be set - (NOTE: replace 'nfsserver' with the user name starting the gateway in your cluster): + The NFS-gateway uses proxy user to proxy all the users accessing the NFS mounts. + In non-secure mode, the user running the gateway is the proxy user, while in secure mode the + user in Kerberos keytab is the proxy user. Suppose the proxy user is 'nfsserver' + and users belonging to the groups 'nfs-users1' + and 'nfs-users2' use the NFS mounts, then in core-site.xml of the NameNode, the following + two properities must be set and only NameNode needs restart after the configuration change + (NOTE: replace the string 'nfsserver' with the proxy user name in your cluster): ---- @@ -72,7 +75,9 @@ HDFS NFS Gateway ---- The above are the only required configuration for the NFS gateway in non-secure mode. For Kerberized - hadoop clusters, the following configurations need to be added to hdfs-site.xml: + hadoop clusters, the following configurations need to be added to hdfs-site.xml for the gateway (NOTE: replace + string "nfsserver" with the proxy user name and ensure the user contained in the keytab is + also the same proxy user): ---- @@ -87,6 +92,8 @@ HDFS NFS Gateway nfsserver/_HOST@YOUR-REALM.COM ---- + + The rest of the NFS gateway configurations are optional for both secure and non-secure mode. The AIX NFS client has a {{{https://issues.apache.org/jira/browse/HDFS-6549}few known issues}} that prevent it from working correctly by default with the HDFS NFS @@ -108,7 +115,7 @@ HDFS NFS Gateway have been committed. It's strongly recommended for the users to update a few configuration properties based on their use - cases. All the related configuration properties can be added or updated in hdfs-site.xml. + cases. All the following configuration properties can be added or updated in hdfs-site.xml. 
* If the client mounts the export with access time update allowed, make sure the following property is not disabled in the configuration file. Only NameNode needs to restart after @@ -145,36 +152,6 @@ HDFS NFS Gateway ---- - * For optimal performance, it is recommended that rtmax be updated to - 1MB. However, note that this 1MB is a per client allocation, and not - from a shared memory pool, and therefore a larger value may adversely - affect small reads, consuming a lot of memory. The maximum value of - this property is 1MB. - ----- - - nfs.rtmax - 1048576 - This is the maximum size in bytes of a READ request - supported by the NFS gateway. If you change this, make sure you - also update the nfs mount's rsize(add rsize= # of bytes to the - mount directive). - - ----- - ----- - - nfs.wtmax - 65536 - This is the maximum size in bytes of a WRITE request - supported by the NFS gateway. If you change this, make sure you - also update the nfs mount's wsize(add wsize= # of bytes to the - mount directive). - - ----- - * By default, the export can be mounted by any client. To better control the access, users can update the following property. The value string contains machine name and access privilege, separated by whitespace @@ -238,8 +215,10 @@ HDFS NFS Gateway [[3]] Start mountd and nfsd. - No root privileges are required for this command. However, ensure that the user starting - the Hadoop cluster and the user starting the NFS gateway are same. + No root privileges are required for this command. In non-secure mode, the NFS gateway + should be started by the proxy user mentioned at the beginning of this user guide. + While in secure mode, any user can start NFS gateway + as long as the user has read access to the Kerberos keytab defined in "nfs.keytab.file". ------------------------- hadoop nfs3 @@ -339,7 +318,10 @@ HDFS NFS Gateway ------------------------------------------------------------------- Then the users can access HDFS as part of the local file system except that, - hard link and random write are not supported yet. + hard link and random write are not supported yet. To optimize the performance + of large file I/O, one can increase the NFS transfer size(rsize and wsize) during mount. + By default, NFS gateway supports 1MB as the maximum transfer size. For larger data + transfer size, one needs to update "nfs.rtmax" and "nfs.rtmax" in hdfs-site.xml. * {Allow mounts from unprivileged clients} From 4f9ffc7455ae2182df1da1b7e3f5a55e645fc8a4 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Mon, 28 Jul 2014 23:43:39 +0000 Subject: [PATCH 070/354] HDFS-6739. Add getDatanodeStorageReport to ClientProtocol. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614215 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../org/apache/hadoop/hdfs/DFSClient.java | 26 +++--- .../hadoop/hdfs/protocol/ClientProtocol.java | 12 ++- ...amenodeProtocolServerSideTranslatorPB.java | 19 +++- .../ClientNamenodeProtocolTranslatorPB.java | 16 ++++ ...atanodeProtocolClientSideTranslatorPB.java | 14 +-- .../hadoop/hdfs/protocolPB/PBHelper.java | 52 ++++++++++- .../blockmanagement/DatanodeDescriptor.java | 9 ++ .../blockmanagement/DatanodeStorageInfo.java | 6 ++ .../hdfs/server/namenode/FSNamesystem.java | 28 +++++- .../server/namenode/NameNodeRpcServer.java | 15 +++- .../protocol/DatanodeStorageReport.java | 42 +++++++++ .../main/proto/ClientNamenodeProtocol.proto | 15 ++++ .../src/main/proto/DatanodeProtocol.proto | 24 ----- .../hadoop-hdfs/src/main/proto/hdfs.proto | 24 +++++ .../hadoop/hdfs/TestDatanodeReport.java | 90 ++++++++++++++----- .../hadoop/hdfs/protocolPB/TestPBHelper.java | 17 +++- 17 files changed, 326 insertions(+), 85 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c4930e62217..fca7e231045 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -324,6 +324,8 @@ Release 2.6.0 - UNRELEASED HDFS-6750. The DataNode should use its shared memory segment to mark short-circuit replicas that have been unlinked as stale (cmccabe) + HDFS-6739. Add getDatanodeStorageReport to ClientProtocol. (szetszwo) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. 
(wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 9edb3db6585..45a9011a568 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -32,19 +32,21 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY; @@ -60,8 +62,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT; import java.io.BufferedOutputStream; import java.io.DataInputStream; @@ -91,7 +91,6 @@ import javax.net.SocketFactory; -import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -112,22 +111,22 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; import org.apache.hadoop.fs.Options; -import org.apache.hadoop.fs.XAttr; -import org.apache.hadoop.fs.XAttrSetFlag; import 
org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.VolumeId; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; -import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.TcpPeerServer; +import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator; @@ -158,8 +157,8 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.Op; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; -import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; +import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; @@ -175,6 +174,7 @@ import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.IOUtils; @@ -200,6 +200,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; import com.google.common.net.InetAddresses; /******************************************************** @@ -2192,6 +2193,11 @@ public DatanodeInfo[] datanodeReport(DatanodeReportType type) return namenode.getDatanodeReport(type); } + public DatanodeStorageReport[] getDatanodeStorageReport( + DatanodeReportType type) throws IOException { + return namenode.getDatanodeStorageReport(type); + } + /** * Enter, leave or get safe mode. 
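[Editorial note, not part of the patch: the new DFSClient#getDatanodeStorageReport above simply forwards to the NameNode. A minimal, hypothetical caller could summarize per-storage usage as sketched below; it assumes the usual StorageReport accessors (getCapacity, getRemaining) and that a DFSClient handle is already available (e.g. from test utilities), since DFSClient is not a public-facing API.]

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

// Hypothetical helper illustrating the new API; not part of this commit.
class DatanodeStorageReportExample {
  static void printStorageUsage(DFSClient client) throws IOException {
    for (DatanodeStorageReport r
        : client.getDatanodeStorageReport(DatanodeReportType.LIVE)) {
      System.out.println("Datanode: " + r.getDatanodeInfo());
      for (StorageReport s : r.getStorageReports()) {
        // One line per storage directory on the datanode.
        System.out.println("  " + s.getStorage().getStorageID()
            + " capacity=" + s.getCapacity()
            + " remaining=" + s.getRemaining());
      }
    }
  }
}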
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 9398c721a68..a2a52fef389 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; @@ -31,11 +32,10 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.Options; -import org.apache.hadoop.fs.XAttr; -import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.AtMostOnce; @@ -654,6 +655,13 @@ public void renewLease(String clientName) throws AccessControlException, public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type) throws IOException; + /** + * Get a report on the current datanode storages. + */ + @Idempotent + public DatanodeStorageReport[] getDatanodeStorageReport( + HdfsConstants.DatanodeReportType type) throws IOException; + /** * Get the block size for the given file. 
* @param filename The name of the file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 3a312b0d418..df0d1b0006c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto; @@ -93,6 +94,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; @@ -174,7 +177,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; -import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto; @@ -655,6 +657,21 @@ public GetDatanodeReportResponseProto getDatanodeReport( } } + @Override + public GetDatanodeStorageReportResponseProto getDatanodeStorageReport( + RpcController controller, GetDatanodeStorageReportRequestProto req) + throws ServiceException { + try { + List reports = PBHelper.convertDatanodeStorageReports( + server.getDatanodeStorageReport(PBHelper.convert(req.getType()))); + return GetDatanodeStorageReportResponseProto.newBuilder() + .addAllDatanodeStorageReports(reports) + .build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public GetPreferredBlockSizeResponseProto getPreferredBlockSize( RpcController controller, GetPreferredBlockSizeRequestProto req) diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index d20ae1d14ea..0f8eba970ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -94,6 +94,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; @@ -151,6 +152,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.ProtobufHelper; @@ -580,6 +582,20 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type) } } + @Override + public DatanodeStorageReport[] getDatanodeStorageReport(DatanodeReportType type) + throws IOException { + final GetDatanodeStorageReportRequestProto req + = GetDatanodeStorageReportRequestProto.newBuilder() + .setType(PBHelper.convert(type)).build(); + try { + return PBHelper.convertDatanodeStorageReports( + rpcProxy.getDatanodeStorageReport(null, req).getDatanodeStorageReportsList()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + @Override public long getPreferredBlockSize(String filename) throws IOException, UnresolvedLinkException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java index 5775d6e2634..46023ecaa34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java @@ -21,18 +21,13 @@ import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import 
org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto; @@ -51,7 +46,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto; -import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -61,14 +55,10 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.io.retry.RetryProxy; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RpcClientUtil; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -137,9 +127,7 @@ public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, .setRegistration(PBHelper.convert(registration)) .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount) .setFailedVolumes(failedVolumes); - for (StorageReport r : reports) { - builder.addReports(PBHelper.convert(r)); - } + builder.addAllReports(PBHelper.convertStorageReports(reports)); if (cacheCapacity != 0) { builder.setCacheCapacity(cacheCapacity); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index bd7e5f13918..5706aab062f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -90,6 +90,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto; @@ -102,14 +103,11 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto; -import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto; -import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto; -import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; @@ -125,6 +123,8 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; @@ -149,6 +149,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto; @@ -182,6 +183,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; import org.apache.hadoop.hdfs.server.protocol.JournalInfo; import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand; @@ -620,6 +622,41 @@ public static DatanodeInfoProto convert(DatanodeInfo info) { return builder.build(); } + public static DatanodeStorageReportProto convertDatanodeStorageReport( + DatanodeStorageReport report) { + return DatanodeStorageReportProto.newBuilder() + .setDatanodeInfo(convert(report.getDatanodeInfo())) + .addAllStorageReports(convertStorageReports(report.getStorageReports())) + .build(); + } + + public static List convertDatanodeStorageReports( + DatanodeStorageReport[] reports) { + final List protos + = new ArrayList(reports.length); + for(int i = 0; i < reports.length; i++) { + protos.add(convertDatanodeStorageReport(reports[i])); + } + return protos; + } + + public static DatanodeStorageReport convertDatanodeStorageReport( + DatanodeStorageReportProto proto) { + return new DatanodeStorageReport( + convert(proto.getDatanodeInfo()), + convertStorageReports(proto.getStorageReportsList())); + } + + public static DatanodeStorageReport[] 
convertDatanodeStorageReports( + List protos) { + final DatanodeStorageReport[] reports + = new DatanodeStorageReport[protos.size()]; + for(int i = 0; i < reports.length; i++) { + reports[i] = convertDatanodeStorageReport(protos.get(i)); + } + return reports; + } + public static AdminStates convert(AdminState adminState) { switch(adminState) { case DECOMMISSION_INPROGRESS: @@ -1713,6 +1750,15 @@ public static StorageReport[] convertStorageReports( return report; } + public static List convertStorageReports(StorageReport[] storages) { + final List protos = new ArrayList( + storages.length); + for(int i = 0; i < storages.length; i++) { + protos.add(convert(storages[i])); + } + return protos; + } + public static JournalInfo convert(JournalInfoProto info) { int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0; int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index a645d434985..fcc189d9f9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -259,6 +259,15 @@ DatanodeStorageInfo[] getStorageInfos() { } } + public StorageReport[] getStorageReports() { + final StorageReport[] reports = new StorageReport[storageMap.size()]; + final DatanodeStorageInfo[] infos = getStorageInfos(); + for(int i = 0; i < infos.length; i++) { + reports[i] = infos[i].toStorageReport(); + } + return reports; + } + boolean hasStaleStorages() { synchronized (storageMap) { for (DatanodeStorageInfo storage : storageMap.values()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index f7bab3ced6a..fa4b0e533bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -291,6 +291,12 @@ public int hashCode() { public String toString() { return "[" + storageType + "]" + storageID + ":" + state; } + + StorageReport toStorageReport() { + return new StorageReport( + new DatanodeStorage(storageID, state, storageType), + false, capacity, dfsUsed, remaining, blockPoolUsed); + } /** @return the first {@link DatanodeStorageInfo} corresponding to * the given datanode diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index a705f17dd27..f12c4e2cfe1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -62,6 +62,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY; @@ -83,9 +85,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT; - import static org.apache.hadoop.util.Time.now; import java.io.BufferedWriter; @@ -231,6 +230,7 @@ import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat; import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; @@ -4916,6 +4916,28 @@ DatanodeInfo[] datanodeReport(final DatanodeReportType type } } + DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type + ) throws AccessControlException, StandbyException { + checkSuperuserPrivilege(); + checkOperation(OperationCategory.UNCHECKED); + readLock(); + try { + checkOperation(OperationCategory.UNCHECKED); + final DatanodeManager dm = getBlockManager().getDatanodeManager(); + final List datanodes = dm.getDatanodeListForReport(type); + + DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()]; + for (int i = 0; i < reports.length; i++) { + final DatanodeDescriptor d = datanodes.get(i); + reports[i] = new DatanodeStorageReport(new DatanodeInfo(d), + d.getStorageReports()); + } + return reports; + } finally { + readUnlock(); + } + } + /** * Save namespace image. * This will save current namespace into fsimage file and empty edits file. 
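[Editorial note, not part of the patch: the two PBHelper#convertDatanodeStorageReports overloads added above are deliberately symmetric, so a report array survives the protobuf round trip performed by the client- and server-side translators. A minimal sketch of that round trip, assuming only the converters introduced in this commit:]

import java.util.List;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;

class DatanodeStorageReportRoundTrip {
  // Converts to wire form and back; the result should be structurally equal
  // to the input (same datanodes and storage reports, in the same order).
  static DatanodeStorageReport[] roundTrip(DatanodeStorageReport[] reports) {
    List<DatanodeStorageReportProto> protos =
        PBHelper.convertDatanodeStorageReports(reports);
    return PBHelper.convertDatanodeStorageReports(protos);
  }
}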
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index f1f67247c26..2c2cd4f2272 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -115,6 +115,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; @@ -830,11 +831,23 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type) throws IOException { DatanodeInfo results[] = namesystem.datanodeReport(type); if (results == null ) { - throw new IOException("Cannot find datanode report"); + throw new IOException("Failed to get datanode report for " + type + + " datanodes."); } return results; } + @Override // ClientProtocol + public DatanodeStorageReport[] getDatanodeStorageReport( + DatanodeReportType type) throws IOException { + final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type); + if (reports == null ) { + throw new IOException("Failed to get datanode storage report for " + type + + " datanodes."); + } + return reports; + } + @Override // ClientProtocol public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java new file mode 100644 index 00000000000..6a956a0fac1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.protocol; + +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; + +/** + * Class captures information of a datanode and its storages. 
+ */ +public class DatanodeStorageReport { + final DatanodeInfo datanodeInfo; + final StorageReport[] storageReports; + + public DatanodeStorageReport(DatanodeInfo datanodeInfo, + StorageReport[] storageReports) { + this.datanodeInfo = datanodeInfo; + this.storageReports = storageReports; + } + + public DatanodeInfo getDatanodeInfo() { + return datanodeInfo; + } + + public StorageReport[] getStorageReports() { + return storageReports; + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 5a75c41fb54..d2f92d64d0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -281,6 +281,19 @@ message GetDatanodeReportResponseProto { repeated DatanodeInfoProto di = 1; } +message GetDatanodeStorageReportRequestProto { + required DatanodeReportTypeProto type = 1; +} + +message DatanodeStorageReportProto { + required DatanodeInfoProto datanodeInfo = 1; + repeated StorageReportProto storageReports = 2; +} + +message GetDatanodeStorageReportResponseProto { + repeated DatanodeStorageReportProto datanodeStorageReports = 1; +} + message GetPreferredBlockSizeRequestProto { required string filename = 1; } @@ -672,6 +685,8 @@ service ClientNamenodeProtocol { rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto); rpc getDatanodeReport(GetDatanodeReportRequestProto) returns(GetDatanodeReportResponseProto); + rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto) + returns(GetDatanodeStorageReportResponseProto); rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto) returns(GetPreferredBlockSizeResponseProto); rpc setSafeMode(SetSafeModeRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index 2afcf057f70..187761a4502 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -44,20 +44,6 @@ message DatanodeRegistrationProto { required string softwareVersion = 4; // Software version of the DN, e.g. 
"2.0.0" } -/** - * Represents a storage available on the datanode - */ -message DatanodeStorageProto { - enum StorageState { - NORMAL = 0; - READ_ONLY_SHARED = 1; - } - - required string storageUuid = 1; - optional StorageState state = 2 [default = NORMAL]; - optional StorageTypeProto storageType = 3 [default = DISK]; -} - /** * Commands sent from namenode to the datanodes */ @@ -196,16 +182,6 @@ message HeartbeatRequestProto { optional uint64 cacheUsed = 7 [default = 0 ]; } -message StorageReportProto { - required string storageUuid = 1 [ deprecated = true ]; - optional bool failed = 2 [ default = false ]; - optional uint64 capacity = 3 [ default = 0 ]; - optional uint64 dfsUsed = 4 [ default = 0 ]; - optional uint64 remaining = 5 [ default = 0 ]; - optional uint64 blockPoolUsed = 6 [ default = 0 ]; - optional DatanodeStorageProto storage = 7; // supersedes StorageUuid -} - /** * state - State the NN is in when returning response to the DN * txid - Highest transaction ID this NN has seen diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 8f15be6e92e..04fcc500e84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -99,6 +99,30 @@ message DatanodeInfoProto { optional uint64 cacheUsed = 12 [default = 0]; } +/** + * Represents a storage available on the datanode + */ +message DatanodeStorageProto { + enum StorageState { + NORMAL = 0; + READ_ONLY_SHARED = 1; + } + + required string storageUuid = 1; + optional StorageState state = 2 [default = NORMAL]; + optional StorageTypeProto storageType = 3 [default = DISK]; +} + +message StorageReportProto { + required string storageUuid = 1 [ deprecated = true ]; + optional bool failed = 2 [ default = false ]; + optional uint64 capacity = 3 [ default = 0 ]; + optional uint64 dfsUsed = 4 [ default = 0 ]; + optional uint64 remaining = 5 [ default = 0 ]; + optional uint64 blockPoolUsed = 6 [ default = 0 ]; + optional DatanodeStorageProto storage = 7; // supersedes StorageUuid +} + /** * Summary of a file or directory */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java index d5802b0b54c..1e6db21d8af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java @@ -21,19 +21,26 @@ import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.junit.Assert.assertEquals; -import java.net.InetSocketAddress; -import java.util.ArrayList; +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.junit.Test; /** * This test ensures the all types of data node report work correctly. 
*/ public class TestDatanodeReport { + static final Log LOG = LogFactory.getLog(TestDatanodeReport.class); final static private Configuration conf = new HdfsConfiguration(); final static private int NUM_OF_DATANODES = 4; @@ -50,20 +57,18 @@ public void testDatanodeReport() throws Exception { try { //wait until the cluster is up cluster.waitActive(); + final String bpid = cluster.getNamesystem().getBlockPoolId(); + final List datanodes = cluster.getDataNodes(); + final DFSClient client = cluster.getFileSystem().dfs; - InetSocketAddress addr = new InetSocketAddress("localhost", - cluster.getNameNodePort()); - DFSClient client = new DFSClient(addr, conf); - - assertEquals(client.datanodeReport(DatanodeReportType.ALL).length, - NUM_OF_DATANODES); - assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length, - NUM_OF_DATANODES); - assertEquals(client.datanodeReport(DatanodeReportType.DEAD).length, 0); + assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid); + assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid); + assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid); // bring down one datanode - ArrayList datanodes = cluster.getDataNodes(); - datanodes.remove(datanodes.size()-1).shutdown(); + final DataNode last = datanodes.get(datanodes.size() - 1); + LOG.info("XXX shutdown datanode " + last.getDatanodeUuid()); + last.shutdown(); DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD); while (nodeInfo.length != 1) { @@ -74,22 +79,59 @@ public void testDatanodeReport() throws Exception { nodeInfo = client.datanodeReport(DatanodeReportType.DEAD); } - assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length, - NUM_OF_DATANODES-1); - assertEquals(client.datanodeReport(DatanodeReportType.ALL).length, - NUM_OF_DATANODES); + assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null); + assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null); + assertReports(1, DatanodeReportType.DEAD, client, datanodes, null); Thread.sleep(5000); assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem")); - }finally { + } finally { cluster.shutdown(); } } - - public static void main(String[] args) throws Exception { - new TestDatanodeReport().testDatanodeReport(); + + final static Comparator CMP = new Comparator() { + @Override + public int compare(StorageReport left, StorageReport right) { + return left.getStorage().getStorageID().compareTo( + right.getStorage().getStorageID()); + } + }; + + static void assertReports(int numDatanodes, DatanodeReportType type, + DFSClient client, List datanodes, String bpid) throws IOException { + final DatanodeInfo[] infos = client.datanodeReport(type); + assertEquals(numDatanodes, infos.length); + final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type); + assertEquals(numDatanodes, reports.length); + + for(int i = 0; i < infos.length; i++) { + assertEquals(infos[i], reports[i].getDatanodeInfo()); + + final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes); + if (bpid != null) { + //check storage + final StorageReport[] computed = reports[i].getStorageReports(); + Arrays.sort(computed, CMP); + final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid); + Arrays.sort(expected, CMP); + + assertEquals(expected.length, computed.length); + for(int j = 0; j < expected.length; j++) { + assertEquals(expected[j].getStorage().getStorageID(), + 
computed[j].getStorage().getStorageID()); + } + } + } } -} - - + static DataNode findDatanode(String id, List datanodes) { + for(DataNode d : datanodes) { + if (d.getDatanodeUuid().equals(id)) { + return d; + } + } + throw new IllegalStateException("Datnode " + id + " not in datanode list: " + + datanodes); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 6c8547ebf8d..440b4f3a6cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -31,25 +31,25 @@ import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto; -import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; @@ -67,9 +67,18 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; -import org.apache.hadoop.hdfs.server.protocol.*; +import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; +import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import 
org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; +import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.security.token.Token; From c6cddce752bf537ea6567101000d93cbbc18aa16 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 29 Jul 2014 00:44:12 +0000 Subject: [PATCH 071/354] HADOOP-10876. The constructor of Path should not take an empty URL as a parameter. Contributed by Zhihai Xu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614230 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../main/java/org/apache/hadoop/fs/Path.java | 16 ++++++++++++- .../java/org/apache/hadoop/fs/TestPath.java | 24 +++++++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 34bd95103cd..b13598f9aa8 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -491,6 +491,9 @@ Release 2.6.0 - UNRELEASED HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry. (Benoy Antony via umamahesh) + HADOOP-10876. The constructor of Path should not take an empty URL as a + parameter. (Zhihai Xu via wang) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java index 54ddedaff1d..0e8db1df11e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java @@ -128,7 +128,20 @@ private void checkPathArg( String path ) throws IllegalArgumentException { "Can not create a Path from an empty string"); } } - + + /** check URI parameter of Path constructor. */ + private void checkPathArg(URI aUri) throws IllegalArgumentException { + // disallow construction of a Path from an empty URI + if (aUri == null) { + throw new IllegalArgumentException( + "Can not create a Path from a null URI"); + } + if (aUri.toString().isEmpty()) { + throw new IllegalArgumentException( + "Can not create a Path from an empty URI"); + } + } + /** Construct a path from a String. Path strings are URIs, but with * unescaped elements and some additional normalization. 
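[Editorial note, not part of the patch: with the checkPathArg(URI) guard above, constructing a Path from an empty or null URI now fails fast with an IllegalArgumentException instead of yielding a malformed Path (or an NPE), mirroring the existing String-argument check; the TestPath changes later in this commit exercise exactly these cases. A small hypothetical demonstration; the hdfs://nn:8020 authority is made up:]

import java.net.URI;
import org.apache.hadoop.fs.Path;

class PathUriCheckDemo {
  public static void main(String[] args) throws Exception {
    System.out.println(new Path(new URI("hdfs://nn:8020/user/alice"))); // still allowed
    try {
      new Path(new URI(""));            // rejected by the new check
    } catch (IllegalArgumentException e) {
      System.out.println("empty URI: " + e.getMessage());
    }
    try {
      new Path((URI) null);             // rejected by the new check
    } catch (IllegalArgumentException e) {
      System.out.println("null URI: " + e.getMessage());
    }
  }
}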
*/ public Path(String pathString) throws IllegalArgumentException { @@ -176,6 +189,7 @@ public Path(String pathString) throws IllegalArgumentException { * Construct a path from a URI */ public Path(URI aUri) { + checkPathArg(aUri); uri = aUri.normalize(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java index 94908da7a38..54d25c995bd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java @@ -26,11 +26,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.AvroTestUtil; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; import com.google.common.base.Joiner; import junit.framework.TestCase; +import static org.junit.Assert.fail; public class TestPath extends TestCase { /** @@ -305,6 +307,28 @@ public void testURI() throws URISyntaxException, IOException { // if the child uri is absolute path assertEquals("foo://bar/fud#boo", new Path(new Path(new URI( "foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString()); + + // empty URI + URI uri3 = new URI(""); + assertEquals("", uri3.toString()); + try { + path = new Path(uri3); + fail("Expected exception for empty URI"); + } catch (IllegalArgumentException e) { + // expect to receive an IllegalArgumentException + GenericTestUtils.assertExceptionContains("Can not create a Path" + + " from an empty URI", e); + } + // null URI + uri3 = null; + try { + path = new Path(uri3); + fail("Expected exception for null URI"); + } catch (IllegalArgumentException e) { + // expect to receive an IllegalArgumentException + GenericTestUtils.assertExceptionContains("Can not create a Path" + + " from a null URI", e); + } } /** Test URIs created from Path objects */ From 260fd25568322c575b08b89d7ca60e75f000de41 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Tue, 29 Jul 2014 02:55:27 +0000 Subject: [PATCH 072/354] HADOOP-8069. Enable TCP_NODELAY by default for IPC. (Contributed by Todd Lipcon) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614242 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../fs/CommonConfigurationKeysPublic.java | 4 ++-- .../src/main/resources/core-default.xml | 19 ------------------- 3 files changed, 5 insertions(+), 21 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index b13598f9aa8..c2f6f91edaf 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -460,6 +460,9 @@ Release 2.6.0 - UNRELEASED HADOOP-10882. Move DirectBufferPool into common util. (todd) + HADOOP-8069. Enable TCP_NODELAY by default for IPC. 
(Todd Lipcon via + Arpit Agarwal) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 57d4eec23be..59c08143bf4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -207,7 +207,7 @@ public class CommonConfigurationKeysPublic { public static final String IPC_CLIENT_TCPNODELAY_KEY = "ipc.client.tcpnodelay"; /** Defalt value for IPC_CLIENT_TCPNODELAY_KEY */ - public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = false; + public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = true; /** See core-default.xml */ public static final String IPC_SERVER_LISTEN_QUEUE_SIZE_KEY = "ipc.server.listen.queue.size"; @@ -226,7 +226,7 @@ public class CommonConfigurationKeysPublic { public static final String IPC_SERVER_TCPNODELAY_KEY = "ipc.server.tcpnodelay"; /** Default value for IPC_SERVER_TCPNODELAY_KEY */ - public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = false; + public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = true; /** See core-default.xml */ public static final String HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY = diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 46bcf895a52..eeb2bb20fa8 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -807,25 +807,6 @@ for ldap providers in the same way as above does. - - ipc.server.tcpnodelay - false - Turn on/off Nagle's algorithm for the TCP socket connection on - the server. Setting to true disables the algorithm and may decrease latency - with a cost of more/smaller packets. - - - - - ipc.client.tcpnodelay - false - Turn on/off Nagle's algorithm for the TCP socket connection on - the client. Setting to true disables the algorithm and may decrease latency - with a cost of more/smaller packets. - - - - From c0b49ff10728bb70bb60e6cb5973976f0466d247 Mon Sep 17 00:00:00 2001 From: Karthik Kambatla Date: Tue, 29 Jul 2014 17:41:52 +0000 Subject: [PATCH 073/354] YARN-2328. FairScheduler: Verify update and continuous scheduling threads are stopped when the scheduler is stopped. (kasha) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614432 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../scheduler/fair/FairScheduler.java | 62 +++++++++++-------- .../scheduler/fair/TestFairScheduler.java | 25 ++++++++ 3 files changed, 65 insertions(+), 25 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index c0405106a01..d3efd1cf7b8 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -71,6 +71,9 @@ Release 2.6.0 - UNRELEASED YARN-2211. Persist AMRMToken master key in RMStateStore for RM recovery. (Xuan Gong via jianhe) + YARN-2328. FairScheduler: Verify update and continuous scheduling threads are + stopped when the scheduler is stopped. 
(kasha) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 27a0075c1b2..4e1c244730a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -139,8 +139,11 @@ public class FairScheduler extends private final int UPDATE_DEBUG_FREQUENCY = 5; private int updatesToSkipForDebug = UPDATE_DEBUG_FREQUENCY; - private Thread updateThread; - private Thread schedulingThread; + @VisibleForTesting + Thread updateThread; + + @VisibleForTesting + Thread schedulingThread; // timeout to join when we stop this service protected final long THREAD_JOIN_TIMEOUT_MS = 1000; @@ -243,16 +246,21 @@ public QueueManager getQueueManager() { } /** - * A runnable which calls {@link FairScheduler#update()} every + * Thread which calls {@link FairScheduler#update()} every * updateInterval milliseconds. */ - private class UpdateThread implements Runnable { + private class UpdateThread extends Thread { + + @Override public void run() { - while (true) { + while (!Thread.currentThread().isInterrupted()) { try { Thread.sleep(updateInterval); update(); preemptTasksIfNecessary(); + } catch (InterruptedException ie) { + LOG.warn("Update thread interrupted. Exiting."); + return; } catch (Exception e) { LOG.error("Exception in fair scheduler UpdateThread", e); } @@ -260,6 +268,26 @@ public void run() { } } + /** + * Thread which attempts scheduling resources continuously, + * asynchronous to the node heartbeats. + */ + private class ContinuousSchedulingThread extends Thread { + + @Override + public void run() { + while (!Thread.currentThread().isInterrupted()) { + try { + continuousSchedulingAttempt(); + Thread.sleep(getContinuousSchedulingSleepMs()); + } catch (InterruptedException e) { + LOG.warn("Continuous scheduling thread interrupted. Exiting.", e); + return; + } + } + } + } + /** * Recompute the internal variables used by the scheduler - per-job weights, * fair shares, deficits, minimum slot allocations, and amount of used and @@ -970,7 +998,7 @@ private synchronized void nodeUpdate(RMNode nm) { } } - void continuousSchedulingAttempt() { + void continuousSchedulingAttempt() throws InterruptedException { List nodeIdList = new ArrayList(nodes.keySet()); // Sort the nodes by space available on them, so that we offer // containers on emptier nodes first, facilitating an even spread. 
This @@ -1229,30 +1257,14 @@ private synchronized void initScheduler(Configuration conf) throw new IOException("Failed to start FairScheduler", e); } - updateThread = new Thread(new UpdateThread()); + updateThread = new UpdateThread(); updateThread.setName("FairSchedulerUpdateThread"); updateThread.setDaemon(true); if (continuousSchedulingEnabled) { // start continuous scheduling thread - schedulingThread = new Thread( - new Runnable() { - @Override - public void run() { - while (!Thread.currentThread().isInterrupted()) { - try { - continuousSchedulingAttempt(); - Thread.sleep(getContinuousSchedulingSleepMs()); - } catch (InterruptedException e) { - LOG.error("Continuous scheduling thread interrupted. Exiting. ", - e); - return; - } - } - } - } - ); - schedulingThread.setName("ContinuousScheduling"); + schedulingThread = new ContinuousSchedulingThread(); + schedulingThread.setName("FairSchedulerContinuousScheduling"); schedulingThread.setDaemon(true); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 33ec3184a91..23c928c8f37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; @@ -3341,4 +3342,28 @@ public void testLowestCommonAncestorDeeperHierarchy() throws Exception { scheduler.findLowestCommonAncestorQueue(a1Queue, b1Queue); assertEquals(ancestorQueue, queue1); } + + @Test + public void testThreadLifeCycle() throws InterruptedException { + conf.setBoolean( + FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, true); + scheduler.init(conf); + scheduler.start(); + + Thread updateThread = scheduler.updateThread; + Thread schedulingThread = scheduler.schedulingThread; + + assertTrue(updateThread.isAlive()); + assertTrue(schedulingThread.isAlive()); + + scheduler.stop(); + + int numRetries = 100; + while (numRetries-- > 0 && + (updateThread.isAlive() || schedulingThread.isAlive())) { + Thread.sleep(50); + } + + assertNotEquals("One of the threads is still alive", 0, numRetries); + } } From 407bb3d3e452c8277c498dd14e0cc5b7762a7091 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 29 Jul 2014 21:11:26 +0000 Subject: [PATCH 074/354] HDFS-6509. Create a special /.reserved/raw directory for raw access to encrypted data. Contributed by Charles Lamb. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1614490 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 + .../hdfs/server/namenode/FSDirectory.java | 128 ++++++--- .../hdfs/server/namenode/FSEditLogLoader.java | 3 +- .../hdfs/server/namenode/FSNamesystem.java | 257 +++++++++++------- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 48 ++++ .../hadoop/hdfs/TestEncryptionZones.java | 19 +- 6 files changed, 308 insertions(+), 150 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 99e2c12adaf..d6b4e5b1f10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -62,6 +62,9 @@ fs-encryption (Unreleased) HDFS-6724. Decrypt EDEK before creating CryptoInputStream/CryptoOutputStream. (wang) + HDFS-6509. Create a special /.reserved/raw directory for raw access to + encrypted data. (clamb via wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index d20f3b991bc..f3ef5c3226b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -120,6 +120,8 @@ private static INodeDirectory createRoot(FSNamesystem namesystem) { + DOT_RESERVED_STRING; public final static byte[] DOT_RESERVED = DFSUtil.string2Bytes(DOT_RESERVED_STRING); + private final static String RAW_STRING = "raw"; + private final static byte[] RAW = DFSUtil.string2Bytes(RAW_STRING); public final static String DOT_INODES_STRING = ".inodes"; public final static byte[] DOT_INODES = DFSUtil.string2Bytes(DOT_INODES_STRING); @@ -1315,6 +1317,7 @@ private static void checkSnapshot(INode target, DirectoryListing getListing(String src, byte[] startAfter, boolean needLocation) throws UnresolvedLinkException, IOException { String srcs = normalizePath(src); + final boolean isRawPath = isReservedRawName(src); readLock(); try { @@ -1330,7 +1333,7 @@ DirectoryListing getListing(String src, byte[] startAfter, if (!targetNode.isDirectory()) { return new DirectoryListing( new HdfsFileStatus[]{createFileStatus(HdfsFileStatus.EMPTY_NAME, - targetNode, needLocation, snapshot)}, 0); + targetNode, needLocation, snapshot, isRawPath)}, 0); } final INodeDirectory dirInode = targetNode.asDirectory(); @@ -1344,7 +1347,7 @@ DirectoryListing getListing(String src, byte[] startAfter, for (int i=0; i0; i++) { INode cur = contents.get(startChild+i); listing[i] = createFileStatus(cur.getLocalNameBytes(), cur, - needLocation, snapshot); + needLocation, snapshot, isRawPath); listingCnt++; if (needLocation) { // Once we hit lsLimit locations, stop. 
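[Editorial note, not part of the patch: the isRawPath flag threaded through getListing and createFileStatus above is what makes lookups under the new /.reserved/raw prefix carry no FileEncryptionInfo, so clients going through that prefix deal in the raw, still-encrypted bytes. A rough usage sketch under stated assumptions: the /.reserved/raw/zone1 path is hypothetical, and the complete feature is expected to restrict raw access (e.g. to the superuser), which this excerpt does not show.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class ReservedRawListingSketch {
  static void listRaw() throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // List an encryption zone through the raw prefix; the same files resolve,
    // but the namenode attaches no FileEncryptionInfo for raw paths
    // (see the isRawPath handling in createFileStatus above).
    for (FileStatus st : fs.listStatus(new Path("/.reserved/raw/zone1"))) {
      System.out.println(st.getPath() + " len=" + st.getLen());
    }
  }
}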
@@ -1395,7 +1398,7 @@ private DirectoryListing getSnapshotsListing(String src, byte[] startAfter) for (int i = 0; i < numOfListing; i++) { Root sRoot = snapshots.get(i + skipSize).getRoot(); listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, - Snapshot.CURRENT_STATE_ID); + Snapshot.CURRENT_STATE_ID, false); } return new DirectoryListing( listing, snapshots.size() - skipSize - numOfListing); @@ -1403,12 +1406,13 @@ private DirectoryListing getSnapshotsListing(String src, byte[] startAfter) /** Get the file info for a specific file. * @param src The string representation of the path to the file - * @param resolveLink whether to throw UnresolvedLinkException + * @param resolveLink whether to throw UnresolvedLinkException + * @param isRawPath true if a /.reserved/raw pathname was passed by the user * @return object containing information regarding the file * or null if file not found */ - HdfsFileStatus getFileInfo(String src, boolean resolveLink) - throws UnresolvedLinkException, IOException { + HdfsFileStatus getFileInfo(String src, boolean resolveLink, boolean isRawPath) + throws IOException { String srcs = normalizePath(src); readLock(); try { @@ -1418,9 +1422,8 @@ HdfsFileStatus getFileInfo(String src, boolean resolveLink) final INodesInPath inodesInPath = getLastINodeInPath(srcs, resolveLink); final INode i = inodesInPath.getINode(0); - final int snapshotId = inodesInPath.getPathSnapshotId(); return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i, - inodesInPath.getPathSnapshotId()); + inodesInPath.getPathSnapshotId(), isRawPath); } finally { readUnlock(); } @@ -2259,22 +2262,25 @@ void reset() { * @param path the local name * @param node inode * @param needLocation if block locations need to be included or not + * @param isRawPath true if this is being called on behalf of a path in + * /.reserved/raw * @return a file status * @throws IOException if any error occurs */ private HdfsFileStatus createFileStatus(byte[] path, INode node, - boolean needLocation, int snapshot) throws IOException { + boolean needLocation, int snapshot, boolean isRawPath) + throws IOException { if (needLocation) { - return createLocatedFileStatus(path, node, snapshot); + return createLocatedFileStatus(path, node, snapshot, isRawPath); } else { - return createFileStatus(path, node, snapshot); + return createFileStatus(path, node, snapshot, isRawPath); } } /** * Create FileStatus by file INode */ HdfsFileStatus createFileStatus(byte[] path, INode node, - int snapshot) throws IOException { + int snapshot, boolean isRawPath) throws IOException { long size = 0; // length is zero for directories short replication = 0; long blocksize = 0; @@ -2287,7 +2293,8 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; - FileEncryptionInfo feInfo = getFileEncryptionInfo(node, snapshot); + FileEncryptionInfo feInfo = isRawPath ? null : + getFileEncryptionInfo(node, snapshot); return new HdfsFileStatus( size, @@ -2310,12 +2317,14 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, * Create FileStatus with location info by file INode */ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, - INode node, int snapshot) throws IOException { + INode node, int snapshot, boolean isRawPath) throws IOException { assert hasReadLock(); long size = 0; // length is zero for directories short replication = 0; long blocksize = 0; LocatedBlocks loc = null; + final FileEncryptionInfo feInfo = isRawPath ? 
null : + getFileEncryptionInfo(node, snapshot); if (node.isFile()) { final INodeFile fileNode = node.asFile(); size = fileNode.computeFileSize(snapshot); @@ -2326,7 +2335,6 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, final boolean isUc = !inSnapshot && fileNode.isUnderConstruction(); final long fileSize = !inSnapshot && isUc ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size; - final FileEncryptionInfo feInfo = getFileEncryptionInfo(node, snapshot); loc = getFSNamesystem().getBlockManager().createLocatedBlocks( fileNode.getBlocks(), fileSize, isUc, 0L, size, false, @@ -2338,8 +2346,6 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; - final FileEncryptionInfo feInfo = getFileEncryptionInfo(node, snapshot); - HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.isDirectory(), replication, blocksize, node.getModificationTime(snapshot), @@ -2894,27 +2900,73 @@ public static boolean isReservedName(String src) { return src.startsWith(DOT_RESERVED_PATH_PREFIX); } + static boolean isReservedRawName(String src) { + return src.startsWith(DOT_RESERVED_PATH_PREFIX + + Path.SEPARATOR + RAW_STRING); + } + /** - * Resolve the path of /.reserved/.inodes//... to a regular path + * Resolve a /.reserved/... path to a non-reserved path. + *
+ * There are two special hierarchies under /.reserved/: + *
+ * /.reserved/.inodes/&lt;inodeid&gt; performs a path lookup by inodeid, + *
+ * /.reserved/raw/... returns the encrypted (raw) bytes of a file in an + * encryption zone. For instance, if /ezone is an encryption zone, then + * /ezone/a refers to the decrypted file and /.reserved/raw/ezone/a refers to + * the encrypted (raw) bytes of /ezone/a. + *
+ * Pathnames in the /.reserved/raw directory that resolve to files not in an + * encryption zone are equivalent to the corresponding non-raw path. Hence, + * if /a/b/c refers to a file that is not in an encryption zone, then + * /.reserved/raw/a/b/c is equivalent (they both refer to the same + * unencrypted file). * * @param src path that is being processed * @param pathComponents path components corresponding to the path * @param fsd FSDirectory - * @return if the path indicates an inode, return path after replacing upto + * @return if the path indicates an inode, return path after replacing up to * with the corresponding path of the inode, else the path - * in {@code src} as is. + * in {@code src} as is. If the path refers to a path in the "raw" + * directory, return the non-raw pathname. * @throws FileNotFoundException if inodeid is invalid */ - static String resolvePath(String src, byte[][] pathComponents, FSDirectory fsd) + static String resolvePath(String src, byte[][] pathComponents, + FSDirectory fsd) throws FileNotFoundException { + final int nComponents = (pathComponents == null) ? + 0 : pathComponents.length; + if (nComponents <= 2) { + return src; + } + if (!Arrays.equals(DOT_RESERVED, pathComponents[1])) { + /* This is not a /.reserved/ path so do nothing. */ + return src; + } + + if (Arrays.equals(DOT_INODES, pathComponents[2])) { + /* It's a /.reserved/.inodes path. */ + if (nComponents > 3) { + return resolveDotInodesPath(src, pathComponents, fsd); + } else { + return src; + } + } else if (Arrays.equals(RAW, pathComponents[2])) { + /* It's /.reserved/raw so strip off the /.reserved/raw prefix. */ + if (nComponents == 3) { + return Path.SEPARATOR; + } else { + return constructRemainingPath("", pathComponents, 3); + } + } else { + /* It's some sort of /.reserved/ path. Ignore it. */ + return src; + } + } + + private static String resolveDotInodesPath(String src, + byte[][] pathComponents, FSDirectory fsd) throws FileNotFoundException { - if (pathComponents == null || pathComponents.length <= 3) { - return src; - } - // Not /.reserved/.inodes - if (!Arrays.equals(DOT_RESERVED, pathComponents[1]) - || !Arrays.equals(DOT_INODES, pathComponents[2])) { // Not .inodes path - return src; - } final String inodeId = DFSUtil.bytes2String(pathComponents[3]); final long id; try { @@ -2943,10 +2995,20 @@ static String resolvePath(String src, byte[][] pathComponents, FSDirectory fsd) } } - StringBuilder path = id == INodeId.ROOT_INODE_ID ? 
new StringBuilder() - : new StringBuilder(inode.getFullPathName()); - for (int i = 4; i < pathComponents.length; i++) { - path.append(Path.SEPARATOR).append(DFSUtil.bytes2String(pathComponents[i])); + String path = ""; + if (id != INodeId.ROOT_INODE_ID) { + path = inode.getFullPathName(); + } + return constructRemainingPath(path, pathComponents, 4); + } + + private static String constructRemainingPath(String pathPrefix, + byte[][] pathComponents, int startAt) { + + StringBuilder path = new StringBuilder(pathPrefix); + for (int i = startAt; i < pathComponents.length; i++) { + path.append(Path.SEPARATOR).append( + DFSUtil.bytes2String(pathComponents[i])); } if (NameNode.LOG.isDebugEnabled()) { NameNode.LOG.debug("Resolved path is " + path); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index a721491948d..d522e51bc23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -364,7 +364,8 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, // add the op into retry cache if necessary if (toAddRetryCache) { HdfsFileStatus stat = fsNamesys.dir.createFileStatus( - HdfsFileStatus.EMPTY_NAME, newFile, Snapshot.CURRENT_STATE_ID); + HdfsFileStatus.EMPTY_NAME, newFile, Snapshot.CURRENT_STATE_ID, + false); fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, addCloseOp.rpcCallId, stat); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 49fd82b00bf..7b29cedf9f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -338,7 +338,7 @@ public boolean isAuditEnabled() { private HdfsFileStatus getAuditFileInfo(String path, boolean resolveSymlink) throws IOException { return (isAuditEnabled() && isExternalInvocation()) - ? dir.getFileInfo(path, resolveSymlink) : null; + ? 
dir.getFileInfo(path, resolveSymlink, false) : null; } private void logAuditEvent(boolean succeeded, String cmd, String src) @@ -1663,9 +1663,10 @@ void setPermission(String src, FsPermission permission) } } - private void setPermissionInt(String src, FsPermission permission) + private void setPermissionInt(final String srcArg, FsPermission permission) throws AccessControlException, FileNotFoundException, SafeModeException, UnresolvedLinkException, IOException { + String src = srcArg; HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); @@ -1674,7 +1675,7 @@ private void setPermissionInt(String src, FsPermission permission) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set permission for " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOwner(pc, src); dir.setPermission(src, permission); getEditLog().logSetPermissions(src, permission); @@ -1683,7 +1684,7 @@ private void setPermissionInt(String src, FsPermission permission) writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "setPermission", src, null, resultingStat); + logAuditEvent(true, "setPermission", srcArg, null, resultingStat); } /** @@ -1701,9 +1702,10 @@ void setOwner(String src, String username, String group) } } - private void setOwnerInt(String src, String username, String group) + private void setOwnerInt(final String srcArg, String username, String group) throws AccessControlException, FileNotFoundException, SafeModeException, UnresolvedLinkException, IOException { + String src = srcArg; HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); @@ -1712,7 +1714,7 @@ private void setOwnerInt(String src, String username, String group) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set owner for " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOwner(pc, src); if (!pc.isSuperUser()) { if (username != null && !pc.getUser().equals(username)) { @@ -1729,7 +1731,7 @@ private void setOwnerInt(String src, String username, String group) writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "setOwner", src, null, resultingStat); + logAuditEvent(true, "setOwner", srcArg, null, resultingStat); } /** @@ -1812,10 +1814,11 @@ private LocatedBlocks getBlockLocationsInt(String src, long offset, * Get block locations within the specified range, updating the * access times if necessary. 
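The rewritten resolvePath() above now handles both reserved hierarchies: /.reserved/.inodes paths go through resolveDotInodesPath(), while a /.reserved/raw prefix is simply stripped, with /.reserved/raw alone resolving to the root. A small self-contained sketch that mirrors only the raw branch, for illustration (the real code works on the split pathComponents array and also validates them, so this helper is an approximation, not part of the patch):

    // Mirrors the /.reserved/raw branch of resolvePath() above (sketch only).
    static String stripRawPrefix(String src) {
      final String prefix = "/.reserved/raw";
      if (!src.startsWith(prefix)) {
        return src;                        // not a raw path: unchanged
      }
      final String rest = src.substring(prefix.length());
      return rest.isEmpty() ? "/" : rest;  // "/.reserved/raw" alone resolves to "/"
    }
    // stripRawPrefix("/.reserved/raw/zone/file") -> "/zone/file"
    // stripRawPrefix("/.reserved/raw")           -> "/"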
*/ - private LocatedBlocks getBlockLocationsUpdateTimes(String src, long offset, - long length, boolean doAccessTime, boolean needBlockToken) + private LocatedBlocks getBlockLocationsUpdateTimes(final String srcArg, + long offset, long length, boolean doAccessTime, boolean needBlockToken) throws FileNotFoundException, UnresolvedLinkException, IOException { + String src = srcArg; FSPermissionChecker pc = getPermissionChecker(); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); for (int attempt = 0; attempt < 2; attempt++) { @@ -1827,7 +1830,7 @@ private LocatedBlocks getBlockLocationsUpdateTimes(String src, long offset, checkOperation(OperationCategory.WRITE); writeLock(); // writelock is needed to set accesstime } - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); try { if (isReadOp) { checkOperation(OperationCategory.READ); @@ -1872,8 +1875,9 @@ && doAccessTime && isAccessTimeSupported()) { isUc = false; } - final FileEncryptionInfo feInfo = dir.getFileEncryptionInfo(inode, - iip.getPathSnapshotId()); + final FileEncryptionInfo feInfo = + FSDirectory.isReservedRawName(srcArg) ? + null : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId()); final LocatedBlocks blocks = blockManager.createLocatedBlocks(inode.getBlocks(), fileSize, @@ -2098,8 +2102,9 @@ void setTimes(String src, long mtime, long atime) } } - private void setTimesInt(String src, long mtime, long atime) + private void setTimesInt(final String srcArg, long mtime, long atime) throws IOException, UnresolvedLinkException { + String src = srcArg; HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); @@ -2108,7 +2113,7 @@ private void setTimesInt(String src, long mtime, long atime) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set times " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); // Write access is required to set access and modification times if (isPermissionEnabled) { @@ -2129,7 +2134,7 @@ private void setTimesInt(String src, long mtime, long atime) } finally { writeUnlock(); } - logAuditEvent(true, "setTimes", src, null, resultingStat); + logAuditEvent(true, "setTimes", srcArg, null, resultingStat); } /** @@ -2160,9 +2165,10 @@ void createSymlink(String target, String link, } } - private void createSymlinkInt(String target, String link, + private void createSymlinkInt(String target, final String linkArg, PermissionStatus dirPerms, boolean createParent, boolean logRetryCache) throws IOException, UnresolvedLinkException { + String link = linkArg; if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.createSymlink: target=" + target + " link=" + link); @@ -2175,7 +2181,7 @@ private void createSymlinkInt(String target, String link, try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create symlink " + link); - link = FSDirectory.resolvePath(link, pathComponents, dir); + link = resolvePath(link, pathComponents); if (!createParent) { verifyParentDir(link); } @@ -2196,7 +2202,7 @@ private void createSymlinkInt(String target, String link, writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "createSymlink", link, target, resultingStat); + logAuditEvent(true, "createSymlink", linkArg, target, resultingStat); } /** @@ -2222,8 +2228,9 @@ boolean setReplication(final String src, final short replication) } } - private 
boolean setReplicationInt(String src, final short replication) - throws IOException { + private boolean setReplicationInt(final String srcArg, + final short replication) throws IOException { + String src = srcArg; blockManager.verifyReplication(src, replication, null); final boolean isFile; FSPermissionChecker pc = getPermissionChecker(); @@ -2234,7 +2241,7 @@ private boolean setReplicationInt(String src, final short replication) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set replication for " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); if (isPermissionEnabled) { checkPathAccess(pc, src, FsAction.WRITE); } @@ -2252,7 +2259,7 @@ private boolean setReplicationInt(String src, final short replication) getEditLog().logSync(); if (isFile) { - logAuditEvent(true, "setReplication", src); + logAuditEvent(true, "setReplication", srcArg); } return isFile; } @@ -2265,7 +2272,7 @@ long getPreferredBlockSize(String filename) readLock(); try { checkOperation(OperationCategory.READ); - filename = FSDirectory.resolvePath(filename, pathComponents, dir); + filename = resolvePath(filename, pathComponents); if (isPermissionEnabled) { checkTraverse(pc, filename); } @@ -2395,13 +2402,14 @@ HdfsFileStatus startFile(String src, PermissionStatus permissions, return status; } - private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, - String holder, String clientMachine, EnumSet flag, - boolean createParent, short replication, long blockSize, - List cipherSuites, boolean logRetryCache) + private HdfsFileStatus startFileInt(final String srcArg, + PermissionStatus permissions, String holder, String clientMachine, + EnumSet flag, boolean createParent, short replication, + long blockSize, List cipherSuites, boolean logRetryCache) throws AccessControlException, SafeModeException, FileAlreadyExistsException, UnresolvedLinkException, FileNotFoundException, ParentNotDirectoryException, IOException { + String src = srcArg; if (NameNode.stateChangeLog.isDebugEnabled()) { StringBuilder builder = new StringBuilder(); builder.append("DIR* NameSystem.startFile: src=" + src @@ -2467,7 +2475,7 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, String ezKeyName = null; readLock(); try { - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); INodesInPath iip = dir.getINodesInPath4Write(src); // Nothing to do if the path is not within an EZ if (dir.isInAnEZ(iip)) { @@ -2496,11 +2504,12 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create file" + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); startFileInternal(pc, src, permissions, holder, clientMachine, create, overwrite, createParent, replication, blockSize, suite, edek, logRetryCache); - stat = dir.getFileInfo(src, false); + stat = dir.getFileInfo(src, false, + FSDirectory.isReservedRawName(srcArg)); } catch (StandbyException se) { skipSync = true; throw se; @@ -2522,7 +2531,7 @@ private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, } } - logAuditEvent(true, "create", src, null, stat); + logAuditEvent(true, "create", srcArg, null, stat); return stat; } @@ -2767,7 +2776,7 @@ boolean recoverLease(String src, String holder, String clientMachine) try { checkOperation(OperationCategory.WRITE); 
checkNameNodeSafeMode("Cannot recover the lease of " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src); if (!inode.isUnderConstruction()) { return true; @@ -2894,11 +2903,12 @@ LocatedBlock appendFile(String src, String holder, String clientMachine) } } - private LocatedBlock appendFileInt(String src, String holder, + private LocatedBlock appendFileInt(final String srcArg, String holder, String clientMachine, boolean logRetryCache) throws AccessControlException, SafeModeException, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, IOException { + String src = srcArg; if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src + ", holder=" + holder @@ -2913,7 +2923,7 @@ private LocatedBlock appendFileInt(String src, String holder, try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot append to file" + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); lb = appendFileInternal(pc, src, holder, clientMachine, logRetryCache); } catch (StandbyException se) { skipSync = true; @@ -2934,7 +2944,7 @@ private LocatedBlock appendFileInt(String src, String holder, +" block size " + lb.getBlock().getNumBytes()); } } - logAuditEvent(true, "append", src); + logAuditEvent(true, "append", srcArg); return lb; } @@ -2979,7 +2989,7 @@ LocatedBlock getAdditionalBlock(String src, long fileId, String clientName, readLock(); try { checkOperation(OperationCategory.READ); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); LocatedBlock[] onRetryBlock = new LocatedBlock[1]; FileState fileState = analyzeFileState( src, fileId, clientName, previous, onRetryBlock); @@ -3202,7 +3212,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId, checkOperation(OperationCategory.READ); //check safe mode checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); //check lease final INode inode; @@ -3255,7 +3265,7 @@ boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot abandon block " + b + " for file" + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); final INode inode; if (fileId == INodeId.GRANDFATHER_INODE_ID) { @@ -3337,9 +3347,10 @@ private INodeFile checkLease(String src, String holder, INode inode, * (e.g if not all blocks have reached minimum replication yet) * @throws IOException on error (eg lease mismatch, file not open, file deleted) */ - boolean completeFile(String src, String holder, + boolean completeFile(final String srcArg, String holder, ExtendedBlock last, long fileId) throws SafeModeException, UnresolvedLinkException, IOException { + String src = srcArg; if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " + src + " for " + holder); @@ -3353,7 +3364,7 @@ boolean completeFile(String src, String holder, try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot complete file " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); success = completeFileInternal(src, 
holder, ExtendedBlock.getLocalBlock(last), fileId); } finally { @@ -3361,7 +3372,7 @@ boolean completeFile(String src, String holder, } getEditLog().logSync(); if (success) { - NameNode.stateChangeLog.info("DIR* completeFile: " + src + NameNode.stateChangeLog.info("DIR* completeFile: " + srcArg + " is closed by " + holder); } return success; @@ -3529,8 +3540,11 @@ boolean renameTo(String src, String dst) return ret; } - private boolean renameToInt(String src, String dst, boolean logRetryCache) + private boolean renameToInt(final String srcArg, final String dstArg, + boolean logRetryCache) throws IOException, UnresolvedLinkException { + String src = srcArg; + String dst = dstArg; if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src + " to " + dst); @@ -3549,8 +3563,8 @@ private boolean renameToInt(String src, String dst, boolean logRetryCache) checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot rename " + src); waitForLoadingFSImage(); - src = FSDirectory.resolvePath(src, srcComponents, dir); - dst = FSDirectory.resolvePath(dst, dstComponents, dir); + src = resolvePath(src, srcComponents); + dst = resolvePath(dst, dstComponents); checkOperation(OperationCategory.WRITE); status = renameToInternal(pc, src, dst, logRetryCache); if (status) { @@ -3561,7 +3575,7 @@ private boolean renameToInt(String src, String dst, boolean logRetryCache) } getEditLog().logSync(); if (status) { - logAuditEvent(true, "rename", src, dst, resultingStat); + logAuditEvent(true, "rename", srcArg, dstArg, resultingStat); } return status; } @@ -3599,8 +3613,10 @@ private boolean renameToInternal(FSPermissionChecker pc, String src, /** Rename src to dst */ - void renameTo(String src, String dst, Options.Rename... options) - throws IOException, UnresolvedLinkException { + void renameTo(final String srcArg, final String dstArg, + Options.Rename... options) throws IOException, UnresolvedLinkException { + String src = srcArg; + String dst = dstArg; if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options - " + src + " to " + dst); @@ -3623,8 +3639,8 @@ void renameTo(String src, String dst, Options.Rename... options) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot rename " + src); - src = FSDirectory.resolvePath(src, srcComponents, dir); - dst = FSDirectory.resolvePath(dst, dstComponents, dir); + src = resolvePath(src, srcComponents); + dst = resolvePath(dst, dstComponents); renameToInternal(pc, src, dst, cacheEntry != null, options); resultingStat = getAuditFileInfo(dst, false); success = true; @@ -3638,7 +3654,7 @@ void renameTo(String src, String dst, Options.Rename... options) for (Rename option : options) { cmd.append(option.value()).append(" "); } - logAuditEvent(true, cmd.toString(), src, dst, resultingStat); + logAuditEvent(true, cmd.toString(), srcArg, dstArg, resultingStat); } } @@ -3736,7 +3752,7 @@ private boolean deleteInternal(String src, boolean recursive, try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot delete " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); if (!recursive && dir.isNonEmptyDirectory(src)) { throw new PathIsNotEmptyDirectoryException(src + " is non empty"); } @@ -3880,7 +3896,7 @@ private boolean isSafeModeTrackingBlocks() { /** * Get the file info for a specific file. 
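Most FSNamesystem methods in this patch follow the same mechanical pattern, condensed below: the caller's path is preserved as srcArg so audit logging reports exactly what the user asked for (possibly a /.reserved/raw path), while src is rewritten to the resolved internal path before the operation runs. A sketch using names from the diff (someOperationInt itself is a placeholder, not a real method):

    private void someOperationInt(final String srcArg) throws IOException {
      String src = srcArg;                              // path exactly as supplied by the caller
      byte[][] pathComponents =
          FSDirectory.getPathComponentsForReservedPath(src);
      // resolvePath() strips /.reserved/raw (superuser-only) and /.reserved/.inodes prefixes.
      src = resolvePath(src, pathComponents);
      // ... perform the operation against the resolved src ...
      logAuditEvent(true, "someOperation", srcArg);     // audit the user-visible path
    }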
* - * @param src The string representation of the path to the file + * @param srcArg The string representation of the path to the file * @param resolveLink whether to throw UnresolvedLinkException * if src refers to a symlink * @@ -3891,9 +3907,10 @@ private boolean isSafeModeTrackingBlocks() { * or null if file not found * @throws StandbyException */ - HdfsFileStatus getFileInfo(String src, boolean resolveLink) + HdfsFileStatus getFileInfo(final String srcArg, boolean resolveLink) throws AccessControlException, UnresolvedLinkException, StandbyException, IOException { + String src = srcArg; if (!DFSUtil.isValidName(src)) { throw new InvalidPathException("Invalid file name: " + src); } @@ -3904,34 +3921,36 @@ HdfsFileStatus getFileInfo(String src, boolean resolveLink) readLock(); try { checkOperation(OperationCategory.READ); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); if (isPermissionEnabled) { checkPermission(pc, src, false, null, null, null, null, false, resolveLink); } - stat = dir.getFileInfo(src, resolveLink); + stat = dir.getFileInfo(src, resolveLink, + FSDirectory.isReservedRawName(srcArg)); } catch (AccessControlException e) { - logAuditEvent(false, "getfileinfo", src); + logAuditEvent(false, "getfileinfo", srcArg); throw e; } finally { readUnlock(); } - logAuditEvent(true, "getfileinfo", src); + logAuditEvent(true, "getfileinfo", srcArg); return stat; } /** * Returns true if the file is closed */ - boolean isFileClosed(String src) + boolean isFileClosed(final String srcArg) throws AccessControlException, UnresolvedLinkException, StandbyException, IOException { + String src = srcArg; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOperation(OperationCategory.READ); if (isPermissionEnabled) { checkTraverse(pc, src); @@ -3939,7 +3958,7 @@ boolean isFileClosed(String src) return !INodeFile.valueOf(dir.getINode(src), src).isUnderConstruction(); } catch (AccessControlException e) { if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(false, "isFileClosed", src); + logAuditEvent(false, "isFileClosed", srcArg); } throw e; } finally { @@ -3962,8 +3981,9 @@ boolean mkdirs(String src, PermissionStatus permissions, return ret; } - private boolean mkdirsInt(String src, PermissionStatus permissions, + private boolean mkdirsInt(final String srcArg, PermissionStatus permissions, boolean createParent) throws IOException, UnresolvedLinkException { + String src = srcArg; if(NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src); } @@ -3979,7 +3999,7 @@ private boolean mkdirsInt(String src, PermissionStatus permissions, try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create directory " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); status = mkdirsInternal(pc, src, permissions, createParent); if (status) { resultingStat = getAuditFileInfo(src, false); @@ -3989,7 +4009,7 @@ private boolean mkdirsInt(String src, PermissionStatus permissions, } getEditLog().logSync(); if (status) { - logAuditEvent(true, "mkdirs", src, null, resultingStat); + logAuditEvent(true, "mkdirs", srcArg, null, resultingStat); } return status; } @@ -4147,7 +4167,8 @@ private boolean 
mkdirsRecursively(String src, PermissionStatus permissions, * @return object containing information regarding the file * or null if file not found */ - ContentSummary getContentSummary(String src) throws IOException { + ContentSummary getContentSummary(final String srcArg) throws IOException { + String src = srcArg; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); @@ -4155,7 +4176,7 @@ ContentSummary getContentSummary(String src) throws IOException { boolean success = true; try { checkOperation(OperationCategory.READ); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); if (isPermissionEnabled) { checkPermission(pc, src, false, null, null, null, FsAction.READ_EXECUTE); } @@ -4166,7 +4187,7 @@ ContentSummary getContentSummary(String src) throws IOException { throw ace; } finally { readUnlock(); - logAuditEvent(success, "contentSummary", src); + logAuditEvent(success, "contentSummary", srcArg); } } @@ -4217,7 +4238,7 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot fsync file " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); final INode inode; if (fileId == INodeId.GRANDFATHER_INODE_ID) { // Older clients may not have given us an inode ID to work with. @@ -4657,9 +4678,10 @@ DirectoryListing getListing(String src, byte[] startAfter, } } - private DirectoryListing getListingInt(String src, byte[] startAfter, - boolean needLocation) + private DirectoryListing getListingInt(final String srcArg, byte[] startAfter, + boolean needLocation) throws AccessControlException, UnresolvedLinkException, IOException { + String src = srcArg; DirectoryListing dl; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); @@ -4668,7 +4690,7 @@ private DirectoryListing getListingInt(String src, byte[] startAfter, readLock(); try { checkOperation(OperationCategory.READ); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); // Get file name when startAfter is an INodePath if (FSDirectory.isReservedName(startAfterString)) { @@ -4692,7 +4714,7 @@ private DirectoryListing getListingInt(String src, byte[] startAfter, checkTraverse(pc, src); } } - logAuditEvent(true, "listStatus", src); + logAuditEvent(true, "listStatus", srcArg); dl = dir.getListing(src, startAfter, needLocation); } finally { readUnlock(); @@ -6085,6 +6107,28 @@ private void checkTraverse(FSPermissionChecker pc, String path) checkPermission(pc, path, false, null, null, null, null); } + /** + * This is a wrapper for FSDirectory.resolvePath(). If the path passed + * is prefixed with /.reserved/raw, then it checks to ensure that the caller + * has super user privs. + * + * @param path The path to resolve. + * @param pathComponents path components corresponding to the path + * @return if the path indicates an inode, return path after replacing up to + * with the corresponding path of the inode, else the path + * in {@code src} as is. If the path refers to a path in the "raw" + * directory, return the non-raw pathname. 
+ * @throws FileNotFoundException + * @throws AccessControlException + */ + private String resolvePath(String path, byte[][] pathComponents) + throws FileNotFoundException, AccessControlException { + if (FSDirectory.isReservedRawName(path)) { + checkSuperuserPrivilege(); + } + return FSDirectory.resolvePath(path, pathComponents, dir); + } + @Override public void checkSuperuserPrivilege() throws AccessControlException { @@ -8279,7 +8323,9 @@ public BatchedListEntries listCachePools(String prevKey) return results; } - void modifyAclEntries(String src, List aclSpec) throws IOException { + void modifyAclEntries(final String srcArg, List aclSpec) + throws IOException { + String src = srcArg; nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); @@ -8289,7 +8335,7 @@ void modifyAclEntries(String src, List aclSpec) throws IOException { try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot modify ACL entries on " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOwner(pc, src); List newAcl = dir.modifyAclEntries(src, aclSpec); getEditLog().logSetAcl(src, newAcl); @@ -8298,10 +8344,12 @@ void modifyAclEntries(String src, List aclSpec) throws IOException { writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "modifyAclEntries", src, null, resultingStat); + logAuditEvent(true, "modifyAclEntries", srcArg, null, resultingStat); } - void removeAclEntries(String src, List aclSpec) throws IOException { + void removeAclEntries(final String srcArg, List aclSpec) + throws IOException { + String src = srcArg; nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); @@ -8311,7 +8359,7 @@ void removeAclEntries(String src, List aclSpec) throws IOException { try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove ACL entries on " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOwner(pc, src); List newAcl = dir.removeAclEntries(src, aclSpec); getEditLog().logSetAcl(src, newAcl); @@ -8320,10 +8368,11 @@ void removeAclEntries(String src, List aclSpec) throws IOException { writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "removeAclEntries", src, null, resultingStat); + logAuditEvent(true, "removeAclEntries", srcArg, null, resultingStat); } - void removeDefaultAcl(String src) throws IOException { + void removeDefaultAcl(final String srcArg) throws IOException { + String src = srcArg; nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); @@ -8333,7 +8382,7 @@ void removeDefaultAcl(String src) throws IOException { try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove default ACL entries on " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOwner(pc, src); List newAcl = dir.removeDefaultAcl(src); getEditLog().logSetAcl(src, newAcl); @@ -8342,10 +8391,11 @@ void removeDefaultAcl(String src) throws IOException { writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "removeDefaultAcl", src, null, resultingStat); + logAuditEvent(true, "removeDefaultAcl", srcArg, null, resultingStat); } - void removeAcl(String src) throws IOException { + void removeAcl(final String srcArg) throws IOException { + String src = srcArg; 
nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); @@ -8355,7 +8405,7 @@ void removeAcl(String src) throws IOException { try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove ACL on " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOwner(pc, src); dir.removeAcl(src); getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST); @@ -8364,10 +8414,11 @@ void removeAcl(String src) throws IOException { writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "removeAcl", src, null, resultingStat); + logAuditEvent(true, "removeAcl", srcArg, null, resultingStat); } - void setAcl(String src, List aclSpec) throws IOException { + void setAcl(final String srcArg, List aclSpec) throws IOException { + String src = srcArg; nnConf.checkAclsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); @@ -8377,7 +8428,7 @@ void setAcl(String src, List aclSpec) throws IOException { try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set ACL on " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOwner(pc, src); List newAcl = dir.setAcl(src, aclSpec); getEditLog().logSetAcl(src, newAcl); @@ -8386,7 +8437,7 @@ void setAcl(String src, List aclSpec) throws IOException { writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "setAcl", src, null, resultingStat); + logAuditEvent(true, "setAcl", srcArg, null, resultingStat); } AclStatus getAclStatus(String src) throws IOException { @@ -8397,7 +8448,7 @@ AclStatus getAclStatus(String src) throws IOException { readLock(); try { checkOperation(OperationCategory.READ); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); if (isPermissionEnabled) { checkPermission(pc, src, false, null, null, null, null); } @@ -8485,7 +8536,7 @@ private void createEncryptionZoneInt(final String srcArg, String keyName, checkSuperuserPrivilege(); checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create encryption zone on " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); final XAttr ezXAttr = dir.createEncryptionZone(src, keyName); List xAttrs = Lists.newArrayListWithCapacity(1); @@ -8496,7 +8547,7 @@ private void createEncryptionZoneInt(final String srcArg, String keyName, writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "createEncryptionZone", src, null, resultingStat); + logAuditEvent(true, "createEncryptionZone", srcArg, null, resultingStat); } /** @@ -8583,8 +8634,9 @@ void setXAttr(String src, XAttr xAttr, EnumSet flag) } } - private void setXAttrInt(String src, XAttr xAttr, EnumSet flag, - boolean logRetryCache) throws IOException { + private void setXAttrInt(final String srcArg, XAttr xAttr, + EnumSet flag, boolean logRetryCache) throws IOException { + String src = srcArg; nnConf.checkXAttrsConfigFlag(); checkXAttrSize(xAttr); HdfsFileStatus resultingStat = null; @@ -8596,7 +8648,7 @@ private void setXAttrInt(String src, XAttr xAttr, EnumSet flag, try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set XAttr on " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkXAttrChangeAccess(src, xAttr, pc); List xAttrs = Lists.newArrayListWithCapacity(1); 
xAttrs.add(xAttr); @@ -8607,7 +8659,7 @@ private void setXAttrInt(String src, XAttr xAttr, EnumSet flag, writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "setXAttr", src, null, resultingStat); + logAuditEvent(true, "setXAttr", srcArg, null, resultingStat); } /** @@ -8630,7 +8682,9 @@ private void checkXAttrSize(XAttr xAttr) { } } - List getXAttrs(String src, List xAttrs) throws IOException { + List getXAttrs(final String srcArg, List xAttrs) + throws IOException { + String src = srcArg; nnConf.checkXAttrsConfigFlag(); FSPermissionChecker pc = getPermissionChecker(); boolean getAll = xAttrs == null || xAttrs.isEmpty(); @@ -8638,7 +8692,7 @@ List getXAttrs(String src, List xAttrs) throws IOException { try { XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs); } catch (AccessControlException e) { - logAuditEvent(false, "getXAttrs", src); + logAuditEvent(false, "getXAttrs", srcArg); throw e; } } @@ -8646,7 +8700,7 @@ List getXAttrs(String src, List xAttrs) throws IOException { byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOperation(OperationCategory.READ); if (isPermissionEnabled) { checkPathAccess(pc, src, FsAction.READ); @@ -8679,7 +8733,7 @@ List getXAttrs(String src, List xAttrs) throws IOException { return toGet; } } catch (AccessControlException e) { - logAuditEvent(false, "getXAttrs", src); + logAuditEvent(false, "getXAttrs", srcArg); throw e; } finally { readUnlock(); @@ -8693,7 +8747,7 @@ List listXAttrs(String src) throws IOException { byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkOperation(OperationCategory.READ); if (isPermissionEnabled) { /* To access xattr names, you need EXECUTE in the owning directory. 
*/ @@ -8740,8 +8794,9 @@ void removeXAttr(String src, XAttr xAttr) throws IOException { } } - void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache) + void removeXAttrInt(final String srcArg, XAttr xAttr, boolean logRetryCache) throws IOException { + String src = srcArg; nnConf.checkXAttrsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); @@ -8752,7 +8807,7 @@ void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache) try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove XAttr entry on " + src); - src = FSDirectory.resolvePath(src, pathComponents, dir); + src = resolvePath(src, pathComponents); checkXAttrChangeAccess(src, xAttr, pc); List xAttrs = Lists.newArrayListWithCapacity(1); @@ -8769,7 +8824,7 @@ void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache) writeUnlock(); } getEditLog().logSync(); - logAuditEvent(true, "removeXAttr", src, null, resultingStat); + logAuditEvent(true, "removeXAttr", srcArg, null, resultingStat); } private void checkXAttrChangeAccess(String src, XAttr xAttr, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 949713e6f44..0eb0965695b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -83,6 +83,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** Utilities for HDFS tests */ public class DFSTestUtil { @@ -1300,4 +1301,51 @@ public void close() throws IOException { sockDir.close(); } } + + /** + * Verify that two files have the same contents. + * + * @param fs The file system containing the two files. + * @param p1 The path of the first file. + * @param p2 The path of the second file. + * @param len The length of the two files. + * @throws IOException + */ + public static void verifyFilesEqual(FileSystem fs, Path p1, Path p2, int len) + throws IOException { + final FSDataInputStream in1 = fs.open(p1); + final FSDataInputStream in2 = fs.open(p2); + for (int i = 0; i < len; i++) { + assertEquals("Mismatch at byte " + i, in1.read(), in2.read()); + } + in1.close(); + in2.close(); + } + + /** + * Verify that two files have different contents. + * + * @param fs The file system containing the two files. + * @param p1 The path of the first file. + * @param p2 The path of the second file. + * @param len The length of the two files. 
+ * @throws IOException + */ + public static void verifyFilesNotEqual(FileSystem fs, Path p1, Path p2, + int len) + throws IOException { + final FSDataInputStream in1 = fs.open(p1); + final FSDataInputStream in2 = fs.open(p2); + try { + for (int i = 0; i < len; i++) { + if (in1.read() != in2.read()) { + return; + } + } + fail("files are equal, but should not be"); + } finally { + in1.close(); + in2.close(); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index a53e47e6e85..c7229228606 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -30,7 +30,6 @@ import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderFactory; -import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSTestWrapper; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContextTestWrapper; @@ -52,7 +51,7 @@ import org.junit.Before; import org.junit.Test; - +import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -316,16 +315,6 @@ public void testRenameFileContext() throws Exception { doRenameEncryptionZone(fcWrapper); } - private void validateFiles(Path p1, Path p2, int len) throws Exception { - FSDataInputStream in1 = fs.open(p1); - FSDataInputStream in2 = fs.open(p2); - for (int i = 0; i < len; i++) { - assertEquals("Mismatch at byte " + i, in1.read(), in2.read()); - } - in1.close(); - in2.close(); - } - private FileEncryptionInfo getFileEncryptionInfo(Path path) throws Exception { LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0); return blocks.getFileEncryptionInfo(); @@ -346,14 +335,14 @@ public void testReadWrite() throws Exception { final Path encFile1 = new Path(zone, "myfile"); DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED); // Read them back in and compare byte-by-byte - validateFiles(baseFile, encFile1, len); + verifyFilesEqual(fs, baseFile, encFile1, len); // Roll the key of the encryption zone List zones = dfsAdmin.listEncryptionZones(); assertEquals("Expected 1 EZ", 1, zones.size()); String keyName = zones.get(0).getKeyName(); cluster.getNamesystem().getProvider().rollNewVersion(keyName); // Read them back in and compare byte-by-byte - validateFiles(baseFile, encFile1, len); + verifyFilesEqual(fs, baseFile, encFile1, len); // Write a new enc file and validate final Path encFile2 = new Path(zone, "myfile2"); DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED); @@ -366,7 +355,7 @@ public void testReadWrite() throws Exception { assertNotEquals("Key was rolled, versions should be different", feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName()); // Contents still equal - validateFiles(encFile1, encFile2, len); + verifyFilesEqual(fs, encFile1, encFile2, len); } @Test(timeout = 60000) From 3a90228c30da198cfc705eb41e06f38053f9ff0e Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 29 Jul 2014 21:11:52 +0000 Subject: [PATCH 075/354] HDFS-6509 addendum, extra file git-svn-id: 
https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1614491 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/hdfs/TestReservedRawPaths.java | 348 ++++++++++++++++++ 1 file changed, 348 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java new file mode 100644 index 00000000000..c49e88b5460 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java @@ -0,0 +1,348 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; +import org.apache.hadoop.crypto.key.KeyProviderFactory; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileContextTestWrapper; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.FileSystemTestWrapper; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.client.HdfsAdmin; +import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual; +import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesNotEqual; +import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; +import static org.apache.hadoop.test.GenericTestUtils.assertMatches; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public class TestReservedRawPaths { + + private Configuration conf; + private FileSystemTestHelper fsHelper; + + private MiniDFSCluster cluster; + private HdfsAdmin dfsAdmin; + private DistributedFileSystem fs; + + protected FileSystemTestWrapper fsWrapper; + protected FileContextTestWrapper fcWrapper; + + @Before + public void setup() throws IOException { + conf = new HdfsConfiguration(); + fsHelper = new FileSystemTestHelper(); + // Set up java key store + String testRoot = fsHelper.getTestRootDir(); + File testRootDir = new File(testRoot).getAbsoluteFile(); + 
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, + JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" + ); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); + fs = cluster.getFileSystem(); + fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem()); + fcWrapper = new FileContextTestWrapper( + FileContext.getFileContext(cluster.getURI(), conf)); + dfsAdmin = new HdfsAdmin(cluster.getURI(), conf); + // Need to set the client's KeyProvider to the NN's for JKS, + // else the updates do not get flushed properly + fs.getClient().provider = cluster.getNameNode().getNamesystem() + .getProvider(); + } + + @After + public void teardown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + /** + * Basic read/write tests of raw files. + * Create a non-encrypted file + * Create an encryption zone + * Verify that non-encrypted file contents and decrypted file in EZ are equal + * Compare the raw encrypted bytes of the file with the decrypted version to + * ensure they're different + * Compare the raw and non-raw versions of the non-encrypted file to ensure + * they're the same. + */ + @Test(timeout = 120000) + public void testReadWriteRaw() throws Exception { + // Create a base file for comparison + final Path baseFile = new Path("/base"); + final int len = 8192; + DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED); + // Create the first enc file + final Path zone = new Path("/zone"); + fs.mkdirs(zone); + dfsAdmin.createEncryptionZone(zone, null); + final Path encFile1 = new Path(zone, "myfile"); + DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED); + // Read them back in and compare byte-by-byte + verifyFilesEqual(fs, baseFile, encFile1, len); + // Raw file should be different from encrypted file + final Path encFile1Raw = new Path(zone, "/.reserved/raw/zone/myfile"); + verifyFilesNotEqual(fs, encFile1Raw, encFile1, len); + // Raw file should be same as /base which is not in an EZ + final Path baseFileRaw = new Path(zone, "/.reserved/raw/base"); + verifyFilesEqual(fs, baseFile, baseFileRaw, len); + } + + private void assertPathEquals(Path p1, Path p2) throws IOException { + final FileStatus p1Stat = fs.getFileStatus(p1); + final FileStatus p2Stat = fs.getFileStatus(p2); + + /* + * Use accessTime and modificationTime as substitutes for INode to check + * for resolution to the same underlying file. + */ + assertEquals("Access times not equal", p1Stat.getAccessTime(), + p2Stat.getAccessTime()); + assertEquals("Modification times not equal", p1Stat.getModificationTime(), + p2Stat.getModificationTime()); + assertEquals("pathname1 not equal", p1, + Path.getPathWithoutSchemeAndAuthority(p1Stat.getPath())); + assertEquals("pathname1 not equal", p2, + Path.getPathWithoutSchemeAndAuthority(p2Stat.getPath())); + } + + /** + * Tests that getFileStatus on raw and non raw resolve to the same + * file. + */ + @Test(timeout = 120000) + public void testGetFileStatus() throws Exception { + final Path zone = new Path("zone"); + final Path slashZone = new Path("/", zone); + fs.mkdirs(slashZone); + dfsAdmin.createEncryptionZone(slashZone, null); + + final Path base = new Path("base"); + final Path reservedRaw = new Path("/.reserved/raw"); + final Path baseRaw = new Path(reservedRaw, base); + final int len = 8192; + DFSTestUtil.createFile(fs, baseRaw, len, (short) 1, 0xFEED); + assertPathEquals(new Path("/", base), baseRaw); + + /* Repeat the test for a file in an ez. 
*/ + final Path ezEncFile = new Path(slashZone, base); + final Path ezRawEncFile = + new Path(new Path(reservedRaw, zone), base); + DFSTestUtil.createFile(fs, ezEncFile, len, (short) 1, 0xFEED); + assertPathEquals(ezEncFile, ezRawEncFile); + } + + @Test(timeout = 120000) + public void testReservedRoot() throws Exception { + final Path root = new Path("/"); + final Path rawRoot = new Path("/.reserved/raw"); + final Path rawRootSlash = new Path("/.reserved/raw/"); + assertPathEquals(root, rawRoot); + assertPathEquals(root, rawRootSlash); + } + + /* Verify mkdir works ok in .reserved/raw directory. */ + @Test(timeout = 120000) + public void testReservedRawMkdir() throws Exception { + final Path zone = new Path("zone"); + final Path slashZone = new Path("/", zone); + fs.mkdirs(slashZone); + dfsAdmin.createEncryptionZone(slashZone, null); + final Path rawRoot = new Path("/.reserved/raw"); + final Path dir1 = new Path("dir1"); + final Path rawDir1 = new Path(rawRoot, dir1); + fs.mkdirs(rawDir1); + assertPathEquals(rawDir1, new Path("/", dir1)); + fs.delete(rawDir1, true); + final Path rawZone = new Path(rawRoot, zone); + final Path rawDir1EZ = new Path(rawZone, dir1); + fs.mkdirs(rawDir1EZ); + assertPathEquals(rawDir1EZ, new Path(slashZone, dir1)); + fs.delete(rawDir1EZ, true); + } + + @Test(timeout = 120000) + public void testRelativePathnames() throws Exception { + final Path baseFileRaw = new Path("/.reserved/raw/base"); + final int len = 8192; + DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED); + + final Path root = new Path("/"); + final Path rawRoot = new Path("/.reserved/raw"); + assertPathEquals(root, new Path(rawRoot, "../raw")); + assertPathEquals(root, new Path(rawRoot, "../../.reserved/raw")); + assertPathEquals(baseFileRaw, new Path(rawRoot, "../raw/base")); + assertPathEquals(baseFileRaw, new Path(rawRoot, + "../../.reserved/raw/base")); + assertPathEquals(baseFileRaw, new Path(rawRoot, + "../../.reserved/raw/base/../base")); + assertPathEquals(baseFileRaw, new Path( + "/.reserved/../.reserved/raw/../raw/base")); + } + + @Test(timeout = 120000) + public void testAdminAccessOnly() throws Exception { + final Path zone = new Path("zone"); + final Path slashZone = new Path("/", zone); + fs.mkdirs(slashZone); + dfsAdmin.createEncryptionZone(slashZone, null); + final Path base = new Path("base"); + final Path reservedRaw = new Path("/.reserved/raw"); + final int len = 8192; + + /* Test failure of create file in reserved/raw as non admin */ + final UserGroupInformation user = UserGroupInformation. 
+ createUserForTesting("user", new String[] { "mygroup" }); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final DistributedFileSystem fs = cluster.getFileSystem(); + try { + final Path ezRawEncFile = + new Path(new Path(reservedRaw, zone), base); + DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED); + fail("access to /.reserved/raw is superuser-only operation"); + } catch (AccessControlException e) { + assertExceptionContains("Superuser privilege is required", e); + } + return null; + } + }); + + /* Test failure of getFileStatus in reserved/raw as non admin */ + final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base); + DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final DistributedFileSystem fs = cluster.getFileSystem(); + try { + fs.getFileStatus(ezRawEncFile); + fail("access to /.reserved/raw is superuser-only operation"); + } catch (AccessControlException e) { + assertExceptionContains("Superuser privilege is required", e); + } + return null; + } + }); + + /* Test failure of listStatus in reserved/raw as non admin */ + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final DistributedFileSystem fs = cluster.getFileSystem(); + try { + fs.listStatus(ezRawEncFile); + fail("access to /.reserved/raw is superuser-only operation"); + } catch (AccessControlException e) { + assertExceptionContains("Superuser privilege is required", e); + } + return null; + } + }); + + fs.setPermission(new Path("/"), new FsPermission((short) 0777)); + /* Test failure of mkdir in reserved/raw as non admin */ + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final DistributedFileSystem fs = cluster.getFileSystem(); + final Path d1 = new Path(reservedRaw, "dir1"); + try { + fs.mkdirs(d1); + fail("access to /.reserved/raw is superuser-only operation"); + } catch (AccessControlException e) { + assertExceptionContains("Superuser privilege is required", e); + } + return null; + } + }); + } + + @Test(timeout = 120000) + public void testListDotReserved() throws Exception { + // Create a base file for comparison + final Path baseFileRaw = new Path("/.reserved/raw/base"); + final int len = 8192; + DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED); + + /* + * Ensure that you can't list /.reserved. Ever. 
+ */ + try { + fs.listStatus(new Path("/.reserved")); + fail("expected FNFE"); + } catch (FileNotFoundException e) { + assertExceptionContains("/.reserved does not exist", e); + } + + try { + fs.listStatus(new Path("/.reserved/.inodes")); + fail("expected FNFE"); + } catch (FileNotFoundException e) { + assertExceptionContains( + "/.reserved/.inodes does not exist", e); + } + + final FileStatus[] fileStatuses = fs.listStatus(new Path("/.reserved/raw")); + assertEquals("expected 1 entry", fileStatuses.length, 1); + assertMatches(fileStatuses[0].getPath().toString(), "/.reserved/raw/base"); + } + + @Test(timeout = 120000) + public void testListRecursive() throws Exception { + Path rootPath = new Path("/"); + Path p = rootPath; + for (int i = 0; i < 3; i++) { + p = new Path(p, "dir" + i); + fs.mkdirs(p); + } + + Path curPath = new Path("/.reserved/raw"); + int cnt = 0; + FileStatus[] fileStatuses = fs.listStatus(curPath); + while (fileStatuses != null && fileStatuses.length > 0) { + FileStatus f = fileStatuses[0]; + assertMatches(f.getPath().toString(), "/.reserved/raw"); + curPath = Path.getPathWithoutSchemeAndAuthority(f.getPath()); + cnt++; + fileStatuses = fs.listStatus(curPath); + } + assertEquals(3, cnt); + } +} From 415223548d84cd17979a0cff05f87f1fc3beb7f2 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 29 Jul 2014 23:39:38 +0000 Subject: [PATCH 076/354] HDFS-6771. Require specification of an encryption key when creating an encryption zone. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1614519 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 3 + .../apache/hadoop/hdfs/client/HdfsAdmin.java | 22 ++--- .../hdfs/server/namenode/FSNamesystem.java | 93 +++++-------------- .../apache/hadoop/hdfs/tools/CryptoAdmin.java | 8 +- ...CryptoCLI.java => TestCryptoAdminCLI.java} | 2 +- .../hadoop/hdfs/TestEncryptionZones.java | 50 +++++++--- .../src/test/resources/testCryptoConf.xml | 66 ++++++------- 7 files changed, 106 insertions(+), 138 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/{TestCryptoCLI.java => TestCryptoAdminCLI.java} (98%) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index d6b4e5b1f10..2531a4e4049 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -65,6 +65,9 @@ fs-encryption (Unreleased) HDFS-6509. Create a special /.reserved/raw directory for raw access to encrypted data. (clamb via wang) + HDFS-6771. Require specification of an encryption key when creating + an encryption zone. (wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index be3ac51cfe7..0a22d9dd3f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -231,22 +231,16 @@ public RemoteIterator listCachePools() throws IOException { } /** - * Create an encryption zone rooted at an empty existing directory. An - * encryption zone has an associated encryption key used when reading and - * writing files within the zone. 
An existing key can be specified, - * else a new key will be generated for the encryption zone. - * - * @param path The path of the root of the encryption zone. Must refer to - * an empty, existing directory. - * - * @param keyName Optional name of key available at the KeyProvider. If null, - * then a key is generated. - * - * @throws IOException if there was a general IO exception + * Create an encryption zone rooted at an empty existing directory, using the + * specified encryption key. An encryption zone has an associated encryption + * key used when reading and writing files within the zone. * + * @param path The path of the root of the encryption zone. Must refer to + * an empty, existing directory. + * @param keyName Name of key available at the KeyProvider. + * @throws IOException if there was a general IO exception * @throws AccessControlException if the caller does not have access to path - * - * @throws FileNotFoundException if the path does not exist + * @throws FileNotFoundException if the path does not exist */ public void createEncryptionZone(Path path, String keyName) throws IOException, AccessControlException, FileNotFoundException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 7b29cedf9f4..dde19b9bd21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8457,24 +8457,19 @@ AclStatus getAclStatus(String src) throws IOException { readUnlock(); } } - + /** - * Create an encryption zone on directory src. If provided, - * will use an existing key, else will generate a new key. - * - * @param src the path of a directory which will be the root of the - * encryption zone. The directory must be empty. - * - * @param keyNameArg an optional name of a key in the configured - * KeyProvider. If this is null, then a a new key is generated. - * - * @throws AccessControlException if the caller is not the superuser. + * Create an encryption zone on directory src using the specified key. * + * @param src the path of a directory which will be the root of the + * encryption zone. The directory must be empty. + * @param keyName name of a key which must be present in the configured + * KeyProvider. + * @throws AccessControlException if the caller is not the superuser. * @throws UnresolvedLinkException if the path can't be resolved. - * - * @throws SafeModeException if the Namenode is in safe mode. + * @throws SafeModeException if the Namenode is in safe mode. 
*/ - void createEncryptionZone(final String src, String keyNameArg) + void createEncryptionZone(final String src, final String keyName) throws IOException, UnresolvedLinkException, SafeModeException, AccessControlException { final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); @@ -8482,8 +8477,6 @@ void createEncryptionZone(final String src, String keyNameArg) return; // Return previous response } - boolean createdKey = false; - String keyName = keyNameArg; boolean success = false; try { if (provider == null) { @@ -8492,22 +8485,20 @@ void createEncryptionZone(final String src, String keyNameArg) " since no key provider is available."); } if (keyName == null || keyName.isEmpty()) { - keyName = UUID.randomUUID().toString(); - createNewKey(keyName, src); - createdKey = true; - } else { - KeyVersion keyVersion = provider.getCurrentKey(keyName); - if (keyVersion == null) { - /* - * It would be nice if we threw something more specific than - * IOException when the key is not found, but the KeyProvider API - * doesn't provide for that. If that API is ever changed to throw - * something more specific (e.g. UnknownKeyException) then we can - * update this to match it, or better yet, just rethrow the - * KeyProvider's exception. - */ - throw new IOException("Key " + keyName + " doesn't exist."); - } + throw new IOException("Must specify a key name when creating an " + + "encryption zone"); + } + KeyVersion keyVersion = provider.getCurrentKey(keyName); + if (keyVersion == null) { + /* + * It would be nice if we threw something more specific than + * IOException when the key is not found, but the KeyProvider API + * doesn't provide for that. If that API is ever changed to throw + * something more specific (e.g. UnknownKeyException) then we can + * update this to match it, or better yet, just rethrow the + * KeyProvider's exception. + */ + throw new IOException("Key " + keyName + " doesn't exist."); } createEncryptionZoneInt(src, keyName, cacheEntry != null); success = true; @@ -8516,10 +8507,6 @@ void createEncryptionZone(final String src, String keyNameArg) throw e; } finally { RetryCache.setState(cacheEntry, success); - if (!success && createdKey) { - /* Unwind key creation. */ - provider.deleteKey(keyName); - } } } @@ -8550,40 +8537,6 @@ private void createEncryptionZoneInt(final String srcArg, String keyName, logAuditEvent(true, "createEncryptionZone", srcArg, null, resultingStat); } - /** - * Create a new key on the KeyProvider for an encryption zone. - * - * @param keyNameArg name of the key - * @param src path of the encryption zone. 
- * @return KeyVersion of the created key - * @throws IOException - */ - private KeyVersion createNewKey(String keyNameArg, String src) - throws IOException { - Preconditions.checkNotNull(keyNameArg); - Preconditions.checkNotNull(src); - final StringBuilder sb = new StringBuilder("hdfs://"); - if (nameserviceId != null) { - sb.append(nameserviceId); - } - sb.append(src); - if (!src.endsWith("/")) { - sb.append('/'); - } - sb.append(keyNameArg); - final String keyName = sb.toString(); - providerOptions.setDescription(keyName); - providerOptions.setBitLength(codec.getCipherSuite() - .getAlgorithmBlockSize()*8); - KeyVersion version = null; - try { - version = provider.createKey(keyNameArg, providerOptions); - } catch (NoSuchAlgorithmException e) { - throw new IOException(e); - } - return version; - } - List listEncryptionZones() throws IOException { boolean success = false; checkSuperuserPrivilege(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java index c0155fcb72b..28aaef2e455 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java @@ -124,7 +124,7 @@ public String getName() { @Override public String getShortUsage() { - return "[" + getName() + " [-keyName ] -path " + "]\n"; + return "[" + getName() + " -keyName -path " + "]\n"; } @Override @@ -133,7 +133,7 @@ public String getLongUsage() { listing.addRow("", "The path of the encryption zone to create. " + "It must be an empty directory."); listing.addRow("", "Name of the key to use for the " + - "encryption zone. 
A new key will be generated if unspecified."); + "encryption zone."); return getShortUsage() + "\n" + "Create a new encryption zone.\n\n" + listing.toString(); @@ -149,6 +149,10 @@ public int run(Configuration conf, List args) throws IOException { final String keyName = StringUtils.popOptionWithArgument("-keyName", args); + if (keyName == null) { + System.err.println("You must specify a key name with -keyName."); + return 1; + } if (!args.isEmpty()) { System.err.println("Can't understand argument: " + args.get(0)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java similarity index 98% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java index 1b4468e545b..1c83829102a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java @@ -48,7 +48,7 @@ import org.junit.Test; import org.xml.sax.SAXException; -public class TestCryptoCLI extends CLITestHelperDFS { +public class TestCryptoAdminCLI extends CLITestHelperDFS { protected MiniDFSCluster dfsCluster = null; protected FileSystem fs = null; protected String namenode = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index c7229228606..e1fb878139b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -68,12 +68,13 @@ public class TestEncryptionZones { private HdfsAdmin dfsAdmin; private DistributedFileSystem fs; private File testRootDir; + private final String TEST_KEY = "testKey"; protected FileSystemTestWrapper fsWrapper; protected FileContextTestWrapper fcWrapper; @Before - public void setup() throws IOException { + public void setup() throws Exception { conf = new HdfsConfiguration(); fsHelper = new FileSystemTestHelper(); // Set up java key store @@ -93,6 +94,8 @@ public void setup() throws IOException { // else the updates do not get flushed properly fs.getClient().provider = cluster.getNameNode().getNamesystem() .getProvider(); + // Create a test key + createKey(TEST_KEY); } @After @@ -143,6 +146,8 @@ private void createKey(String keyName) throws NoSuchAlgorithmException, IOException { KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); final KeyProvider.Options options = KeyProvider.options(conf); + options.setDescription(keyName); + options.setBitLength(128); provider.createKey(keyName, options); provider.flush(); } @@ -155,7 +160,7 @@ public void testBasicOperations() throws Exception { /* Test failure of create EZ on a directory that doesn't exist. 
*/ final Path zone1 = new Path("/zone1"); try { - dfsAdmin.createEncryptionZone(zone1, null); + dfsAdmin.createEncryptionZone(zone1, TEST_KEY); fail("expected /test doesn't exist"); } catch (IOException e) { assertExceptionContains("cannot find", e); @@ -163,13 +168,13 @@ public void testBasicOperations() throws Exception { /* Normal creation of an EZ */ fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); - dfsAdmin.createEncryptionZone(zone1, null); + dfsAdmin.createEncryptionZone(zone1, TEST_KEY); assertNumZones(++numZones); assertZonePresent(null, zone1.toString()); /* Test failure of create EZ on a directory which is already an EZ. */ try { - dfsAdmin.createEncryptionZone(zone1, null); + dfsAdmin.createEncryptionZone(zone1, TEST_KEY); } catch (IOException e) { assertExceptionContains("already in an encryption zone", e); } @@ -178,7 +183,7 @@ public void testBasicOperations() throws Exception { final Path zone1Child = new Path(zone1, "child"); fsWrapper.mkdir(zone1Child, FsPermission.getDirDefault(), false); try { - dfsAdmin.createEncryptionZone(zone1Child, null); + dfsAdmin.createEncryptionZone(zone1Child, TEST_KEY); fail("EZ in an EZ"); } catch (IOException e) { assertExceptionContains("already in an encryption zone", e); @@ -189,7 +194,7 @@ public void testBasicOperations() throws Exception { final Path notEmptyChild = new Path(notEmpty, "child"); fsWrapper.mkdir(notEmptyChild, FsPermission.getDirDefault(), true); try { - dfsAdmin.createEncryptionZone(notEmpty, null); + dfsAdmin.createEncryptionZone(notEmpty, TEST_KEY); fail("Created EZ on an non-empty directory with folder"); } catch (IOException e) { assertExceptionContains("create an encryption zone", e); @@ -199,7 +204,7 @@ public void testBasicOperations() throws Exception { /* create EZ on a folder with a file fails */ fsWrapper.createFile(notEmptyChild); try { - dfsAdmin.createEncryptionZone(notEmpty, null); + dfsAdmin.createEncryptionZone(notEmpty, TEST_KEY); fail("Created EZ on an non-empty directory with file"); } catch (IOException e) { assertExceptionContains("create an encryption zone", e); @@ -215,6 +220,21 @@ public void testBasicOperations() throws Exception { } catch (IOException e) { assertExceptionContains("doesn't exist.", e); } + + /* Test failure of empty and null key name */ + try { + dfsAdmin.createEncryptionZone(zone2, ""); + fail("created a zone with empty key name"); + } catch (IOException e) { + assertExceptionContains("Must specify a key name when creating", e); + } + try { + dfsAdmin.createEncryptionZone(zone2, null); + fail("created a zone with null key name"); + } catch (IOException e) { + assertExceptionContains("Must specify a key name when creating", e); + } + assertNumZones(1); /* Test success of creating an EZ when they key exists. */ @@ -235,7 +255,7 @@ public Object run() throws Exception { final HdfsAdmin userAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); try { - userAdmin.createEncryptionZone(nonSuper, null); + userAdmin.createEncryptionZone(nonSuper, TEST_KEY); fail("createEncryptionZone is superuser-only operation"); } catch (AccessControlException e) { assertExceptionContains("Superuser privilege is required", e); @@ -247,7 +267,7 @@ public Object run() throws Exception { // Test success of creating an encryption zone a few levels down. 
Path deepZone = new Path("/d/e/e/p/zone"); fsWrapper.mkdir(deepZone, FsPermission.getDirDefault(), true); - dfsAdmin.createEncryptionZone(deepZone, null); + dfsAdmin.createEncryptionZone(deepZone, TEST_KEY); assertNumZones(++numZones); assertZonePresent(null, deepZone.toString()); } @@ -266,10 +286,10 @@ public void testListEncryptionZonesAsNonSuperUser() throws Exception { final Path allPath = new Path(testRoot, "accessall"); fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true); - dfsAdmin.createEncryptionZone(superPath, null); + dfsAdmin.createEncryptionZone(superPath, TEST_KEY); fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true); - dfsAdmin.createEncryptionZone(allPath, null); + dfsAdmin.createEncryptionZone(allPath, TEST_KEY); user.doAs(new PrivilegedExceptionAction() { @Override @@ -294,7 +314,7 @@ private void doRenameEncryptionZone(FSTestWrapper wrapper) throws Exception { final Path pathFoo = new Path(testRoot, "foo"); final Path pathFooBaz = new Path(pathFoo, "baz"); wrapper.mkdir(pathFoo, FsPermission.getDirDefault(), true); - dfsAdmin.createEncryptionZone(pathFoo, null); + dfsAdmin.createEncryptionZone(pathFoo, TEST_KEY); wrapper.mkdir(pathFooBaz, FsPermission.getDirDefault(), true); try { wrapper.rename(pathFooBaz, testRoot); @@ -331,7 +351,7 @@ public void testReadWrite() throws Exception { // Create the first enc file final Path zone = new Path("/zone"); fs.mkdirs(zone); - dfsAdmin.createEncryptionZone(zone, null); + dfsAdmin.createEncryptionZone(zone, TEST_KEY); final Path encFile1 = new Path(zone, "myfile"); DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED); // Read them back in and compare byte-by-byte @@ -364,7 +384,7 @@ public void testCipherSuiteNegotiation() throws Exception { new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); final Path zone = new Path("/zone"); fs.mkdirs(zone); - dfsAdmin.createEncryptionZone(zone, null); + dfsAdmin.createEncryptionZone(zone, TEST_KEY); // Create a file in an EZ, which should succeed DFSTestUtil .createFile(fs, new Path(zone, "success1"), 0, (short) 1, 0xFEED); @@ -434,7 +454,7 @@ public void testCreateEZWithNoProvider() throws Exception { /* Normal creation of an EZ */ fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); try { - dfsAdmin.createEncryptionZone(zone1, null); + dfsAdmin.createEncryptionZone(zone1, TEST_KEY); fail("expected exception"); } catch (IOException e) { assertExceptionContains("since no key provider is available", e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml index 2ff2f20ae41..ebbf773b133 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml @@ -50,7 +50,7 @@ Test create ez, dir doesn't exist -fs NAMENODE -ls /test- - -createZone -path /test + -createZone -path /test -keyName myKey @@ -67,8 +67,8 @@ -fs NAMENODE -mkdir /foo -fs NAMENODE -ls /- - -createZone -path /foo - -createZone -path /foo + -createZone -path /foo -keyName myKey + -createZone -path /foo -keyName myKey -fs NAMENODE -rmdir /foo @@ -81,32 +81,14 @@ - - Test success of create ez in which a key is created - - -fs NAMENODE -mkdir /foo - -fs NAMENODE -ls /- - -createZone -path /foo - - - -fs NAMENODE -rmdir /foo - - - - SubstringComparator - Added encryption zone /foo - - - - Test failure of Create EZ operation in an existing EZ. 
-fs NAMENODE -mkdir /foo -fs NAMENODE -ls /- - -createZone -path /foo + -createZone -keyName myKey -path /foo -fs NAMENODE -mkdir /foo/bar - -createZone -path /foo/bar + -createZone -keyName myKey -path /foo/bar -fs NAMENODE -rmdir /foo/bar @@ -126,7 +108,7 @@ -fs NAMENODE -mkdir /foo -fs NAMENODE -touchz /foo/bar -fs NAMENODE -ls /- - -createZone -path /foo + -createZone -keyName myKey -path /foo -fs NAMENODE -rm /foo/bar @@ -159,19 +141,31 @@ - Test success of creating an EZ when the key exists. + Test failure of creating an EZ no path is specified. - -fs NAMENODE -mkdir /foo - -fs NAMENODE -ls /- - -createZone -path /foo -keyName mykey + -createZone -keyName blahKey - -fs NAMENODE -rmdir /foo SubstringComparator - Added encryption zone /foo + You must specify a path + + + + + + Test failure of creating an EZ no key is specified. + + -createZone -path /foo + + + + + + SubstringComparator + You must specify a key name @@ -183,7 +177,7 @@ -fs NAMENODE -mkdir /foo/bar -fs NAMENODE -mkdir /foo/bar/baz -fs NAMENODE -ls /- - -createZone -path /foo/bar/baz + -createZone -path /foo/bar/baz -keyName myKey -fs NAMENODE -rmdir /foo/bar/baz @@ -204,8 +198,8 @@ -fs NAMENODE -mkdir /src -fs NAMENODE -mkdir /dst -fs NAMENODE -ls /- - -createZone -path /src - -createZone -path /dst + -createZone -path /src -keyName myKey + -createZone -path /dst -keyName myKey -fs NAMENODE -mkdir /src/subdir -fs NAMENODE -mv /src/subdir /dst- @@ -228,7 +222,7 @@ -fs NAMENODE -mkdir /src -fs NAMENODE -mkdir /dst -fs NAMENODE -ls /- - -createZone -path /dst + -createZone -path /dst -keyName myKey -fs NAMENODE -mv /src /dst- @@ -249,7 +243,7 @@ -fs NAMENODE -mkdir /src -fs NAMENODE -mkdir /dst -fs NAMENODE -ls /- - -createZone -path /src + -createZone -path /src -keyName myKey -fs NAMENODE -mv /src /dst- @@ -268,7 +262,7 @@ Test success of renaming file intra-EZ -fs NAMENODE -mkdir /src - -createZone -path /src + -createZone -path /src -keyName myKey -fs NAMENODE -mkdir /src/subdir1 -fs NAMENODE -mkdir /src/subdir2 -fs NAMENODE -mv /src/subdir1 /src/subdir2- From 9ca2f34c8fb03227e7364ced5183562e9f805400 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Wed, 30 Jul 2014 01:35:04 +0000 Subject: [PATCH 077/354] HDFS-6730. Create a .RAW extended attribute namespace. (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1614535 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../main/java/org/apache/hadoop/fs/XAttr.java | 13 +- .../org/apache/hadoop/hdfs/XAttrHelper.java | 8 +- .../hadoop/hdfs/protocol/ClientProtocol.java | 6 +- .../server/common/HdfsServerConstants.java | 4 +- .../hdfs/server/namenode/FSNamesystem.java | 14 +- .../namenode/XAttrPermissionFilter.java | 30 ++- .../hadoop-hdfs/src/main/proto/xattr.proto | 1 + .../src/site/apt/ExtendedAttributes.apt.vm | 4 +- .../java/org/apache/hadoop/fs/TestXAttr.java | 11 +- .../hdfs/server/namenode/FSXAttrBaseTest.java | 179 +++++++++++++++++- .../hdfs/server/namenode/TestFSDirectory.java | 15 +- .../src/test/resources/testXAttrConf.xml | 58 +++++- 13 files changed, 313 insertions(+), 32 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 2531a4e4049..34a86e39db7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -68,6 +68,8 @@ fs-encryption (Unreleased) HDFS-6771. 
Require specification of an encryption key when creating an encryption zone. (wang) + HDFS-6730. Create a .RAW extended attribute namespace. (clamb) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java index 99f629afdfe..968ee00ce76 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java @@ -26,8 +26,8 @@ /** * XAttr is the POSIX Extended Attribute model similar to that found in * traditional Operating Systems. Extended Attributes consist of one - * or more name/value pairs associated with a file or directory. Four - * namespaces are defined: user, trusted, security and system. + * or more name/value pairs associated with a file or directory. Five + * namespaces are defined: user, trusted, security, system and raw. * 1) USER namespace attributes may be used by any user to store * arbitrary information. Access permissions in this namespace are * defined by a file directory's permission bits. For sticky directories, @@ -43,6 +43,12 @@ *
* 4) SECURITY namespace attributes are used by the fs kernel for * security features. It is not visible to users. + *
+ * 5) RAW namespace attributes are used for internal system attributes that + * sometimes need to be exposed. Like SYSTEM namespace attributes they are + * not visible to the user except when getXAttr/getXAttrs is called on a file + * or directory in the /.reserved/raw HDFS directory hierarchy. These + * attributes can only be accessed by the superuser. *
* @see * http://en.wikipedia.org/wiki/Extended_file_attributes @@ -55,7 +61,8 @@ public static enum NameSpace { USER, TRUSTED, SECURITY, - SYSTEM; + SYSTEM, + RAW; } private final NameSpace ns; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java index abcd47ae16a..04364ccf7e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java @@ -49,9 +49,9 @@ public static XAttr buildXAttr(String name, byte[] value) { Preconditions.checkNotNull(name, "XAttr name cannot be null."); final int prefixIndex = name.indexOf("."); - if (prefixIndex < 4) {// Prefix length is at least 4. + if (prefixIndex < 3) {// Prefix length is at least 3. throw new HadoopIllegalArgumentException("An XAttr name must be " + - "prefixed with user/trusted/security/system, followed by a '.'"); + "prefixed with user/trusted/security/system/raw, followed by a '.'"); } else if (prefixIndex == name.length() - 1) { throw new HadoopIllegalArgumentException("XAttr name cannot be empty."); } @@ -66,9 +66,11 @@ public static XAttr buildXAttr(String name, byte[] value) { ns = NameSpace.SYSTEM; } else if (prefix.equals(NameSpace.SECURITY.toString().toLowerCase())) { ns = NameSpace.SECURITY; + } else if (prefix.equals(NameSpace.RAW.toString().toLowerCase())) { + ns = NameSpace.RAW; } else { throw new HadoopIllegalArgumentException("An XAttr name must be " + - "prefixed with user/trusted/security/system, followed by a '.'"); + "prefixed with user/trusted/security/system/raw, followed by a '.'"); } XAttr xAttr = (new XAttr.Builder()).setNameSpace(ns).setName(name. substring(prefixIndex + 1)).setValue(value).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 734240f06fa..6ff4ffbbaa1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1335,7 +1335,6 @@ public List getXAttrs(String src, List xAttrs) * @see * http://en.wikipedia.org/wiki/Extended_file_attributes * @param src file or directory - * @param xAttrs xAttrs to get * @return List XAttr list * @throws IOException */ @@ -1345,12 +1344,15 @@ public List listXAttrs(String src) /** * Remove xattr of a file or directory.Value in xAttr parameter is ignored. - * Name must be prefixed with user/trusted/security/system. + * Name must be prefixed with user/trusted/security/system/raw. *
* A regular user only can remove xattr of "user" namespace. * A super user can remove xattr of "user" and "trusted" namespace. * XAttr of "security" and "system" namespace is only used/exposed * internally to the FS impl. + * The xattrs of the "raw" namespace are only used/exposed when accessed in + * the /.reserved/raw HDFS directory hierarchy. These attributes can only be + * accessed by the superuser. *
* @see * http://en.wikipedia.org/wiki/Extended_file_attributes diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 5e4d1f0e5ba..98c6398c2cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -296,8 +296,8 @@ static public enum BlockUCState { public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000; public static final String CRYPTO_XATTR_ENCRYPTION_ZONE = - "system.hdfs.crypto.encryption.zone"; + "raw.hdfs.crypto.encryption.zone"; public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO = - "system.hdfs.crypto.file.encryption.info"; + "raw.hdfs.crypto.file.encryption.info"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index dde19b9bd21..8350225d8ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8594,7 +8594,8 @@ private void setXAttrInt(final String srcArg, XAttr xAttr, checkXAttrSize(xAttr); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); - XAttrPermissionFilter.checkPermissionForApi(pc, xAttr); + XAttrPermissionFilter.checkPermissionForApi(pc, xAttr, + FSDirectory.isReservedRawName(src)); checkOperation(OperationCategory.WRITE); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); writeLock(); @@ -8640,10 +8641,11 @@ List getXAttrs(final String srcArg, List xAttrs) String src = srcArg; nnConf.checkXAttrsConfigFlag(); FSPermissionChecker pc = getPermissionChecker(); + final boolean isRawPath = FSDirectory.isReservedRawName(src); boolean getAll = xAttrs == null || xAttrs.isEmpty(); if (!getAll) { try { - XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs); + XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath); } catch (AccessControlException e) { logAuditEvent(false, "getXAttrs", srcArg); throw e; @@ -8660,7 +8662,7 @@ List getXAttrs(final String srcArg, List xAttrs) } List all = dir.getXAttrs(src); List filteredAll = XAttrPermissionFilter. - filterXAttrsForApi(pc, all); + filterXAttrsForApi(pc, all, isRawPath); if (getAll) { return filteredAll; } else { @@ -8696,6 +8698,7 @@ List getXAttrs(final String srcArg, List xAttrs) List listXAttrs(String src) throws IOException { nnConf.checkXAttrsConfigFlag(); final FSPermissionChecker pc = getPermissionChecker(); + final boolean isRawPath = FSDirectory.isReservedRawName(src); checkOperation(OperationCategory.READ); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); @@ -8708,7 +8711,7 @@ List listXAttrs(String src) throws IOException { } final List all = dir.getXAttrs(src); final List filteredAll = XAttrPermissionFilter. 
- filterXAttrsForApi(pc, all); + filterXAttrsForApi(pc, all, isRawPath); return filteredAll; } catch (AccessControlException e) { logAuditEvent(false, "listXAttrs", src); @@ -8753,7 +8756,8 @@ void removeXAttrInt(final String srcArg, XAttr xAttr, boolean logRetryCache) nnConf.checkXAttrsConfigFlag(); HdfsFileStatus resultingStat = null; FSPermissionChecker pc = getPermissionChecker(); - XAttrPermissionFilter.checkPermissionForApi(pc, xAttr); + XAttrPermissionFilter.checkPermissionForApi(pc, xAttr, + FSDirectory.isReservedRawName(src)); checkOperation(OperationCategory.WRITE); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); writeLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index 98730142fbd..237f9d3d5ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -47,15 +47,27 @@ *
* SYSTEM - extended system attributes: these are used by the HDFS * core and are not available through admin/user API. + *
+ * RAW - extended system attributes: these are used for internal system + * attributes that sometimes need to be exposed. Like SYSTEM namespace + * attributes they are not visible to the user except when getXAttr/getXAttrs + * is called on a file or directory in the /.reserved/raw HDFS directory + * hierarchy. These attributes can only be accessed by the superuser. + *
*/ @InterfaceAudience.Private public class XAttrPermissionFilter { - static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr) + static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr, + boolean isRawPath) throws AccessControlException { + final boolean isSuperUser = pc.isSuperUser(); if (xAttr.getNameSpace() == XAttr.NameSpace.USER || - (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && - pc.isSuperUser())) { + (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) { + return; + } + if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && + isRawPath && isSuperUser) { return; } throw new AccessControlException("User doesn't have permission for xattr: " @@ -63,30 +75,34 @@ static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr) } static void checkPermissionForApi(FSPermissionChecker pc, - List xAttrs) throws AccessControlException { + List xAttrs, boolean isRawPath) throws AccessControlException { Preconditions.checkArgument(xAttrs != null); if (xAttrs.isEmpty()) { return; } for (XAttr xAttr : xAttrs) { - checkPermissionForApi(pc, xAttr); + checkPermissionForApi(pc, xAttr, isRawPath); } } static List filterXAttrsForApi(FSPermissionChecker pc, - List xAttrs) { + List xAttrs, boolean isRawPath) { assert xAttrs != null : "xAttrs can not be null"; if (xAttrs == null || xAttrs.isEmpty()) { return xAttrs; } List filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size()); + final boolean isSuperUser = pc.isSuperUser(); for (XAttr xAttr : xAttrs) { if (xAttr.getNameSpace() == XAttr.NameSpace.USER) { filteredXAttrs.add(xAttr); } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && - pc.isSuperUser()) { + isSuperUser) { + filteredXAttrs.add(xAttr); + } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && + isSuperUser && isRawPath) { filteredXAttrs.add(xAttr); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto index cb86ff27731..acdc28ebb88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto @@ -27,6 +27,7 @@ message XAttrProto { TRUSTED = 1; SECURITY = 2; SYSTEM = 3; + RAW = 4; } required XAttrNamespaceProto namespace = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm index 56aec0ca288..0a99fe50ee6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm @@ -30,7 +30,7 @@ Extended Attributes in HDFS ** {Namespaces and Permissions} - In HDFS, as in Linux, there are four valid namespaces: <<>>, <<>>, <<>>, and <<>>. Each of these namespaces have different access restrictions. + In HDFS, there are five valid namespaces: <<>>, <<>>, <<>>, <<>>, and <<>>. Each of these namespaces have different access restrictions. The <<>> namespace is the namespace that will commonly be used by client applications. Access to extended attributes in the user namespace is controlled by the corresponding file permissions. @@ -40,6 +40,8 @@ Extended Attributes in HDFS The <<>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods. It is currently unused. + The <<>> namespace is reserved for internal system attributes that sometimes need to be exposed. 
Like <<>> namespace attributes they are not visible to the user except when <<>>/<<>> is called on a file or directory in the <<>> HDFS directory hierarchy. These attributes can only be accessed by the superuser. An example of where <<>> namespace extended attributes are used is the <<>> utility. Encryption zone meta data is stored in <<>> extended attributes, so as long as the administrator uses <<>> pathnames in source and target, the encrypted files in the encryption zones are transparently copied. + * {Interacting with extended attributes} The Hadoop shell has support for interacting with extended attributes via <<>> and <<>>. These commands are styled after the Linux {{{http://www.bestbits.at/acl/man/man1/getfattr.txt}getfattr(1)}} and {{{http://www.bestbits.at/acl/man/man1/setfattr.txt}setfattr(1)}} commands. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java index 032a8dfead0..e47658dd6a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java @@ -29,7 +29,7 @@ * Tests for XAttr objects. */ public class TestXAttr { - private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4; + private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4, XATTR5; @BeforeClass public static void setUp() throws Exception { @@ -58,6 +58,11 @@ public static void setUp() throws Exception { .setName("name") .setValue(value) .build(); + XATTR5 = new XAttr.Builder() + .setNameSpace(XAttr.NameSpace.RAW) + .setName("name") + .setValue(value) + .build(); } @Test @@ -65,14 +70,17 @@ public void testXAttrEquals() { assertNotSame(XATTR1, XATTR2); assertNotSame(XATTR2, XATTR3); assertNotSame(XATTR3, XATTR4); + assertNotSame(XATTR4, XATTR5); assertEquals(XATTR, XATTR1); assertEquals(XATTR1, XATTR1); assertEquals(XATTR2, XATTR2); assertEquals(XATTR3, XATTR3); assertEquals(XATTR4, XATTR4); + assertEquals(XATTR5, XATTR5); assertFalse(XATTR1.equals(XATTR2)); assertFalse(XATTR2.equals(XATTR3)); assertFalse(XATTR3.equals(XATTR4)); + assertFalse(XATTR4.equals(XATTR5)); } @Test @@ -81,5 +89,6 @@ public void testXAttrHashCode() { assertFalse(XATTR1.hashCode() == XATTR2.hashCode()); assertFalse(XATTR2.hashCode() == XATTR3.hashCode()); assertFalse(XATTR3.hashCode() == XATTR4.hashCode()); + assertFalse(XATTR4.hashCode() == XATTR5.hashCode()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java index 636ecc2417f..0c7b8070b44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java @@ -69,6 +69,7 @@ public class FSXAttrBaseTest { protected static Configuration conf; private static int pathCount = 0; protected static Path path; + protected static Path rawPath; // XAttrs protected static final String name1 = "user.a1"; @@ -78,6 +79,8 @@ public class FSXAttrBaseTest { protected static final byte[] value2 = {0x37, 0x38, 0x39}; protected static final String name3 = "user.a3"; protected static final String name4 = "user.a4"; + protected static final String raw1 = "raw.a1"; + protected static final String raw2 = "raw.a2"; protected 
FileSystem fs; @@ -107,6 +110,7 @@ public static void shutdown() { public void setUp() throws Exception { pathCount += 1; path = new Path("/p" + pathCount); + rawPath = new Path("/.reserved/raw/p" + pathCount); initFileSystem(); } @@ -395,7 +399,8 @@ public void testGetXAttrs() throws Exception { Assert.fail("expected IOException"); } catch (Exception e) { GenericTestUtils.assertExceptionContains - ("An XAttr name must be prefixed with user/trusted/security/system, " + + ("An XAttr name must be prefixed with " + + "user/trusted/security/system/raw, " + "followed by a '.'", e); } @@ -582,7 +587,7 @@ public void testRemoveXAttrPermissions() throws Exception { /* Unknown namespace should throw an exception. */ final String expectedExceptionString = "An XAttr name must be prefixed " + - "with user/trusted/security/system, followed by a '.'"; + "with user/trusted/security/system/raw, followed by a '.'"; try { fs.removeXAttr(path, "wackynamespace.foo"); Assert.fail("expected IOException"); @@ -918,6 +923,176 @@ public void testXAttrAcl() throws Exception { fsAsDiana.removeXAttr(path, name2); } + @Test(timeout = 120000) + public void testRawXAttrs() throws Exception { + final UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] {"mygroup"}); + + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750)); + fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + + { + // getXAttr + final byte[] value = fs.getXAttr(rawPath, raw1); + Assert.assertArrayEquals(value, value1); + } + + { + // getXAttrs + final Map xattrs = fs.getXAttrs(rawPath); + Assert.assertEquals(xattrs.size(), 1); + Assert.assertArrayEquals(value1, xattrs.get(raw1)); + fs.removeXAttr(rawPath, raw1); + } + + { + // replace and re-get + fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(rawPath, raw1, newValue1, EnumSet.of(XAttrSetFlag.CREATE, + XAttrSetFlag.REPLACE)); + + final Map xattrs = fs.getXAttrs(rawPath); + Assert.assertEquals(xattrs.size(), 1); + Assert.assertArrayEquals(newValue1, xattrs.get(raw1)); + + fs.removeXAttr(rawPath, raw1); + } + + { + // listXAttrs on rawPath ensuring raw.* xattrs are returned + fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE)); + + final List xattrNames = fs.listXAttrs(rawPath); + assertTrue(xattrNames.contains(raw1)); + assertTrue(xattrNames.contains(raw2)); + assertTrue(xattrNames.size() == 2); + fs.removeXAttr(rawPath, raw1); + fs.removeXAttr(rawPath, raw2); + } + + { + // listXAttrs on non-rawPath ensuring no raw.* xattrs returned + fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE)); + + final List xattrNames = fs.listXAttrs(path); + assertTrue(xattrNames.size() == 0); + fs.removeXAttr(rawPath, raw1); + fs.removeXAttr(rawPath, raw2); + } + + { + /* + * Test non-root user operations in the "raw.*" namespace. 
+ */ + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + // Test that non-root can not set xattrs in the "raw.*" namespace + try { + // non-raw path + userFs.setXAttr(path, raw1, value1); + fail("setXAttr should have thrown"); + } catch (AccessControlException e) { + // ignore + } + + try { + // raw path + userFs.setXAttr(rawPath, raw1, value1); + fail("setXAttr should have thrown"); + } catch (AccessControlException e) { + // ignore + } + + // Test that non-root can not do getXAttrs in the "raw.*" namespace + try { + // non-raw path + userFs.getXAttrs(rawPath); + fail("getXAttrs should have thrown"); + } catch (AccessControlException e) { + // ignore + } + + try { + // raw path + userFs.getXAttrs(path); + fail("getXAttrs should have thrown"); + } catch (AccessControlException e) { + // ignore + } + + // Test that non-root can not do getXAttr in the "raw.*" namespace + try { + // non-raw path + userFs.getXAttr(rawPath, raw1); + fail("getXAttr should have thrown"); + } catch (AccessControlException e) { + // ignore + } + + try { + // raw path + userFs.getXAttr(path, raw1); + fail("getXAttr should have thrown"); + } catch (AccessControlException e) { + // ignore + } + return null; + } + }); + } + + { + /* + * Test that non-root can not do getXAttr in the "raw.*" namespace + */ + fs.setXAttr(rawPath, raw1, value1); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + try { + // non-raw path + userFs.getXAttr(rawPath, raw1); + fail("getXAttr should have thrown"); + } catch (AccessControlException e) { + // ignore + } + + try { + // raw path + userFs.getXAttr(path, raw1); + fail("getXAttr should have thrown"); + } catch (AccessControlException e) { + // ignore + } + + /* + * Test that only root can see raw.* xattrs returned from listXAttr + * and non-root can't do listXAttrs on /.reserved/raw. + */ + // non-raw path + final List xattrNames = userFs.listXAttrs(path); + assertTrue(xattrNames.size() == 0); + try { + // raw path + userFs.listXAttrs(rawPath); + fail("listXAttrs on raw path should have thrown"); + } catch (AccessControlException e) { + // ignore + } + + return null; + } + }); + fs.removeXAttr(rawPath, raw1); + } + } + /** * Creates a FileSystem for the super-user. * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java index 011901ddfa7..ad067cfa197 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java @@ -191,14 +191,19 @@ public void testINodeXAttrsLimit() throws Exception { existingXAttrs.add(xAttr1); existingXAttrs.add(xAttr2); - // Adding a system namespace xAttr, isn't affected by inode xAttrs limit. - XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM). + // Adding system and raw namespace xAttrs aren't affected by inode + // xAttrs limit. + XAttr newSystemXAttr = (new XAttr.Builder()). + setNameSpace(XAttr.NameSpace.SYSTEM).setName("a3"). + setValue(new byte[]{0x33, 0x33, 0x33}).build(); + XAttr newRawXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.RAW). 
setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build(); - List newXAttrs = Lists.newArrayListWithCapacity(1); - newXAttrs.add(newXAttr); + List newXAttrs = Lists.newArrayListWithCapacity(2); + newXAttrs.add(newSystemXAttr); + newXAttrs.add(newRawXAttr); List xAttrs = fsdir.setINodeXAttrs(existingXAttrs, newXAttrs, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); - assertEquals(xAttrs.size(), 3); + assertEquals(xAttrs.size(), 4); // Adding a trusted namespace xAttr, is affected by inode xAttrs limit. XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml index 7b7f866ac38..3414f5719dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml @@ -64,7 +64,7 @@ SubstringComparator - name must be prefixed with user/trusted/security/system, followed by a '.' + name must be prefixed with user/trusted/security/system/raw, followed by a '.' @@ -125,6 +125,42 @@ + + setfattr : Add an xattr of raw namespace + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n raw.a1 -v 123456 /file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + setfattr: User doesn't have permission for xattr: raw.a1 + + + + + + + setfattr : Add an xattr of raw namespace + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n raw.a1 -v 123456 /.reserved/raw/file1 + -fs NAMENODE -getfattr -n raw.a1 /.reserved/raw/file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + raw.a1="123456" + + + + setfattr : Add an xattr, and encode is text @@ -256,6 +292,26 @@ + + + setfattr : Remove an xattr of raw namespace + + -fs NAMENODE -touchz /file1 + -fs NAMENODE -setfattr -n raw.a1 -v 123456 /.reserved/raw/file1 + -fs NAMENODE -setfattr -n raw.a2 -v 123456 /.reserved/raw/file1 + -fs NAMENODE -setfattr -x raw.a2 /.reserved/raw/file1 + -fs NAMENODE -getfattr -d /.reserved/raw/file1 + + + -fs NAMENODE -rm /file1 + + + + SubstringComparator + # file: /.reserved/raw/file1#LF#raw.a1="123456"#LF# + + + getfattr : Get an xattr From 7e54b1c6d9dc3a7fb07df36347130d605bd0a718 Mon Sep 17 00:00:00 2001 From: Jian He Date: Wed, 30 Jul 2014 03:58:59 +0000 Subject: [PATCH 078/354] YARN-2354. DistributedShell may allocate more containers than client specified after AM restarts. Contributed by Li Lu git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614538 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../distributedshell/ApplicationMaster.java | 11 ++++++----- .../distributedshell/TestDSFailedAppMaster.java | 6 ++++-- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index d3efd1cf7b8..ad368cc95e8 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -111,6 +111,9 @@ Release 2.6.0 - UNRELEASED YARN-1796. container-executor shouldn't require o-r permissions (atm) + YARN-2354. DistributedShell may allocate more containers than client + specified after AM restarts. 
(Li Lu via jianhe) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index 5e1cbbcd932..9051d31089f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -208,7 +208,8 @@ public static enum DSEntity { // App Master configuration // No. of containers to run shell command on - private int numTotalContainers = 1; + @VisibleForTesting + protected int numTotalContainers = 1; // Memory to request for the container on which the shell command will run private int containerMemory = 10; // VirtualCores to request for the container on which the shell command will run @@ -594,8 +595,8 @@ public void run() throws YarnException, IOException { List previousAMRunningContainers = response.getContainersFromPreviousAttempts(); - LOG.info("Received " + previousAMRunningContainers.size() - + " previous AM's running containers on AM registration."); + LOG.info(appAttemptID + " received " + previousAMRunningContainers.size() + + " previous attempts' running containers on AM registration."); numAllocatedContainers.addAndGet(previousAMRunningContainers.size()); int numTotalContainersToRequest = @@ -610,7 +611,7 @@ public void run() throws YarnException, IOException { ContainerRequest containerAsk = setupContainerAskForRM(); amRMClient.addContainerRequest(containerAsk); } - numRequestedContainers.set(numTotalContainersToRequest); + numRequestedContainers.set(numTotalContainers); try { publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_END); @@ -689,7 +690,7 @@ public void onContainersCompleted(List completedContainers) { LOG.info("Got response from RM for container ask, completedCnt=" + completedContainers.size()); for (ContainerStatus containerStatus : completedContainers) { - LOG.info("Got container status for containerID=" + LOG.info(appAttemptID + " got container status for containerID=" + containerStatus.getContainerId() + ", state=" + containerStatus.getState() + ", exitStatus=" + containerStatus.getExitStatus() + ", diagnostics=" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSFailedAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSFailedAppMaster.java index db7419bc8e6..f3ab4b7538b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSFailedAppMaster.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSFailedAppMaster.java @@ -36,9 +36,11 @@ public void run() throws YarnException, IOException { if (appAttemptID.getAttemptId() == 2) { // should reuse the earlier running container, so numAllocatedContainers // should be set to 1. And should ask no more containers, so - // numRequestedContainers should be set to 0. + // numRequestedContainers should be the same as numTotalContainers. + // The only container is the container requested by the AM in the first + // attempt. if (numAllocatedContainers.get() != 1 - || numRequestedContainers.get() != 0) { + || numRequestedContainers.get() != numTotalContainers) { LOG.info("NumAllocatedContainers is " + numAllocatedContainers.get() + " and NumRequestedContainers is " + numAllocatedContainers.get() + ".Application Master failed. exiting"); From 12241908ad5b9a38068241a22d3d29e4a9184e98 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 30 Jul 2014 05:53:20 +0000 Subject: [PATCH 079/354] HDFS-6665. Add tests for XAttrs in combination with viewfs. Contributed by Stephen Chu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614545 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../viewfs/TestViewFileSystemWithXAttrs.java | 151 ++++++++++++++++++ .../fs/viewfs/TestViewFsWithXAttrs.java | 148 +++++++++++++++++ 3 files changed, 302 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fca7e231045..df7052425a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -326,6 +326,9 @@ Release 2.6.0 - UNRELEASED HDFS-6739. Add getDatanodeStorageReport to ClientProtocol. (szetszwo) + HDFS-6665. Add tests for XAttrs in combination with viewfs. + (Stephen Chu via wang) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java new file mode 100644 index 00000000000..227548de99d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java @@ -0,0 +1,151 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.viewfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +/** + * Verify XAttrs through ViewFileSystem functionality. + */ +public class TestViewFileSystemWithXAttrs { + + private static MiniDFSCluster cluster; + private static Configuration clusterConf = new Configuration(); + private static FileSystem fHdfs; + private static FileSystem fHdfs2; + private FileSystem fsView; + private Configuration fsViewConf; + private FileSystem fsTarget, fsTarget2; + private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2; + private FileSystemTestHelper fileSystemTestHelper = + new FileSystemTestHelper("/tmp/TestViewFileSystemWithXAttrs"); + + // XAttrs + protected static final String name1 = "user.a1"; + protected static final byte[] value1 = {0x31, 0x32, 0x33}; + protected static final String name2 = "user.a2"; + protected static final byte[] value2 = {0x37, 0x38, 0x39}; + + @BeforeClass + public static void clusterSetupAtBeginning() throws IOException { + cluster = new MiniDFSCluster.Builder(clusterConf) + .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)) + .numDataNodes(2) + .build(); + cluster.waitClusterUp(); + + fHdfs = cluster.getFileSystem(0); + fHdfs2 = cluster.getFileSystem(1); + } + + @AfterClass + public static void ClusterShutdownAtEnd() throws Exception { + cluster.shutdown(); + } + + @Before + public void setUp() throws Exception { + fsTarget = fHdfs; + fsTarget2 = fHdfs2; + targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget); + targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2); + + fsTarget.delete(targetTestRoot, true); + fsTarget2.delete(targetTestRoot2, true); + fsTarget.mkdirs(targetTestRoot); + fsTarget2.mkdirs(targetTestRoot2); + + fsViewConf = ViewFileSystemTestSetup.createConfig(); + setupMountPoints(); + fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf); + } + + private void setupMountPoints() { + mountOnNn1 = new Path("/mountOnNn1"); + mountOnNn2 = new Path("/mountOnNn2"); + ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), + targetTestRoot.toUri()); + ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), + targetTestRoot2.toUri()); + } + + @After + public void tearDown() throws Exception { + fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true); + fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true); + } + + /** + * Verify a ViewFileSystem wrapped over multiple federated NameNodes will + * dispatch the XAttr operations to the correct NameNode. 
+ */ + @Test + public void testXAttrOnMountEntry() throws Exception { + // Set XAttrs on the first namespace and verify they are correct + fsView.setXAttr(mountOnNn1, name1, value1); + fsView.setXAttr(mountOnNn1, name2, value2); + assertEquals(2, fsView.getXAttrs(mountOnNn1).size()); + assertArrayEquals(value1, fsView.getXAttr(mountOnNn1, name1)); + assertArrayEquals(value2, fsView.getXAttr(mountOnNn1, name2)); + // Double-check by getting the XAttrs using FileSystem + // instead of ViewFileSystem + assertArrayEquals(value1, fHdfs.getXAttr(targetTestRoot, name1)); + assertArrayEquals(value2, fHdfs.getXAttr(targetTestRoot, name2)); + + // Paranoid check: verify the other namespace does not + // have XAttrs set on the same path. + assertEquals(0, fsView.getXAttrs(mountOnNn2).size()); + assertEquals(0, fHdfs2.getXAttrs(targetTestRoot2).size()); + + // Remove the XAttr entries on the first namespace + fsView.removeXAttr(mountOnNn1, name1); + fsView.removeXAttr(mountOnNn1, name2); + assertEquals(0, fsView.getXAttrs(mountOnNn1).size()); + assertEquals(0, fHdfs.getXAttrs(targetTestRoot).size()); + + // Now set XAttrs on the second namespace + fsView.setXAttr(mountOnNn2, name1, value1); + fsView.setXAttr(mountOnNn2, name2, value2); + assertEquals(2, fsView.getXAttrs(mountOnNn2).size()); + assertArrayEquals(value1, fsView.getXAttr(mountOnNn2, name1)); + assertArrayEquals(value2, fsView.getXAttr(mountOnNn2, name2)); + assertArrayEquals(value1, fHdfs2.getXAttr(targetTestRoot2, name1)); + assertArrayEquals(value2, fHdfs2.getXAttr(targetTestRoot2, name2)); + + fsView.removeXAttr(mountOnNn2, name1); + fsView.removeXAttr(mountOnNn2, name2); + assertEquals(0, fsView.getXAttrs(mountOnNn2).size()); + assertEquals(0, fHdfs2.getXAttrs(targetTestRoot2).size()); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java new file mode 100644 index 00000000000..431f851406b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.viewfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileContextTestHelper; +import org.apache.hadoop.fs.FsConstants; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +/** + * Verify XAttrs through ViewFs functionality. + */ +public class TestViewFsWithXAttrs { + + private static MiniDFSCluster cluster; + private static Configuration clusterConf = new Configuration(); + private static FileContext fc, fc2; + private FileContext fcView, fcTarget, fcTarget2; + private Configuration fsViewConf; + private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2; + private FileContextTestHelper fileContextTestHelper = + new FileContextTestHelper("/tmp/TestViewFsWithXAttrs"); + + // XAttrs + protected static final String name1 = "user.a1"; + protected static final byte[] value1 = {0x31, 0x32, 0x33}; + protected static final String name2 = "user.a2"; + protected static final byte[] value2 = {0x37, 0x38, 0x39}; + + @BeforeClass + public static void clusterSetupAtBeginning() throws IOException { + cluster = new MiniDFSCluster.Builder(clusterConf) + .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)) + .numDataNodes(2) + .build(); + cluster.waitClusterUp(); + + fc = FileContext.getFileContext(cluster.getURI(0), clusterConf); + fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf); + } + + @AfterClass + public static void ClusterShutdownAtEnd() throws Exception { + cluster.shutdown(); + } + + @Before + public void setUp() throws Exception { + fcTarget = fc; + fcTarget2 = fc2; + targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc); + targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2); + + fcTarget.delete(targetTestRoot, true); + fcTarget2.delete(targetTestRoot2, true); + fcTarget.mkdir(targetTestRoot, new FsPermission((short) 0750), true); + fcTarget2.mkdir(targetTestRoot2, new FsPermission((short) 0750), true); + + fsViewConf = ViewFileSystemTestSetup.createConfig(); + setupMountPoints(); + fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf); + } + + private void setupMountPoints() { + mountOnNn1 = new Path("/mountOnNn1"); + mountOnNn2 = new Path("/mountOnNn2"); + ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri()); + ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri()); + } + + @After + public void tearDown() throws Exception { + fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true); + fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true); + } + + /** + * Verify a ViewFs wrapped over multiple federated NameNodes will + * dispatch the XAttr operations to the correct NameNode. 
+ */ + @Test + public void testXAttrOnMountEntry() throws Exception { + // Set XAttrs on the first namespace and verify they are correct + fcView.setXAttr(mountOnNn1, name1, value1); + fcView.setXAttr(mountOnNn1, name2, value2); + assertEquals(2, fcView.getXAttrs(mountOnNn1).size()); + assertArrayEquals(value1, fcView.getXAttr(mountOnNn1, name1)); + assertArrayEquals(value2, fcView.getXAttr(mountOnNn1, name2)); + // Double-check by getting the XAttrs using FileSystem + // instead of ViewFs + assertArrayEquals(value1, fc.getXAttr(targetTestRoot, name1)); + assertArrayEquals(value2, fc.getXAttr(targetTestRoot, name2)); + + // Paranoid check: verify the other namespace does not + // have XAttrs set on the same path. + assertEquals(0, fcView.getXAttrs(mountOnNn2).size()); + assertEquals(0, fc2.getXAttrs(targetTestRoot2).size()); + + // Remove the XAttr entries on the first namespace + fcView.removeXAttr(mountOnNn1, name1); + fcView.removeXAttr(mountOnNn1, name2); + assertEquals(0, fcView.getXAttrs(mountOnNn1).size()); + assertEquals(0, fc.getXAttrs(targetTestRoot).size()); + + // Now set XAttrs on the second namespace + fcView.setXAttr(mountOnNn2, name1, value1); + fcView.setXAttr(mountOnNn2, name2, value2); + assertEquals(2, fcView.getXAttrs(mountOnNn2).size()); + assertArrayEquals(value1, fcView.getXAttr(mountOnNn2, name1)); + assertArrayEquals(value2, fcView.getXAttr(mountOnNn2, name2)); + assertArrayEquals(value1, fc2.getXAttr(targetTestRoot2, name1)); + assertArrayEquals(value2, fc2.getXAttr(targetTestRoot2, name2)); + + fcView.removeXAttr(mountOnNn2, name1); + fcView.removeXAttr(mountOnNn2, name2); + assertEquals(0, fcView.getXAttrs(mountOnNn2).size()); + assertEquals(0, fc2.getXAttrs(targetTestRoot2).size()); + } +} From 09e7ff05df5abf43bafe40ff24c3a39a5df80070 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 30 Jul 2014 05:57:55 +0000 Subject: [PATCH 080/354] HDFS-6778. The extended attributes javadoc should simply refer to the user docs. Contributed by Charles Lamb. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614547 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/fs/AbstractFileSystem.java | 90 +++--------------- .../org/apache/hadoop/fs/FileContext.java | 91 +++--------------- .../java/org/apache/hadoop/fs/FileSystem.java | 92 +++---------------- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/protocol/ClientProtocol.java | 52 ++++------- 5 files changed, 64 insertions(+), 264 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index b82ea7ffe43..5b456b1eff7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -1040,21 +1040,10 @@ public AclStatus getAclStatus(Path path) throws IOException { /** * Set an xattr of a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only set an xattr for the "user" namespace. - * The super user can set an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * The access permissions of an xattr in the "user" namespace are - * defined by the file and directory permission bits. - * An xattr can only be set when the logged-in user has the correct permissions. - * If the xattr exists, it will be replaced. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to modify * @param name xattr name. @@ -1069,21 +1058,10 @@ public void setXAttr(Path path, String name, byte[] value) /** * Set an xattr of a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only set an xattr for the "user" namespace. - * The super user can set an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * The access permissions of an xattr in the "user" namespace are - * defined by the file and directory permission bits. - * An xattr can only be set when the logged-in user has the correct permissions. - * If the xattr exists, it will be replaced. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to modify * @param name xattr name. @@ -1099,18 +1077,10 @@ public void setXAttr(Path path, String name, byte[] value, /** * Get an xattr for a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only get an xattr for the "user" namespace. - * The super user can get an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * An xattr will only be returned when the logged-in user has the correct permissions. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attribute * @param name xattr name. @@ -1127,13 +1097,7 @@ public byte[] getXAttr(Path path, String name) throws IOException { * Only those xattrs for which the logged-in user has permissions to view * are returned. *
- * A regular user can only get xattrs for the "user" namespace. - * The super user can only get xattrs for "user" and "trusted" namespaces. - * The xattr of "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attributes * @return Map describing the XAttrs of the file or directory @@ -1149,13 +1113,7 @@ public Map getXAttrs(Path path) throws IOException { * Only those xattrs for which the logged-in user has permissions to view * are returned. *
- * A regular user can only get xattrs for the "user" namespace. - * The super user can only get xattrs for "user" and "trusted" namespaces. - * The xattr of "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attributes * @param names XAttr names. @@ -1173,14 +1131,7 @@ public Map getXAttrs(Path path, List names) * Only the xattr names for which the logged-in user has permissions to view * are returned. *
- * A regular user can only get xattr names for the "user" namespace. - * The super user can only get xattr names for the "user" and "trusted" - * namespaces. - * The xattr names in the "security" and "system" namespaces are only - * used/exposed internally by/to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attributes * @return Map describing the XAttrs of the file or directory @@ -1194,21 +1145,10 @@ public List listXAttrs(Path path) /** * Remove an xattr of a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only remove an xattr for the "user" namespace. - * The super user can remove an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * The access permissions of an xattr in the "user" namespace are - * defined by the file and directory permission bits. - * An xattr can only be set when the logged-in user has the correct permissions. - * If the xattr exists, it will be replaced. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to remove extended attribute * @param name xattr name diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index 2bfcbdccebd..808709859a9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -2297,21 +2297,10 @@ public AclStatus next(final AbstractFileSystem fs, final Path p) /** * Set an xattr of a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only set an xattr for the "user" namespace. - * The super user can set an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * The access permissions of an xattr in the "user" namespace are - * defined by the file and directory permission bits. - * An xattr can only be set when the logged-in user has the correct permissions. - * If the xattr exists, it will be replaced. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to modify * @param name xattr name. @@ -2326,21 +2315,10 @@ public void setXAttr(Path path, String name, byte[] value) /** * Set an xattr of a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only set an xattr for the "user" namespace. - * The super user can set an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * The access permissions of an xattr in the "user" namespace are - * defined by the file and directory permission bits. - * An xattr can only be set when the logged-in user has the correct permissions. - * If the xattr exists, it will be replaced. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to modify * @param name xattr name. @@ -2363,19 +2341,10 @@ public Void next(final AbstractFileSystem fs, final Path p) /** * Get an xattr for a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * - * A regular user can only get an xattr for the "user" namespace. - * The super user can get an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * An xattr will only be returned when the logged-in user has the correct permissions. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attribute * @param name xattr name. @@ -2398,13 +2367,7 @@ public byte[] next(final AbstractFileSystem fs, final Path p) * Only those xattrs for which the logged-in user has permissions to view * are returned. *
- * A regular user can only get xattrs for the "user" namespace. - * The super user can only get xattrs for "user" and "trusted" namespaces. - * The xattr of "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attributes * @return Map describing the XAttrs of the file or directory @@ -2426,13 +2389,7 @@ public Map next(final AbstractFileSystem fs, final Path p) * Only those xattrs for which the logged-in user has permissions to view * are returned. *
- * A regular user can only get xattrs for the "user" namespace. - * The super user can only get xattrs for "user" and "trusted" namespaces. - * The xattr of "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attributes * @param names XAttr names. @@ -2453,21 +2410,10 @@ public Map next(final AbstractFileSystem fs, final Path p) /** * Remove an xattr of a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only remove an xattr for the "user" namespace. - * The super user can remove an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * The access permissions of an xattr in the "user" namespace are - * defined by the file and directory permission bits. - * An xattr can only be set when the logged-in user has the correct permissions. - * If the xattr exists, it will be replaced. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to remove extended attribute * @param name xattr name @@ -2490,14 +2436,7 @@ public Void next(final AbstractFileSystem fs, final Path p) * Only those xattr names which the logged-in user has permissions to view * are returned. *
- * A regular user can only get xattr names for the "user" namespace. - * The super user can only get xattr names for "user" and "trusted" - * namespaces. - * The xattrs of the "security" and "system" namespaces are only - * used/exposed internally by/to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attributes * @return List of the XAttr names of the file or directory diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index cb921c88424..1eb54d16a9b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -2364,21 +2364,10 @@ public AclStatus getAclStatus(Path path) throws IOException { /** * Set an xattr of a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only set an xattr for the "user" namespace. - * The super user can set an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * The access permissions of an xattr in the "user" namespace are - * defined by the file and directory permission bits. - * An xattr can only be set when the logged-in user has the correct permissions. - * If the xattr exists, it will be replaced. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to modify * @param name xattr name. @@ -2393,21 +2382,10 @@ public void setXAttr(Path path, String name, byte[] value) /** * Set an xattr of a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only set an xattr for the "user" namespace. - * The super user can set an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * The access permissions of an xattr in the "user" namespace are - * defined by the file and directory permission bits. - * An xattr can only be set if the logged-in user has the correct permissions. - * If the xattr exists, it is replaced. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to modify * @param name xattr name. @@ -2423,20 +2401,10 @@ public void setXAttr(Path path, String name, byte[] value, /** * Get an xattr name and value for a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * - * A regular user can only get an xattr for the "user" namespace. - * The super user can get an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * An xattr will only be returned if the logged-in user has the - * correct permissions. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attribute * @param name xattr name. @@ -2453,13 +2421,7 @@ public byte[] getXAttr(Path path, String name) throws IOException { * Only those xattrs which the logged-in user has permissions to view * are returned. *
- * A regular user can only get xattrs for the "user" namespace. - * The super user can only get xattrs for "user" and "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attributes * @return Map describing the XAttrs of the file or directory @@ -2475,13 +2437,7 @@ public Map getXAttrs(Path path) throws IOException { * Only those xattrs which the logged-in user has permissions to view * are returned. *
- * A regular user can only get xattrs for the "user" namespace. - * The super user can only get xattrs for "user" and "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attributes * @param names XAttr names. @@ -2499,14 +2455,7 @@ public Map getXAttrs(Path path, List names) * Only those xattr names which the logged-in user has permissions to view * are returned. *
- * A regular user can only get xattr names for the "user" namespace. - * The super user can only get xattr names for "user" and "trusted" - * namespaces. - * The xattrs of the "security" and "system" namespaces are only - * used/exposed internally by/to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to get extended attributes * @return List of the XAttr names of the file or directory @@ -2519,21 +2468,10 @@ public List listXAttrs(Path path) throws IOException { /** * Remove an xattr of a file or directory. - * The name must be prefixed with user/trusted/security/system and - * followed by ".". For example, "user.attr". + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user can only remove an xattr for the "user" namespace. - * The super user can remove an xattr of either the "user" or "trusted" namespaces. - * The xattrs of the "security" and "system" namespaces are only used/exposed - * internally by/to the FS impl. - *
- * The access permissions of an xattr in the "user" namespace are - * defined by the file and directory permission bits. - * An xattr can only be set when the logged-in user has the correct permissions. - * If the xattr exists, it will be replaced. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. * * @param path Path to remove extended attribute * @param name xattr name diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index df7052425a1..d9751c87598 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -329,6 +329,9 @@ Release 2.6.0 - UNRELEASED HDFS-6665. Add tests for XAttrs in combination with viewfs. (Stephen Chu via wang) + HDFS-6778. The extended attributes javadoc should simply refer to the + user docs. (clamb via wang) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index a2a52fef389..74eca82fbe0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1267,17 +1267,11 @@ public void removeAclEntries(String src, List aclSpec) /** * Set xattr of a file or directory. - * A regular user only can set xattr of "user" namespace. - * A super user can set xattr of "user" and "trusted" namespace. - * XAttr of "security" and "system" namespace is only used/exposed - * internally to the FS impl. + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * For xattr of "user" namespace, its access permissions are - * defined by the file or directory permission bits. - * XAttr will be set only when login user has correct permissions. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. + * * @param src file or directory * @param xAttr XAttr to set * @param flag set flag @@ -1288,18 +1282,13 @@ public void setXAttr(String src, XAttr xAttr, EnumSet flag) throws IOException; /** - * Get xattrs of file or directory. Values in xAttrs parameter are ignored. - * If xattrs is null or empty, equals getting all xattrs of the file or - * directory. - * Only xattrs which login user has correct permissions will be returned. + * Get xattrs of a file or directory. Values in xAttrs parameter are ignored. + * If xAttrs is null or empty, this is the same as getting all xattrs of the + * file or directory. Only those xattrs for which the logged-in user has + * permissions to view are returned. *
- * A regular user only can get xattr of "user" namespace. - * A super user can get xattr of "user" and "trusted" namespace. - * XAttr of "security" and "system" namespace is only used/exposed - * internally to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. + * * @param src file or directory * @param xAttrs xAttrs to get * @return List XAttr list @@ -1314,13 +1303,8 @@ public List getXAttrs(String src, List xAttrs) * Only the xattr names for which the logged in user has the permissions to * access will be returned. *
- * A regular user only can get xattr names from the "user" namespace. - * A super user can get xattr names of the "user" and "trusted" namespace. - * XAttr names of the "security" and "system" namespaces are only used/exposed - * internally by the file system impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. + * * @param src file or directory * @param xAttrs xAttrs to get * @return List XAttr list @@ -1332,15 +1316,11 @@ public List listXAttrs(String src) /** * Remove xattr of a file or directory.Value in xAttr parameter is ignored. - * Name must be prefixed with user/trusted/security/system. + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". *
- * A regular user only can remove xattr of "user" namespace. - * A super user can remove xattr of "user" and "trusted" namespace. - * XAttr of "security" and "system" namespace is only used/exposed - * internally to the FS impl. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes + * Refer to the HDFS extended attributes user documentation for details. + * * @param src file or directory * @param xAttr XAttr to remove * @throws IOException From f6436c0720a56da29d582ebec855854ccd6e7c28 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 30 Jul 2014 14:05:59 +0000 Subject: [PATCH 081/354] HDFS-4629. Using com.sun.org.apache.xml.internal.serialize.* in XmlEditsVisitor.java is JVM vendor specific. Breaks IBM JAVA. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614663 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++++ hadoop-hdfs-project/hadoop-hdfs/pom.xml | 5 +++++ .../hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java | 4 ++-- hadoop-project/pom.xml | 6 ++++++ 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d9751c87598..f750178561a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -394,6 +394,10 @@ Release 2.6.0 - UNRELEASED HDFS-6749. FSNamesystem methods should call resolvePath. (Charles Lamb via cnauroth) + HDFS-4629. Using com.sun.org.apache.xml.internal.serialize.* in + XmlEditsVisitor.java is JVM vendor specific. Breaks IBM JAVA. + (Amir Sanjar via stevel) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 846c46fb3d0..2d48918d6ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -176,6 +176,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> netty compile + + xerces + xercesImpl + compile + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java index b4fa791e741..7a39ba6072b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java @@ -29,8 +29,8 @@ import org.xml.sax.SAXException; import org.xml.sax.helpers.AttributesImpl; -import com.sun.org.apache.xml.internal.serialize.OutputFormat; -import com.sun.org.apache.xml.internal.serialize.XMLSerializer; +import org.apache.xml.serialize.OutputFormat; +import org.apache.xml.serialize.XMLSerializer; /** * An XmlEditsVisitor walks over an EditLog structure and writes out diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 2dc83dd4f92..bbe334565ed 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -790,6 +790,12 @@ microsoft-windowsazure-storage-sdk 0.6.0 + + + xerces + xercesImpl + 2.9.1 + From 9b2bfc7c28f6648bbaf7675c6e708237eb654db0 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Wed, 30 Jul 2014 14:49:06 +0000 Subject: [PATCH 082/354] HDFS-4629. Using com.sun.org.apache.xml.internal.serialize.* in XmlEditsVisitor.java is JVM vendor specific. 
Breaks IBM JAVA -pom indentation git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614673 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-project/pom.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index bbe334565ed..63705e94568 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -789,13 +789,13 @@ com.microsoft.windowsazure.storage microsoft-windowsazure-storage-sdk 0.6.0 - + - + xerces xercesImpl 2.9.1 - + From 535fe14dedbf919442ec03ac573315c7a16a6dbe Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Wed, 30 Jul 2014 17:49:09 +0000 Subject: [PATCH 083/354] HDFS-6570. add api that enables checking if a user has certain permissions on a file. Contributed by Jitendra Pandey. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614723 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/fs/AbstractFileSystem.java | 13 ++++ .../org/apache/hadoop/fs/FileContext.java | 50 ++++++++++++ .../java/org/apache/hadoop/fs/FileSystem.java | 67 ++++++++++++++++ .../apache/hadoop/fs/FilterFileSystem.java | 7 ++ .../java/org/apache/hadoop/fs/FilterFs.java | 8 ++ .../hadoop/fs/viewfs/ChRootedFileSystem.java | 8 ++ .../apache/hadoop/fs/viewfs/ChRootedFs.java | 7 ++ .../hadoop/fs/viewfs/ViewFileSystem.java | 10 ++- .../org/apache/hadoop/fs/viewfs/ViewFs.java | 9 +++ .../apache/hadoop/fs/TestHarFileSystem.java | 3 + hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../main/java/org/apache/hadoop/fs/Hdfs.java | 6 ++ .../org/apache/hadoop/hdfs/DFSClient.java | 12 +++ .../hadoop/hdfs/DistributedFileSystem.java | 20 +++++ .../hadoop/hdfs/protocol/ClientProtocol.java | 19 +++++ ...amenodeProtocolServerSideTranslatorPB.java | 16 ++++ .../ClientNamenodeProtocolTranslatorPB.java | 13 ++++ .../hadoop/hdfs/protocolPB/PBHelper.java | 4 +- .../hdfs/server/namenode/FSNamesystem.java | 23 ++++++ .../server/namenode/NameNodeRpcServer.java | 6 ++ .../web/resources/NamenodeWebHdfsMethods.java | 23 ++++-- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 7 ++ .../hdfs/web/resources/FsActionParam.java | 58 ++++++++++++++ .../hadoop/hdfs/web/resources/GetOpParam.java | 4 +- .../main/proto/ClientNamenodeProtocol.proto | 10 +++ .../hadoop-hdfs/src/site/apt/WebHDFS.apt.vm | 44 +++++++++++ .../apache/hadoop/hdfs/TestDFSPermission.java | 77 +++++++++++++++++++ .../org/apache/hadoop/hdfs/TestSafeMode.java | 23 +++++- .../hdfs/server/namenode/FSAclBaseTest.java | 28 +++++++ .../hdfs/server/namenode/TestINodeFile.java | 2 + .../snapshot/TestAclWithSnapshot.java | 9 +++ .../web/TestWebHdfsFileSystemContract.java | 33 ++++++++ .../hadoop/hdfs/web/TestWebHdfsUrl.java | 24 ++++++ .../security/TestPermissionSymlinks.java | 35 +++++++++ 34 files changed, 670 insertions(+), 11 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 5b456b1eff7..a9a19cdc29b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import 
org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.security.AccessControlException; @@ -803,6 +804,18 @@ public abstract FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException; + /** + * The specification of this method matches that of + * {@link FileContext#access(Path, FsAction)} + * except that an UnresolvedLinkException may be thrown if a symlink is + * encountered in the path. + */ + @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"}) + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + FileSystem.checkAccessPermissions(this.getFileStatus(path), mode); + } + /** * The specification of this method matches that of * {@link FileContext#getFileLinkStatus(Path)} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index 808709859a9..c9c8fa8ffdd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT; @@ -1108,6 +1109,55 @@ public FileStatus next(final AbstractFileSystem fs, final Path p) }.resolve(this, absF); } + /** + * Checks if the user can access a path. The mode specifies which access + * checks to perform. If the requested permissions are granted, then the + * method returns normally. If access is denied, then the method throws an + * {@link AccessControlException}. + *
+ * The default implementation of this method calls {@link #getFileStatus(Path)} + * and checks the returned permissions against the requested permissions. + * Note that the getFileStatus call will be subject to authorization checks. + * Typically, this requires search (execute) permissions on each directory in + * the path's prefix, but this is implementation-defined. Any file system + * that provides a richer authorization model (such as ACLs) may override the + * default implementation so that it checks against that model instead. + *
+ * In general, applications should avoid using this method, due to the risk of + * time-of-check/time-of-use race conditions. The permissions on a file may + * change immediately after the access call returns. Most applications should + * prefer running specific file system actions as the desired user represented + * by a {@link UserGroupInformation}. + * + * @param path Path to check + * @param mode type of access to check + * @throws AccessControlException if access is denied + * @throws FileNotFoundException if the path does not exist + * @throws UnsupportedFileSystemException if file system for path + * is not supported + * @throws IOException see specific implementation + * + * Exceptions applicable to file systems accessed over RPC: + * @throws RpcClientException If an exception occurred in the RPC client + * @throws RpcServerException If an exception occurred in the RPC server + * @throws UnexpectedServerException If server implementation throws + * undeclared exception to RPC server + */ + @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"}) + public void access(final Path path, final FsAction mode) + throws AccessControlException, FileNotFoundException, + UnsupportedFileSystemException, IOException { + final Path absPath = fixRelativePart(path); + new FSLinkResolver() { + @Override + public Void next(AbstractFileSystem fs, Path p) throws IOException, + UnresolvedLinkException { + fs.access(p, mode); + return null; + } + }.resolve(this, absPath); + } + /** * Return a file status object that represents the path. If the path * refers to a symlink then the FileStatus of the symlink is returned. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 1eb54d16a9b..1d2270b37ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -25,6 +25,7 @@ import java.net.URISyntaxException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; @@ -50,6 +51,7 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.io.Text; @@ -2072,6 +2074,71 @@ public short getDefaultReplication(Path path) { */ public abstract FileStatus getFileStatus(Path f) throws IOException; + /** + * Checks if the user can access a path. The mode specifies which access + * checks to perform. If the requested permissions are granted, then the + * method returns normally. If access is denied, then the method throws an + * {@link AccessControlException}. + *
+ * The default implementation of this method calls {@link #getFileStatus(Path)} + * and checks the returned permissions against the requested permissions. + * Note that the getFileStatus call will be subject to authorization checks. + * Typically, this requires search (execute) permissions on each directory in + * the path's prefix, but this is implementation-defined. Any file system + * that provides a richer authorization model (such as ACLs) may override the + * default implementation so that it checks against that model instead. + *
+ * In general, applications should avoid using this method, due to the risk of + * time-of-check/time-of-use race conditions. The permissions on a file may + * change immediately after the access call returns. Most applications should + * prefer running specific file system actions as the desired user represented + * by a {@link UserGroupInformation}. + * + * @param path Path to check + * @param mode type of access to check + * @throws AccessControlException if access is denied + * @throws FileNotFoundException if the path does not exist + * @throws IOException see specific implementation + */ + @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"}) + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, IOException { + checkAccessPermissions(this.getFileStatus(path), mode); + } + + /** + * This method provides the default implementation of + * {@link #access(Path, FsAction)}. + * + * @param stat FileStatus to check + * @param mode type of access to check + * @throws IOException for any error + */ + @InterfaceAudience.Private + static void checkAccessPermissions(FileStatus stat, FsAction mode) + throws IOException { + FsPermission perm = stat.getPermission(); + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + String user = ugi.getShortUserName(); + List groups = Arrays.asList(ugi.getGroupNames()); + if (user.equals(stat.getOwner())) { + if (perm.getUserAction().implies(mode)) { + return; + } + } else if (groups.contains(stat.getGroup())) { + if (perm.getGroupAction().implies(mode)) { + return; + } + } else { + if (perm.getOtherAction().implies(mode)) { + return; + } + } + throw new AccessControlException(String.format( + "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(), + stat.getOwner(), stat.getGroup(), stat.isDirectory() ? 
"d" : "-", perm)); + } + /** * See {@link FileContext#fixRelativePart} */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index 139e1430f8b..52706f4049a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.security.AccessControlException; @@ -397,6 +398,12 @@ public FileStatus getFileStatus(Path f) throws IOException { return fs.getFileStatus(f); } + @Override + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, IOException { + fs.access(path, mode); + } + public void createSymlink(final Path target, final Path link, final boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index 6ffe9214b37..b6e1d96e038 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.security.AccessControlException; @@ -119,6 +120,13 @@ public FileStatus getFileStatus(Path f) return myFs.getFileStatus(f); } + @Override + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + checkPath(path); + myFs.access(path, mode); + } + @Override public FileStatus getFileLinkStatus(final Path f) throws IOException, UnresolvedLinkException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java index 4480da20f39..9650a374d18 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java @@ -41,7 +41,9 @@ import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Progressable; /** @@ -222,6 +224,12 @@ public FileStatus getFileStatus(final Path f) return super.getFileStatus(fullPath(f)); } + @Override + public void access(Path path, FsAction mode) throws 
AccessControlException, + FileNotFoundException, IOException { + super.access(fullPath(path), mode); + } + @Override public FsStatus getStatus(Path p) throws IOException { return super.getStatus(fullPath(p)); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java index 5d53eb79d0a..9569e1089bb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java @@ -41,7 +41,9 @@ import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; @@ -200,6 +202,11 @@ public FileStatus getFileStatus(final Path f) return myFs.getFileStatus(fullPath(f)); } + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + myFs.access(fullPath(path), mode); + } + @Override public FileStatus getFileLinkStatus(final Path f) throws IOException, UnresolvedLinkException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index b4ac18eb1af..963289f4373 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -51,6 +51,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.AclUtil; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.viewfs.InodeTree.INode; import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink; @@ -359,7 +360,14 @@ public FileStatus getFileStatus(final Path f) throws AccessControlException, return new ViewFsFileStatus(status, this.makeQualified(f)); } - + @Override + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.access(res.remainingPath, mode); + } + @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index 5cdccd29975..014f4881275 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -54,6 +54,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclUtil; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import 
org.apache.hadoop.fs.viewfs.InodeTree.INode; import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink; @@ -352,6 +353,14 @@ public FileStatus getFileStatus(final Path f) throws AccessControlException, return new ViewFsFileStatus(status, this.makeQualified(f)); } + @Override + public void access(Path path, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.access(res.remainingPath, mode); + } + @Override public FileStatus getFileLinkStatus(final Path f) throws AccessControlException, FileNotFoundException, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java index 24e712c051c..1e86439785b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java @@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; @@ -201,6 +202,8 @@ public Map getXAttrs(Path path, List names) public void removeXAttr(Path path, String name) throws IOException; public AclStatus getAclStatus(Path path) throws IOException; + + public void access(Path path, FsAction mode) throws IOException; } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f750178561a..1e33c194b54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -332,6 +332,9 @@ Release 2.6.0 - UNRELEASED HDFS-6778. The extended attributes javadoc should simply refer to the user docs. (clamb via wang) + HDFS-6570. add api that enables checking if a user has certain permissions on + a file. (Jitendra Pandey via cnauroth) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java index 6e4b66f71e6..a0e75f81d3b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.hdfs.CorruptFileBlockIterator; @@ -448,6 +449,11 @@ public void removeXAttr(Path path, String name) throws IOException { dfs.removeXAttr(getUriPath(path), name); } + @Override + public void access(Path path, final FsAction mode) throws IOException { + dfs.checkAccess(getUriPath(path), mode); + } + /** * Renew an existing delegation token. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 45a9011a568..b9af35ea255 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -122,6 +122,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.net.Peer; @@ -2832,6 +2833,17 @@ public void removeXAttr(String src, String name) throws IOException { } } + public void checkAccess(String src, FsAction mode) throws IOException { + checkOpen(); + try { + namenode.checkAccess(src, mode); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + FileNotFoundException.class, + UnresolvedPathException.class); + } + } + @Override // RemotePeerFactory public Peer newConnectedPeer(InetSocketAddress addr, Token blockToken, DatanodeID datanodeId) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 5ae39d69747..e20c61f5185 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -59,6 +59,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; @@ -1898,4 +1899,23 @@ public Void next(final FileSystem fs, final Path p) throws IOException { } }.resolve(this, absF); } + + @Override + public void access(Path path, final FsAction mode) throws IOException { + final Path absF = fixRelativePart(path); + new FileSystemLinkResolver() { + @Override + public Void doCall(final Path p) throws IOException { + dfs.checkAccess(getPathName(p), mode); + return null; + } + + @Override + public Void next(final FileSystem fs, final Path p) + throws IOException { + fs.access(p, mode); + return null; + } + }.resolve(this, absF); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 74eca82fbe0..8dbe1f7609f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; @@ -1327,4 +1328,22 @@ public List listXAttrs(String src) */ @AtMostOnce public void removeXAttr(String src, XAttr xAttr) throws IOException; + + /** + * Checks if the user can access a path. The mode specifies which access + * checks to perform. If the requested permissions are granted, then the + * method returns normally. If access is denied, then the method throws an + * {@link AccessControlException}. + * In general, applications should avoid using this method, due to the risk of + * time-of-check/time-of-use race conditions. The permissions on a file may + * change immediately after the access call returns. + * + * @param path Path to check + * @param mode type of access to check + * @throws AccessControlException if access is denied + * @throws FileNotFoundException if the path does not exist + * @throws IOException see specific implementation + */ + @Idempotent + public void checkAccess(String path, FsAction mode) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index df0d1b0006c..c4211b1d795 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -174,6 +174,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; @@ -320,6 +322,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements private static final RemoveXAttrResponseProto VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance(); + private static final CheckAccessResponseProto + VOID_CHECKACCESS_RESPONSE = CheckAccessResponseProto.getDefaultInstance(); + /** * Constructor * @@ -1338,4 +1343,15 @@ public RemoveXAttrResponseProto removeXAttr(RpcController controller, } return VOID_REMOVEXATTR_RESPONSE; } + + @Override + public CheckAccessResponseProto checkAccess(RpcController controller, + CheckAccessRequestProto req) throws ServiceException { + try { + server.checkAccess(req.getPath(), PBHelper.convert(req.getMode())); + } catch (IOException e) { + throw new ServiceException(e); + } + return VOID_CHECKACCESS_RESPONSE; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 0f8eba970ca..85dbb7d7184 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; @@ -144,6 +145,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto; @@ -1346,4 +1348,15 @@ public void removeXAttr(String src, XAttr xAttr) throws IOException { throw ProtobufHelper.getRemoteException(e); } } + + @Override + public void checkAccess(String path, FsAction mode) throws IOException { + CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder() + .setPath(path).setMode(PBHelper.convert(mode)).build(); + try { + rpcProxy.checkAccess(null, req); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 5706aab062f..9ca93a5ae29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -2107,11 +2107,11 @@ private static XAttr.NameSpace convert(XAttrNamespaceProto v) { return castEnum(v, XATTR_NAMESPACE_VALUES); } - private static FsActionProto convert(FsAction v) { + public static FsActionProto convert(FsAction v) { return FsActionProto.valueOf(v != null ? 
v.ordinal() : 0); } - private static FsAction convert(FsActionProto v) { + public static FsAction convert(FsActionProto v) { return castEnum(v, FSACTION_VALUES); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index f12c4e2cfe1..37f8c4b23d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8458,6 +8458,29 @@ private void checkXAttrChangeAccess(String src, XAttr xAttr, } } + void checkAccess(String src, FsAction mode) throws AccessControlException, + FileNotFoundException, UnresolvedLinkException, IOException { + checkOperation(OperationCategory.READ); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + readLock(); + try { + checkOperation(OperationCategory.READ); + src = FSDirectory.resolvePath(src, pathComponents, dir); + if (dir.getINode(src) == null) { + throw new FileNotFoundException("Path not found"); + } + if (isPermissionEnabled) { + FSPermissionChecker pc = getPermissionChecker(); + checkPathAccess(pc, src, mode); + } + } catch (AccessControlException e) { + logAuditEvent(false, "checkAccess", src); + throw e; + } finally { + readUnlock(); + } + } + /** * Default AuditLogger implementation; used when no access logger is * defined in the config file. It can also be explicitly listed in the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 2c2cd4f2272..6800fcde174 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -54,6 +54,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.ha.HAServiceStatus; import org.apache.hadoop.ha.HealthCheckFailedException; @@ -1443,5 +1444,10 @@ public List listXAttrs(String src) throws IOException { public void removeXAttr(String src, XAttr xAttr) throws IOException { namesystem.removeXAttr(src, xAttr); } + + @Override + public void checkAccess(String path, FsAction mode) throws IOException { + namesystem.checkAccess(path, mode); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index d7235b38727..991885b2e40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -57,6 +57,7 @@ import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.AclStatus; +import 
org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -112,6 +113,7 @@ import org.apache.hadoop.hdfs.web.resources.XAttrNameParam; import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam; import org.apache.hadoop.hdfs.web.resources.XAttrValueParam; +import org.apache.hadoop.hdfs.web.resources.FsActionParam; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.Server; @@ -755,10 +757,12 @@ public Response getRoot( @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) final XAttrEncodingParam xattrEncoding, @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT) - final ExcludeDatanodesParam excludeDatanodes + final ExcludeDatanodesParam excludeDatanodes, + @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT) + final FsActionParam fsAction ) throws IOException, InterruptedException { return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length, - renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes); + renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction); } /** Handle HTTP GET request. */ @@ -789,11 +793,13 @@ public Response get( @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) final XAttrEncodingParam xattrEncoding, @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT) - final ExcludeDatanodesParam excludeDatanodes + final ExcludeDatanodesParam excludeDatanodes, + @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT) + final FsActionParam fsAction ) throws IOException, InterruptedException { init(ugi, delegation, username, doAsUser, path, op, offset, length, - renewer, bufferSize, xattrEncoding, excludeDatanodes); + renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction); return ugi.doAs(new PrivilegedExceptionAction() { @Override @@ -801,7 +807,7 @@ public Response run() throws IOException, URISyntaxException { try { return get(ugi, delegation, username, doAsUser, path.getAbsolutePath(), op, offset, length, renewer, bufferSize, - xattrNames, xattrEncoding, excludeDatanodes); + xattrNames, xattrEncoding, excludeDatanodes, fsAction); } finally { reset(); } @@ -822,7 +828,8 @@ private Response get( final BufferSizeParam bufferSize, final List xattrNames, final XAttrEncodingParam xattrEncoding, - final ExcludeDatanodesParam excludeDatanodes + final ExcludeDatanodesParam excludeDatanodes, + final FsActionParam fsAction ) throws IOException, URISyntaxException { final NameNode namenode = (NameNode)context.getAttribute("name.node"); final NamenodeProtocols np = getRPCServer(namenode); @@ -919,6 +926,10 @@ private Response get( final String js = JsonUtil.toJsonString(xAttrs); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } + case CHECKACCESS: { + np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue())); + return Response.ok().build(); + } default: throw new UnsupportedOperationException(op + " is not supported"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 78062ad0b5f..cf6233f5a35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -54,6 +54,7 @@ import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; @@ -1356,6 +1357,12 @@ BlockLocation[] decodeResponse(Map json) throws IOException { }.run(); } + @Override + public void access(final Path path, final FsAction mode) throws IOException { + final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS; + new FsPathRunner(op, path, new FsActionParam(mode)).run(); + } + @Override public ContentSummary getContentSummary(final Path p) throws IOException { statistics.incrementReadOps(1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java new file mode 100644 index 00000000000..c8401960034 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.web.resources; + +import org.apache.hadoop.fs.permission.FsAction; + +import java.util.regex.Pattern; + +/** {@link FsAction} Parameter */ +public class FsActionParam extends StringParam { + + /** Parameter name. */ + public static final String NAME = "fsaction"; + + /** Default parameter value. */ + public static final String DEFAULT = NULL; + + private static String FS_ACTION_PATTERN = "[rwx-]{3}"; + + private static final Domain DOMAIN = new Domain(NAME, + Pattern.compile(FS_ACTION_PATTERN)); + + /** + * Constructor. + * @param str a string representation of the parameter value. + */ + public FsActionParam(final String str) { + super(DOMAIN, str == null || str.equals(DEFAULT)? null: str); + } + + /** + * Constructor. + * @param value the parameter value. + */ + public FsActionParam(final FsAction value) { + super(DOMAIN, value == null? 
null: value.SYMBOL); + } + + @Override + public String getName() { + return NAME; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java index bf5a6a23e57..f63ed443924 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java @@ -39,7 +39,9 @@ public static enum Op implements HttpOpParam.Op { GETXATTRS(false, HttpURLConnection.HTTP_OK), LISTXATTRS(false, HttpURLConnection.HTTP_OK), - NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED); + NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED), + + CHECKACCESS(false, HttpURLConnection.HTTP_OK); final boolean redirect; final int expectedHttpResponseCode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index d2f92d64d0e..cd291a68604 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -654,6 +654,14 @@ message DeleteSnapshotRequestProto { message DeleteSnapshotResponseProto { // void response } +message CheckAccessRequestProto { + required string path = 1; + required AclEntryProto.FsActionProto mode = 2; +} + +message CheckAccessResponseProto { // void response +} + service ClientNamenodeProtocol { rpc getBlockLocations(GetBlockLocationsRequestProto) returns(GetBlockLocationsResponseProto); @@ -783,4 +791,6 @@ service ClientNamenodeProtocol { returns(ListXAttrsResponseProto); rpc removeXAttr(RemoveXAttrRequestProto) returns(RemoveXAttrResponseProto); + rpc checkAccess(CheckAccessRequestProto) + returns(CheckAccessResponseProto); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm index 51bc574095d..c3f6a6b813b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm @@ -82,6 +82,9 @@ WebHDFS REST API * {{{List all XAttrs}<<>>}} (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs) + * {{{Check access}<<>>}} + (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access) + * HTTP PUT * {{{Create and Write to a File}<<>>}} @@ -927,6 +930,28 @@ Transfer-Encoding: chunked {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus +** {Check access} + + * Submit a HTTP GET request. 
+ ++--------------------------------- +curl -i -X GET "http://:/webhdfs/v1/?op=CHECKACCESS + &fsaction= ++--------------------------------- + + The client receives a response with zero content length: + ++--------------------------------- +HTTP/1.1 200 OK +Content-Length: 0 ++--------------------------------- + + [] + + See also: + {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access + + * {Extended Attributes(XAttrs) Operations} ** {Set XAttr} @@ -2166,6 +2191,25 @@ var tokenProperties = {{Proxy Users}} +** {Fs Action} + +*----------------+-------------------------------------------------------------------+ +|| Name | <<>> | +*----------------+-------------------------------------------------------------------+ +|| Description | File system operation read/write/execute | +*----------------+-------------------------------------------------------------------+ +|| Type | String | +*----------------+-------------------------------------------------------------------+ +|| Default Value | null (an invalid value) | +*----------------+-------------------------------------------------------------------+ +|| Valid Values | Strings matching regex pattern \"[rwx-]\{3\}\" | +*----------------+-------------------------------------------------------------------+ +|| Syntax | \"[rwx-]\{3\}\" | +*----------------+-------------------------------------------------------------------+ + + See also: + {{{Check access}<<>>}}, + ** {Group} *----------------+-------------------------------------------------------------------+ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java index 7d2b0ff7040..68349a2ac67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java @@ -20,8 +20,11 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import java.io.FileNotFoundException; import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.HashMap; import java.util.Map; import java.util.Random; @@ -36,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; @@ -421,6 +425,79 @@ public void testPermissionChecking() throws Exception { } } + @Test + public void testAccessOwner() throws IOException, InterruptedException { + FileSystem rootFs = FileSystem.get(conf); + Path p1 = new Path("/p1"); + rootFs.mkdirs(p1); + rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME); + fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() { + @Override + public FileSystem run() throws Exception { + return FileSystem.get(conf); + } + }); + fs.setPermission(p1, new FsPermission((short) 0444)); + fs.access(p1, FsAction.READ); + try { + fs.access(p1, FsAction.WRITE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + + Path badPath = new Path("/bad/bad"); + try { + fs.access(badPath, FsAction.READ); + fail("The access call should have failed"); + } catch (FileNotFoundException
e) { + // expected + } + } + + @Test + public void testAccessGroupMember() throws IOException, InterruptedException { + FileSystem rootFs = FileSystem.get(conf); + Path p2 = new Path("/p2"); + rootFs.mkdirs(p2); + rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME); + rootFs.setPermission(p2, new FsPermission((short) 0740)); + fs = USER1.doAs(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws Exception { + return FileSystem.get(conf); + } + }); + fs.access(p2, FsAction.READ); + try { + fs.access(p2, FsAction.EXECUTE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + } + + @Test + public void testAccessOthers() throws IOException, InterruptedException { + FileSystem rootFs = FileSystem.get(conf); + Path p3 = new Path("/p3"); + rootFs.mkdirs(p3); + rootFs.setPermission(p3, new FsPermission((short) 0774)); + fs = USER1.doAs(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws Exception { + return FileSystem.get(conf); + } + }); + fs.access(p3, FsAction.READ); + try { + fs.access(p3, FsAction.READ_WRITE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + } + /* Check if namenode performs permission checking correctly * for the given user for operations mkdir, open, setReplication, * getFileInfo, isDirectory, exists, getContentLength, list, rename, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index 25ec8c9eb0e..bda95c07525 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -26,6 +26,7 @@ import static org.junit.Assert.fail; import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.List; import org.apache.commons.logging.Log; @@ -36,6 +37,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; @@ -47,6 +49,8 @@ import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; @@ -297,7 +301,8 @@ public void runFsFun(String msg, FSRun f) { * assert that they are either allowed or fail as expected. 
*/ @Test - public void testOperationsWhileInSafeMode() throws IOException { + public void testOperationsWhileInSafeMode() throws IOException, + InterruptedException { final Path file1 = new Path("/file1"); assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET)); @@ -407,6 +412,22 @@ public void run(FileSystem fs) throws IOException { fail("getAclStatus failed while in SM"); } + // Test access + UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX"); + FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws IOException { + return FileSystem.get(conf); + } + }); + myfs.access(file1, FsAction.READ); + try { + myfs.access(file1, FsAction.WRITE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + assertFalse("Could not leave SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java index f36483e642d..1ddc774c842 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -1256,6 +1257,33 @@ public void testGetAclStatusRequiresTraverseOrSuper() throws Exception { fsAsDiana.getAclStatus(bruceFile); } + @Test + public void testAccess() throws IOException, InterruptedException { + Path p1 = new Path("/p1"); + fs.mkdirs(p1); + fs.setOwner(p1, BRUCE.getShortUserName(), "groupX"); + fsAsBruce.setAcl(p1, Lists.newArrayList( + aclEntry(ACCESS, USER, READ), + aclEntry(ACCESS, USER, "bruce", READ), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, OTHER, NONE))); + fsAsBruce.access(p1, FsAction.READ); + try { + fsAsBruce.access(p1, FsAction.WRITE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + + Path badPath = new Path("/bad/bad"); + try { + fsAsBruce.access(badPath, FsAction.READ); + fail("The access call should have failed"); + } catch (FileNotFoundException e) { + // expected + } + } + /** * Creates a FileSystem for the super-user. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 704bc1669d0..a739b7aa6ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -45,6 +45,7 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSClient; @@ -581,6 +582,7 @@ public void testInodeIdBasedPaths() throws Exception { fs.getAclStatus(testFileInodePath); fs.getXAttrs(testFileInodePath); fs.listXAttrs(testFileInodePath); + fs.access(testFileInodePath, FsAction.READ_WRITE); } // symbolic link related tests diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java index 3deb47ff3af..3be1d36ca51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -674,6 +675,13 @@ private static void assertDirPermissionDenied(FileSystem fs, } catch (AccessControlException e) { // expected } + + try { + fs.access(pathToCheck, FsAction.READ); + fail("The access call should have failed for "+pathToCheck); + } catch (AccessControlException e) { + // expected + } } /** @@ -689,6 +697,7 @@ private static void assertDirPermissionGranted(FileSystem fs, UserGroupInformation user, Path pathToCheck) throws Exception { try { fs.listStatus(pathToCheck); + fs.access(pathToCheck, FsAction.READ); } catch (AccessControlException e) { fail("expected permission granted for user " + user + ", path = " + pathToCheck); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java index 09f025c65ac..46e433d6df8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.AppendTestUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -49,6 +50,7 @@ import org.apache.hadoop.security.AccessControlException; 
import org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; +import org.junit.Test; public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest { private static final Configuration conf = new Configuration(); @@ -530,4 +532,35 @@ public void testResponseCode() throws IOException { } } } + + @Test + public void testAccess() throws IOException, InterruptedException { + Path p1 = new Path("/pathX"); + try { + UserGroupInformation ugi = UserGroupInformation.createUserForTesting("alpha", + new String[]{"beta"}); + WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, + WebHdfsFileSystem.SCHEME); + + fs.mkdirs(p1); + fs.setPermission(p1, new FsPermission((short) 0444)); + fs.access(p1, FsAction.READ); + try { + fs.access(p1, FsAction.WRITE); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + + Path badPath = new Path("/bad"); + try { + fs.access(badPath, FsAction.READ); + fail("The access call should have failed"); + } catch (FileNotFoundException e) { + // expected + } + } finally { + fs.delete(p1, true); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java index a84918e13d1..45cd8fe3afb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -40,6 +41,7 @@ import org.apache.hadoop.hdfs.web.resources.PutOpParam; import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam; import org.apache.hadoop.hdfs.web.resources.UserParam; +import org.apache.hadoop.hdfs.web.resources.FsActionParam; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; @@ -283,6 +285,28 @@ public void testSecureProxyAuthParamsInUrl() throws IOException { }, fileStatusUrl); } + + @Test(timeout=60000) + public void testCheckAccessUrl() throws IOException { + Configuration conf = new Configuration(); + + UserGroupInformation ugi = + UserGroupInformation.createRemoteUser("test-user"); + UserGroupInformation.setLoginUser(ugi); + + WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf); + Path fsPath = new Path("/p1"); + + URL checkAccessUrl = webhdfs.toUrl(GetOpParam.Op.CHECKACCESS, + fsPath, new FsActionParam(FsAction.READ_WRITE)); + checkQueryParams( + new String[]{ + GetOpParam.Op.CHECKACCESS.toQueryString(), + new UserParam(ugi.getShortUserName()).toString(), + FsActionParam.NAME + "=" + FsAction.READ_WRITE.SYMBOL + }, + checkAccessUrl); + } private void checkQueryParams(String[] expected, URL url) { Arrays.sort(expected); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java index 13a9610a346..bc41edc1107 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java @@ -27,6 +27,7 @@ import static org.junit.Assert.fail; import java.io.IOException; +import java.io.FileNotFoundException; import java.security.PrivilegedExceptionAction; import java.util.Arrays; @@ -39,6 +40,7 @@ import org.apache.hadoop.fs.FileSystemTestWrapper; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -393,4 +395,37 @@ public Object run() throws IOException { GenericTestUtils.assertExceptionContains("Permission denied", e); } } + + @Test + public void testAccess() throws Exception { + fs.setPermission(target, new FsPermission((short) 0002)); + fs.setAcl(target, Arrays.asList( + aclEntry(ACCESS, USER, ALL), + aclEntry(ACCESS, GROUP, NONE), + aclEntry(ACCESS, USER, user.getShortUserName(), WRITE), + aclEntry(ACCESS, OTHER, WRITE))); + FileContext myfc = user.doAs(new PrivilegedExceptionAction() { + @Override + public FileContext run() throws IOException { + return FileContext.getFileContext(conf); + } + }); + + // Path to targetChild via symlink + myfc.access(link, FsAction.WRITE); + try { + myfc.access(link, FsAction.ALL); + fail("The access call should have failed."); + } catch (AccessControlException e) { + // expected + } + + Path badPath = new Path(link, "bad"); + try { + myfc.access(badPath, FsAction.READ); + fail("The access call should have failed"); + } catch (FileNotFoundException e) { + // expected + } + } } From bda9c584c828fdd18e9c066747d58dbf751a585d Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Wed, 30 Jul 2014 17:54:10 +0000 Subject: [PATCH 084/354] HADOOP-10756. KMS audit log should consolidate successful similar requests. (asuresh via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614725 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../hadoop/crypto/key/kms/server/KMS.java | 82 ++++--- .../crypto/key/kms/server/KMSAudit.java | 205 ++++++++++++++++-- .../kms/server/KMSAuthenticationFilter.java | 6 +- .../key/kms/server/KMSConfiguration.java | 7 +- .../key/kms/server/KMSExceptionsProvider.java | 10 +- .../crypto/key/kms/server/KMSWebApp.java | 11 + .../hadoop-kms/src/site/apt/index.apt.vm | 19 ++ .../crypto/key/kms/server/TestKMSAudit.java | 134 ++++++++++++ .../test/resources/log4j-kmsaudit.properties | 25 +++ 10 files changed, 443 insertions(+), 59 deletions(-) create mode 100644 hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java create mode 100644 hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index c2f6f91edaf..9805a01bd7b 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -192,6 +192,9 @@ Trunk (Unreleased) HADOOP-10891. Add EncryptedKeyVersion factory method to KeyProviderCryptoExtension. (wang) + HADOOP-10756. KMS audit log should consolidate successful similar requests. + (asuresh via tucu) + BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. 
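The patches above (HDFS-6570 and its protocol, viewfs, and WebHDFS plumbing) add FileSystem#access(Path, FsAction) and the matching FileContext support. The following minimal sketch shows how a client might call the new API; it is illustrative only, not part of any patch in this series, and the fs.defaultFS URI and /tmp/example path are placeholders.

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class AccessCheckExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:8020"); // placeholder cluster URI
    try (FileSystem fs = FileSystem.get(conf)) {
      Path path = new Path("/tmp/example"); // placeholder path
      try {
        // Returns normally when the caller has read and write permission;
        // otherwise throws AccessControlException, or FileNotFoundException
        // if the path does not exist.
        fs.access(path, FsAction.READ_WRITE);
        System.out.println("read/write access granted on " + path);
      } catch (AccessControlException e) {
        System.out.println("access denied: " + e.getMessage());
      } catch (FileNotFoundException e) {
        System.out.println("no such path: " + path);
      }
      // As the ClientProtocol javadoc in the patch notes, the check is advisory:
      // permissions can change right after access() returns (time-of-check /
      // time-of-use), so callers should still handle failures on the real operation.
    }
  }
}

Over WebHDFS the same check is exposed as the CHECKACCESS operation, registered under HTTP GET in GetOpParam, taking an fsaction query parameter that must match [rwx-]{3}, for example ?op=CHECKACCESS&fsaction=rw-.
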
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java index 2b663368737..9c4e7940929 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java @@ -20,6 +20,7 @@ import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.crypto.key.kms.KMSRESTConstants; @@ -27,7 +28,6 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; -import org.apache.hadoop.util.StringUtils; import javax.ws.rs.Consumes; import javax.ws.rs.DELETE; @@ -59,22 +59,25 @@ @Path(KMSRESTConstants.SERVICE_VERSION) @InterfaceAudience.Private public class KMS { - private static final String CREATE_KEY = "CREATE_KEY"; - private static final String DELETE_KEY = "DELETE_KEY"; - private static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION"; - private static final String GET_KEYS = "GET_KEYS"; - private static final String GET_KEYS_METADATA = "GET_KEYS_METADATA"; - private static final String GET_KEY_VERSION = "GET_KEY_VERSION"; - private static final String GET_CURRENT_KEY = "GET_CURRENT_KEY"; - private static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS"; - private static final String GET_METADATA = "GET_METADATA"; - private static final String GENERATE_EEK = "GENERATE_EEK"; - private static final String DECRYPT_EEK = "DECRYPT_EEK"; + public static final String CREATE_KEY = "CREATE_KEY"; + public static final String DELETE_KEY = "DELETE_KEY"; + public static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION"; + public static final String GET_KEYS = "GET_KEYS"; + public static final String GET_KEYS_METADATA = "GET_KEYS_METADATA"; + public static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS"; + public static final String GET_METADATA = "GET_METADATA"; + public static final String GET_KEY_VERSION = "GET_KEY_VERSION"; + public static final String GET_CURRENT_KEY = "GET_CURRENT_KEY"; + public static final String GENERATE_EEK = "GENERATE_EEK"; + public static final String DECRYPT_EEK = "DECRYPT_EEK"; + private KeyProviderCryptoExtension provider; + private KMSAudit kmsAudit; public KMS() throws Exception { provider = KMSWebApp.getKeyProvider(); + kmsAudit= KMSWebApp.getKMSAudit(); } private static Principal getPrincipal(SecurityContext securityContext) @@ -86,13 +89,26 @@ private static Principal getPrincipal(SecurityContext securityContext) return user; } - private static void assertAccess(KMSACLs.Type aclType, Principal principal, + + private static final String UNAUTHORIZED_MSG_WITH_KEY = + "User:{0} not allowed to do ''{1}'' on ''{2}''"; + + private static final String UNAUTHORIZED_MSG_WITHOUT_KEY = + "User:{0} not allowed to do ''{1}''"; + + private void assertAccess(KMSACLs.Type aclType, Principal principal, + String operation) throws AccessControlException { + assertAccess(aclType, principal, operation, null); + } + + private void 
assertAccess(KMSACLs.Type aclType, Principal principal, String operation, String key) throws AccessControlException { if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) { KMSWebApp.getUnauthorizedCallsMeter().mark(); - KMSAudit.unauthorized(principal, operation, key); + kmsAudit.unauthorized(principal, operation, key); throw new AuthorizationException(MessageFormat.format( - "User:{0} not allowed to do ''{1}'' on ''{2}''", + (key != null) ? UNAUTHORIZED_MSG_WITH_KEY + : UNAUTHORIZED_MSG_WITHOUT_KEY, principal.getName(), operation, key)); } } @@ -149,7 +165,7 @@ public Response createKey(@Context SecurityContext securityContext, provider.flush(); - KMSAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" + + kmsAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" + (material != null) + " Description:" + description); if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) { @@ -175,7 +191,7 @@ public Response deleteKey(@Context SecurityContext securityContext, provider.deleteKey(name); provider.flush(); - KMSAudit.ok(user, DELETE_KEY, name, ""); + kmsAudit.ok(user, DELETE_KEY, name, ""); return Response.ok().build(); } @@ -203,7 +219,7 @@ public Response rolloverKey(@Context SecurityContext securityContext, provider.flush(); - KMSAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" + + kmsAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" + (material != null) + " NewVersion:" + keyVersion.getVersionName()); if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) { @@ -222,11 +238,10 @@ public Response getKeysMetadata(@Context SecurityContext securityContext, KMSWebApp.getAdminCallsMeter().mark(); Principal user = getPrincipal(securityContext); String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]); - String names = StringUtils.arrayToString(keyNames); - assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA, names); + assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA); KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames); Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta); - KMSAudit.ok(user, GET_KEYS_METADATA, names, ""); + kmsAudit.ok(user, GET_KEYS_METADATA, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -237,9 +252,9 @@ public Response getKeyNames(@Context SecurityContext securityContext) throws Exception { KMSWebApp.getAdminCallsMeter().mark(); Principal user = getPrincipal(securityContext); - assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS, "*"); + assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS); Object json = provider.getKeys(); - KMSAudit.ok(user, GET_KEYS, "*", ""); + kmsAudit.ok(user, GET_KEYS, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -263,7 +278,7 @@ public Response getMetadata(@Context SecurityContext securityContext, KMSWebApp.getAdminCallsMeter().mark(); assertAccess(KMSACLs.Type.GET_METADATA, user, GET_METADATA, name); Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name)); - KMSAudit.ok(user, GET_METADATA, name, ""); + kmsAudit.ok(user, GET_METADATA, name, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -279,7 +294,7 @@ public Response getCurrentVersion(@Context SecurityContext securityContext, KMSWebApp.getKeyCallsMeter().mark(); assertAccess(KMSACLs.Type.GET, user, GET_CURRENT_KEY, name); Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name)); - KMSAudit.ok(user, 
GET_CURRENT_KEY, name, ""); + kmsAudit.ok(user, GET_CURRENT_KEY, name, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -292,9 +307,12 @@ public Response getKeyVersion(@Context SecurityContext securityContext, Principal user = getPrincipal(securityContext); KMSClientProvider.checkNotEmpty(versionName, "versionName"); KMSWebApp.getKeyCallsMeter().mark(); - assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION, versionName); - Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersion(versionName)); - KMSAudit.ok(user, GET_KEY_VERSION, versionName, ""); + KeyVersion keyVersion = provider.getKeyVersion(versionName); + assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION); + if (keyVersion != null) { + kmsAudit.ok(user, GET_KEY_VERSION, keyVersion.getName(), ""); + } + Object json = KMSServerJSONUtils.toJSON(keyVersion); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } @@ -327,7 +345,7 @@ public Response generateEncryptedKeys( } catch (Exception e) { throw new IOException(e); } - KMSAudit.ok(user, GENERATE_EEK, name, ""); + kmsAudit.ok(user, GENERATE_EEK, name, ""); retJSON = new ArrayList(); for (EncryptedKeyVersion edek : retEdeks) { ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek)); @@ -362,7 +380,7 @@ public Response decryptEncryptedKey(@Context SecurityContext securityContext, (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD); Object retJSON; if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) { - assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, versionName); + assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, keyName); KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD); byte[] iv = Base64.decodeBase64(ivStr); KMSClientProvider.checkNotNull(encMaterialStr, @@ -373,7 +391,7 @@ public Response decryptEncryptedKey(@Context SecurityContext securityContext, new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName, iv, KeyProviderCryptoExtension.EEK, encMaterial)); retJSON = KMSServerJSONUtils.toJSON(retKeyVersion); - KMSAudit.ok(user, DECRYPT_EEK, versionName, ""); + kmsAudit.ok(user, DECRYPT_EEK, keyName, ""); } else { throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP + " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " + @@ -396,7 +414,7 @@ public Response getKeyVersions(@Context SecurityContext securityContext, KMSWebApp.getKeyCallsMeter().mark(); assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSIONS, name); Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name)); - KMSAudit.ok(user, GET_KEY_VERSIONS, name, ""); + kmsAudit.ok(user, GET_KEY_VERSIONS, name, ""); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); } diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java index e212d7d97a1..3d387eb354b 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java @@ -20,43 +20,202 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Joiner; +import com.google.common.base.Strings; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.RemovalListener; +import 
com.google.common.cache.RemovalNotification; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + import java.security.Principal; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; /** * Provides convenience methods for audit logging consistently the different * types of events. */ public class KMSAudit { + + private static class AuditEvent { + private final AtomicLong accessCount = new AtomicLong(-1); + private final String keyName; + private final String user; + private final String op; + private final String extraMsg; + private final long startTime = System.currentTimeMillis(); + + private AuditEvent(String keyName, String user, String op, String msg) { + this.keyName = keyName; + this.user = user; + this.op = op; + this.extraMsg = msg; + } + + public String getExtraMsg() { + return extraMsg; + } + + public AtomicLong getAccessCount() { + return accessCount; + } + + public String getKeyName() { + return keyName; + } + + public String getUser() { + return user; + } + + public String getOp() { + return op; + } + + public long getStartTime() { + return startTime; + } + } + + public static enum OpStatus { + OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR; + } + + private static Set AGGREGATE_OPS_WHITELIST = Sets.newHashSet( + KMS.GET_KEY_VERSION, KMS.GET_CURRENT_KEY, KMS.DECRYPT_EEK, KMS.GENERATE_EEK + ); + + private Cache cache; + + private ScheduledExecutorService executor; + public static final String KMS_LOGGER_NAME = "kms-audit"; private static Logger AUDIT_LOG = LoggerFactory.getLogger(KMS_LOGGER_NAME); - private static void op(String status, String op, Principal user, String key, - String extraMsg) { - AUDIT_LOG.info("Status:{} User:{} Op:{} Name:{}{}", status, user.getName(), - op, key, extraMsg); + KMSAudit(long delay) { + cache = CacheBuilder.newBuilder() + .expireAfterWrite(delay, TimeUnit.MILLISECONDS) + .removalListener( + new RemovalListener() { + @Override + public void onRemoval( + RemovalNotification entry) { + AuditEvent event = entry.getValue(); + if (event.getAccessCount().get() > 0) { + KMSAudit.this.logEvent(event); + event.getAccessCount().set(0); + KMSAudit.this.cache.put(entry.getKey(), event); + } + } + }).build(); + executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder() + .setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build()); + executor.scheduleAtFixedRate(new Runnable() { + @Override + public void run() { + cache.cleanUp(); + } + }, delay / 10, delay / 10, TimeUnit.MILLISECONDS); } - public static void ok(Principal user, String op, String key, - String extraMsg) { - op("OK", op, user, key, extraMsg); - } - - public static void unauthorized(Principal user, String op, String key) { - op("UNAUTHORIZED", op, user, key, ""); - } - - public static void error(Principal user, String method, String url, - String extraMsg) { - AUDIT_LOG.info("Status:ERROR User:{} Method:{} URL:{} Exception:'{}'", - user.getName(), method, url, extraMsg); - } - - public static void unauthenticated(String remoteHost, String method, - String url, String extraMsg) { + private void logEvent(AuditEvent event) { AUDIT_LOG.info( - "Status:UNAUTHENTICATED RemoteHost:{} Method:{} URL:{} ErrorMsg:'{}'", - remoteHost, method, url, 
extraMsg); + "OK[op={}, key={}, user={}, accessCount={}, interval={}ms] {}", + event.getOp(), event.getKeyName(), event.getUser(), + event.getAccessCount().get(), + (System.currentTimeMillis() - event.getStartTime()), + event.getExtraMsg()); } + private void op(OpStatus opStatus, final String op, final String user, + final String key, final String extraMsg) { + if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key) + && !Strings.isNullOrEmpty(op) + && AGGREGATE_OPS_WHITELIST.contains(op)) { + String cacheKey = createCacheKey(user, key, op); + if (opStatus == OpStatus.UNAUTHORIZED) { + cache.invalidate(cacheKey); + AUDIT_LOG.info("UNAUTHORIZED[op={}, key={}, user={}] {}", op, key, user, + extraMsg); + } else { + try { + AuditEvent event = cache.get(cacheKey, new Callable() { + @Override + public AuditEvent call() throws Exception { + return new AuditEvent(key, user, op, extraMsg); + } + }); + // Log first access (initialized as -1 so + // incrementAndGet() == 0 implies first access) + if (event.getAccessCount().incrementAndGet() == 0) { + event.getAccessCount().incrementAndGet(); + logEvent(event); + } + } catch (ExecutionException ex) { + throw new RuntimeException(ex); + } + } + } else { + List kvs = new LinkedList(); + if (!Strings.isNullOrEmpty(op)) { + kvs.add("op=" + op); + } + if (!Strings.isNullOrEmpty(key)) { + kvs.add("key=" + key); + } + if (!Strings.isNullOrEmpty(user)) { + kvs.add("user=" + user); + } + if (kvs.size() == 0) { + AUDIT_LOG.info("{} {}", opStatus.toString(), extraMsg); + } else { + String join = Joiner.on(", ").join(kvs); + AUDIT_LOG.info("{}[{}] {}", opStatus.toString(), join, extraMsg); + } + } + } + + public void ok(Principal user, String op, String key, + String extraMsg) { + op(OpStatus.OK, op, user.getName(), key, extraMsg); + } + + public void ok(Principal user, String op, String extraMsg) { + op(OpStatus.OK, op, user.getName(), null, extraMsg); + } + + public void unauthorized(Principal user, String op, String key) { + op(OpStatus.UNAUTHORIZED, op, user.getName(), key, ""); + } + + public void error(Principal user, String method, String url, + String extraMsg) { + op(OpStatus.ERROR, null, user.getName(), null, "Method:'" + method + + "' Exception:'" + extraMsg + "'"); + } + + public void unauthenticated(String remoteHost, String method, + String url, String extraMsg) { + op(OpStatus.UNAUTHENTICATED, null, null, null, "RemoteHost:" + + remoteHost + " Method:" + method + + " URL:" + url + " ErrorMsg:'" + extraMsg + "'"); + } + + private static String createCacheKey(String user, String key, String op) { + return user + "#" + key + "#" + op; + } + + public void shutdown() { + executor.shutdownNow(); + } } diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java index f1872a24e8b..db60b097ee7 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java @@ -115,8 +115,10 @@ public void doFilter(ServletRequest request, ServletResponse response, if (queryString != null) { requestURL.append("?").append(queryString); } - KMSAudit.unauthenticated(request.getRemoteHost(), method, - requestURL.toString(), kmsResponse.msg); + + KMSWebApp.getKMSAudit().unauthenticated( + 
request.getRemoteHost(), method, requestURL.toString(), + kmsResponse.msg); } } diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java index e2b8fc4c093..30d742e7fe8 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java @@ -43,12 +43,17 @@ public class KMSConfiguration { // TImeout for the Current Key cache public static final String CURR_KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX + "current.key.cache.timeout.ms"; - + // Delay for Audit logs that need aggregation + public static final String KMS_AUDIT_AGGREGATION_DELAY = CONFIG_PREFIX + + "aggregation.delay.ms"; + public static final boolean KEY_CACHE_ENABLE_DEFAULT = true; // 10 mins public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 60 * 1000; // 30 secs public static final long CURR_KEY_CACHE_TIMEOUT_DEFAULT = 30 * 1000; + // 10 secs + public static final long KMS_AUDIT_AGGREGATION_DELAY_DEFAULT = 10000; static Configuration getConfiguration(boolean loadHadoopDefaults, String ... resources) { diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java index 1c4c32ddb7f..bf24ed8a108 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java @@ -20,9 +20,11 @@ import org.apache.hadoop.classification.InterfaceAudience; import com.sun.jersey.api.container.ContainerException; + import org.apache.hadoop.crypto.key.kms.KMSRESTConstants; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.authorize.AuthorizationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,6 +32,7 @@ import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; + import java.io.IOException; import java.security.Principal; import java.util.LinkedHashMap; @@ -83,6 +86,10 @@ public Response toResponse(Exception exception) { status = Response.Status.FORBIDDEN; // we don't audit here because we did it already when checking access doAudit = false; + } else if (throwable instanceof AuthorizationException) { + status = Response.Status.UNAUTHORIZED; + // we don't audit here because we did it already when checking access + doAudit = false; } else if (throwable instanceof AccessControlException) { status = Response.Status.FORBIDDEN; } else if (exception instanceof IOException) { @@ -95,7 +102,8 @@ public Response toResponse(Exception exception) { status = Response.Status.INTERNAL_SERVER_ERROR; } if (doAudit) { - KMSAudit.error(KMSMDCFilter.getPrincipal(), KMSMDCFilter.getMethod(), + KMSWebApp.getKMSAudit().error(KMSMDCFilter.getPrincipal(), + KMSMDCFilter.getMethod(), KMSMDCFilter.getURL(), getOneLineMessage(exception)); } return createResponse(status, throwable); diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java index d794463ac32..571ab965351 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java @@ -76,6 +76,7 @@ public class KMSWebApp implements ServletContextListener { private static Meter decryptEEKCallsMeter; private static Meter generateEEKCallsMeter; private static Meter invalidCallsMeter; + private static KMSAudit kmsAudit; private static KeyProviderCryptoExtension keyProviderCryptoExtension; static { @@ -144,6 +145,11 @@ public void contextInitialized(ServletContextEvent sce) { unauthenticatedCallsMeter = metricRegistry.register( UNAUTHENTICATED_CALLS_METER, new Meter()); + kmsAudit = + new KMSAudit(kmsConf.getLong( + KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY, + KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY_DEFAULT)); + // this is required for the the JMXJsonServlet to work properly. // the JMXJsonServlet is behind the authentication filter, // thus the '*' ACL. @@ -199,6 +205,7 @@ public void contextInitialized(ServletContextEvent sce) { @Override public void contextDestroyed(ServletContextEvent sce) { + kmsAudit.shutdown(); acls.stopReloader(); jmxReporter.stop(); jmxReporter.close(); @@ -245,4 +252,8 @@ public static Meter getUnauthenticatedCallsMeter() { public static KeyProviderCryptoExtension getKeyProvider() { return keyProviderCryptoExtension; } + + public static KMSAudit getKMSAudit() { + return kmsAudit; + } } diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index 41a2cd968af..557cafa2f8a 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -104,6 +104,25 @@ Hadoop Key Management Server (KMS) - Documentation Sets ${project.version} +---+ +** KMS Aggregated Audit logs + +Audit logs are aggregated for API accesses to the GET_KEY_VERSION, +GET_CURRENT_KEY, DECRYPT_EEK, GENERATE_EEK operations. + +Entries are grouped by the (user,key,operation) combined key for a configurable +aggregation interval after which the number of accesses to the specified +end-point by the user for a given key is flushed to the audit log. + +The Aggregation interval is configured via the property : + ++---+ + + hadoop.kms.aggregation.delay.ms + 10000 + ++---+ + + ** Start/Stop the KMS To start/stop KMS use KMS's bin/kms.sh script. For example: diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java new file mode 100644 index 00000000000..b5d9a36d198 --- /dev/null +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto.key.kms.server; + +import java.io.ByteArrayOutputStream; +import java.io.FilterOutputStream; +import java.io.OutputStream; +import java.io.PrintStream; +import java.security.Principal; + +import org.apache.log4j.LogManager; +import org.apache.log4j.PropertyConfigurator; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +public class TestKMSAudit { + + private PrintStream originalOut; + private ByteArrayOutputStream memOut; + private FilterOut filterOut; + private PrintStream capturedOut; + + private KMSAudit kmsAudit; + + private static class FilterOut extends FilterOutputStream { + public FilterOut(OutputStream out) { + super(out); + } + + public void setOutputStream(OutputStream out) { + this.out = out; + } + } + + @Before + public void setUp() { + originalOut = System.err; + memOut = new ByteArrayOutputStream(); + filterOut = new FilterOut(memOut); + capturedOut = new PrintStream(filterOut); + System.setErr(capturedOut); + PropertyConfigurator.configure(Thread.currentThread(). + getContextClassLoader() + .getResourceAsStream("log4j-kmsaudit.properties")); + this.kmsAudit = new KMSAudit(1000); + } + + @After + public void cleanUp() { + System.setErr(originalOut); + LogManager.resetConfiguration(); + kmsAudit.shutdown(); + } + + private String getAndResetLogOutput() { + capturedOut.flush(); + String logOutput = new String(memOut.toByteArray()); + memOut = new ByteArrayOutputStream(); + filterOut.setOutputStream(memOut); + return logOutput; + } + + @Test + public void testAggregation() throws Exception { + Principal luser = Mockito.mock(Principal.class); + Mockito.when(luser.getName()).thenReturn("luser"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DELETE_KEY, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.ROLL_NEW_VERSION, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + Thread.sleep(1500); + kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); + Thread.sleep(1500); + String out = getAndResetLogOutput(); + System.out.println(out); + Assert.assertTrue( + out.matches( + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" + // Not aggregated !! 
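// Editor's note (illustrative, not part of the original commit): DELETE_KEY
// and ROLL_NEW_VERSION appear as the two un-aggregated lines that follow
// because KMSAudit only aggregates the ops in AGGREGATE_OPS_WHITELIST
// (GET_KEY_VERSION, GET_CURRENT_KEY, DECRYPT_EEK, GENERATE_EEK); everything
// else is logged immediately in the "STATUS[op=..., key=..., user=...] msg"
// form. The repeated DECRYPT_EEK calls, by contrast, collapse into the
// aggregated accessCount=6 line expected further below once the one-second
// aggregation window configured in setUp() lapses.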
+ + "OK\\[op=DELETE_KEY, key=k1, user=luser\\] testmsg" + + "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser\\] testmsg" + // Aggregated + + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg")); + } + + @Test + public void testAggregationUnauth() throws Exception { + Principal luser = Mockito.mock(Principal.class); + Mockito.when(luser.getName()).thenReturn("luser"); + kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k2"); + Thread.sleep(1000); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k3"); + kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); + Thread.sleep(2000); + String out = getAndResetLogOutput(); + System.out.println(out); + Assert.assertTrue( + out.matches( + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser\\] " + + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg" + + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=5, interval=[^m]{1,4}ms\\] testmsg" + + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser\\] " + + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg")); + } + +} diff --git a/hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties b/hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties new file mode 100644 index 00000000000..cca6941d14b --- /dev/null +++ b/hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties @@ -0,0 +1,25 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# LOG Appender +log4j.appender.kms-audit=org.apache.log4j.ConsoleAppender +log4j.appender.kms-audit.Target=System.err +log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout +log4j.appender.kms-audit.layout.ConversionPattern=%m + +log4j.rootLogger=INFO, kms-audit \ No newline at end of file From 18360e71f1d993ad28be8a7ca8966bc40b8290b5 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Wed, 30 Jul 2014 18:37:41 +0000 Subject: [PATCH 085/354] HDFS-6768. Fix a few unit tests that use hard-coded port numbers. 
(Arpit Agarwal) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614732 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../org/apache/hadoop/TestGenericRefresh.java | 29 +++++++++----- .../apache/hadoop/TestRefreshCallQueue.java | 40 ++++++++++++++----- .../server/datanode/TestBlockRecovery.java | 1 - 4 files changed, 51 insertions(+), 22 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1e33c194b54..61e48af5819 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -956,6 +956,9 @@ Release 2.5.0 - UNRELEASED HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config (brandonli) + HDFS-6768. Fix a few unit tests that use hard-coded port numbers. (Arpit + Agarwal) + BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java index 664a478f27a..3c73c28c2a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java @@ -47,7 +47,6 @@ public class TestGenericRefresh { private static MiniDFSCluster cluster; private static Configuration config; - private static final int NNPort = 54222; private static RefreshHandler firstHandler; private static RefreshHandler secondHandler; @@ -57,8 +56,8 @@ public static void setUpBeforeClass() throws Exception { config = new Configuration(); config.set("hadoop.security.authorization", "true"); - FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort); - cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build(); + FileSystem.setDefaultUri(config, "hdfs://localhost:0"); + cluster = new MiniDFSCluster.Builder(config).build(); cluster.waitActive(); } @@ -103,7 +102,8 @@ public void testInvalidCommand() throws Exception { @Test public void testInvalidIdentifier() throws Exception { DFSAdmin admin = new DFSAdmin(config); - String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"}; + String [] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "unregisteredIdentity"}; int exitCode = admin.run(args); assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode); } @@ -111,7 +111,8 @@ public void testInvalidIdentifier() throws Exception { @Test public void testValidIdentifier() throws Exception { DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"}; + String[] args = new String[]{"-refresh", + "localhost:" + cluster.getNameNodePort(), "firstHandler"}; int exitCode = admin.run(args); assertEquals("DFSAdmin should succeed", 0, exitCode); @@ -124,11 +125,13 @@ public void testValidIdentifier() throws Exception { @Test public void testVariableArgs() throws Exception { DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "secondHandler", "one"}; int exitCode = admin.run(args); assertEquals("DFSAdmin should return 2", 2, exitCode); - exitCode = 
admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"}); + exitCode = admin.run(new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "secondHandler", "one", "two"}); assertEquals("DFSAdmin should now return 3", 3, exitCode); Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"}); @@ -141,7 +144,8 @@ public void testUnregistration() throws Exception { // And now this should fail DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "firstHandler"}; int exitCode = admin.run(args); assertEquals("DFSAdmin should return -1", -1, exitCode); } @@ -161,7 +165,8 @@ public void testMultipleRegistration() throws Exception { // this should trigger both DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "sharedId", "one"}; int exitCode = admin.run(args); assertEquals(-1, exitCode); // -1 because one of the responses is unregistered @@ -189,7 +194,8 @@ public void testMultipleReturnCodeMerging() throws Exception { // We refresh both DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "shared"}; int exitCode = admin.run(args); assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes @@ -215,7 +221,8 @@ public void testExceptionResultsInNormalError() throws Exception { RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler); DFSAdmin admin = new DFSAdmin(config); - String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"}; + String[] args = new String[]{"-refresh", "localhost:" + + cluster.getNameNodePort(), "exceptional"}; int exitCode = admin.run(args); assertEquals(-1, exitCode); // Exceptions result in a -1 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java index 9b0acbc8ca6..f66f9b6105f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java @@ -24,6 +24,8 @@ import static org.junit.Assert.fail; import java.io.IOException; +import java.net.BindException; +import java.util.Random; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; @@ -42,24 +44,42 @@ public class TestRefreshCallQueue { private FileSystem fs; static int mockQueueConstructions; static int mockQueuePuts; - private static final int NNPort = 54222; - private static String CALLQUEUE_CONFIG_KEY = "ipc." 
+ NNPort + ".callqueue.impl"; + private String callQueueConfigKey = ""; + private final Random rand = new Random(); @Before public void setUp() throws Exception { // We want to count additional events, so we reset here mockQueueConstructions = 0; mockQueuePuts = 0; + int portRetries = 5; + int nnPort; - config = new Configuration(); - config.setClass(CALLQUEUE_CONFIG_KEY, - MockCallQueue.class, BlockingQueue.class); - config.set("hadoop.security.authorization", "true"); + for (; portRetries > 0; --portRetries) { + // Pick a random port in the range [30000,60000). + nnPort = 30000 + rand.nextInt(30000); + config = new Configuration(); + callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl"; + config.setClass(callQueueConfigKey, + MockCallQueue.class, BlockingQueue.class); + config.set("hadoop.security.authorization", "true"); - FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort); - fs = FileSystem.get(config); - cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build(); - cluster.waitActive(); + FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort); + fs = FileSystem.get(config); + + try { + cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build(); + cluster.waitActive(); + break; + } catch (BindException be) { + // Retry with a different port number. + } + } + + if (portRetries == 0) { + // Bail if we get very unlucky with our choice of ports. + fail("Failed to pick an ephemeral port for the NameNode RPC server."); + } } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index a3622a465ad..67805c08f63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -590,7 +590,6 @@ public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception { Configuration conf = new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000"); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) - .nnTopology(MiniDFSNNTopology.simpleSingleNN(8020, 50070)) .numDataNodes(1).build(); try { cluster.waitClusterUp(); From 74059536928beafed48a6a55a3b0146256d51f90 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 30 Jul 2014 18:39:48 +0000 Subject: [PATCH 086/354] HDFS-6692. Add more HDFS encryption tests. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1614735 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../namenode/EncryptionFaultInjector.java | 22 +++ .../hdfs/server/namenode/FSNamesystem.java | 2 +- .../hadoop/hdfs/TestEncryptionZones.java | 185 +++++++++++++++++- 4 files changed, 208 insertions(+), 3 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 34a86e39db7..560eb5e9ad5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -70,6 +70,8 @@ fs-encryption (Unreleased) HDFS-6730. Create a .RAW extended attribute namespace. (clamb) + HDFS-6692. 
Add more HDFS encryption tests. (wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java new file mode 100644 index 00000000000..2e65a892046 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java @@ -0,0 +1,22 @@ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Used to inject certain faults for testing. + */ +public class EncryptionFaultInjector { + @VisibleForTesting + public static EncryptionFaultInjector instance = + new EncryptionFaultInjector(); + + @VisibleForTesting + public static EncryptionFaultInjector getInstance() { + return instance; + } + + @VisibleForTesting + public void startFileAfterGenerateKey() throws IOException {} +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 3ad238bffc1..e4c7509ea02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2498,7 +2498,7 @@ private HdfsFileStatus startFileInt(final String srcArg, // Generate EDEK if necessary while not holding the lock EncryptedKeyVersion edek = generateEncryptedDataEncryptionKey(ezKeyName); - + EncryptionFaultInjector.getInstance().startFileAfterGenerateKey(); // Try to create the file with the computed cipher suite and EDEK writeLock(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index e1fb878139b..c0551f2896e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -23,6 +23,12 @@ import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; import com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; @@ -42,6 +48,7 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector; import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -103,6 +110,7 @@ public void teardown() { if (cluster != null) { cluster.shutdown(); } + EncryptionFaultInjector.instance = new EncryptionFaultInjector(); } public void assertNumZones(final int numZones) throws IOException { @@ -158,7 +166,8 @@ public void testBasicOperations() throws Exception { int numZones = 0; /* Test failure of 
create EZ on a directory that doesn't exist. */ - final Path zone1 = new Path("/zone1"); + final Path zoneParent = new Path("/zones"); + final Path zone1 = new Path(zoneParent, "zone1"); try { dfsAdmin.createEncryptionZone(zone1, TEST_KEY); fail("expected /test doesn't exist"); @@ -189,6 +198,14 @@ public void testBasicOperations() throws Exception { assertExceptionContains("already in an encryption zone", e); } + /* create EZ on parent of an EZ should fail */ + try { + dfsAdmin.createEncryptionZone(zoneParent, TEST_KEY); + fail("EZ over an EZ"); + } catch (IOException e) { + assertExceptionContains("encryption zone for a non-empty directory", e); + } + /* create EZ on a folder with a folder fails */ final Path notEmpty = new Path("/notEmpty"); final Path notEmptyChild = new Path(notEmpty, "child"); @@ -449,6 +466,7 @@ public void testCreateEZWithNoProvider() throws Exception { final Configuration clusterConf = cluster.getConfiguration(0); clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ""); cluster.restartNameNode(true); + cluster.waitActive(); /* Test failure of create EZ on a directory that doesn't exist. */ final Path zone1 = new Path("/zone1"); /* Normal creation of an EZ */ @@ -462,6 +480,169 @@ public void testCreateEZWithNoProvider() throws Exception { clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" ); - cluster.restartNameNode(true); + // Try listing EZs as well + List zones = dfsAdmin.listEncryptionZones(); + assertEquals("Expected no zones", 0, zones.size()); + } + + private class MyInjector extends EncryptionFaultInjector { + int generateCount; + CountDownLatch ready; + CountDownLatch wait; + + public MyInjector() { + this.ready = new CountDownLatch(1); + this.wait = new CountDownLatch(1); + } + + @Override + public void startFileAfterGenerateKey() throws IOException { + ready.countDown(); + try { + wait.await(); + } catch (InterruptedException e) { + throw new IOException(e); + } + generateCount++; + } + } + + private class CreateFileTask implements Callable { + private FileSystemTestWrapper fsWrapper; + private Path name; + + CreateFileTask(FileSystemTestWrapper fsWrapper, Path name) { + this.fsWrapper = fsWrapper; + this.name = name; + } + + @Override + public Void call() throws Exception { + fsWrapper.createFile(name); + return null; + } + } + + private class InjectFaultTask implements Callable { + final Path zone1 = new Path("/zone1"); + final Path file = new Path(zone1, "file1"); + final ExecutorService executor = Executors.newSingleThreadExecutor(); + + MyInjector injector; + + @Override + public Void call() throws Exception { + // Set up the injector + injector = new MyInjector(); + EncryptionFaultInjector.instance = injector; + Future future = + executor.submit(new CreateFileTask(fsWrapper, file)); + injector.ready.await(); + // Do the fault + doFault(); + // Allow create to proceed + injector.wait.countDown(); + future.get(); + // Cleanup and postconditions + doCleanup(); + return null; + } + + public void doFault() throws Exception {} + + public void doCleanup() throws Exception {} + } + + /** + * Tests the retry logic in startFile. We release the lock while generating + * an EDEK, so tricky things can happen in the intervening time. 
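   * (Editor's sketch, not part of the original patch.) The hook this test
   * drives is the single line added to FSNamesystem#startFileInt above:
   *
   *   EncryptedKeyVersion edek = generateEncryptedDataEncryptionKey(ezKeyName);
   *   EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
   *   writeLock();   // the namesystem lock is only re-acquired after the injection point
   *
   * MyInjector overrides startFileAfterGenerateKey() to count down "ready"
   * and then block on "wait", which gives each InjectFaultTask a window in
   * which to create, delete or swap the encryption zone before the create
   * is allowed to proceed.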
+ */ + @Test(timeout = 120000) + public void testStartFileRetry() throws Exception { + final Path zone1 = new Path("/zone1"); + final Path file = new Path(zone1, "file1"); + fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); + ExecutorService executor = Executors.newSingleThreadExecutor(); + + // Test when the parent directory becomes an EZ + executor.submit(new InjectFaultTask() { + @Override + public void doFault() throws Exception { + dfsAdmin.createEncryptionZone(zone1, TEST_KEY); + } + @Override + public void doCleanup() throws Exception { + assertEquals("Expected a startFile retry", 2, injector.generateCount); + fsWrapper.delete(file, false); + } + }).get(); + + // Test when the parent directory unbecomes an EZ + executor.submit(new InjectFaultTask() { + @Override + public void doFault() throws Exception { + fsWrapper.delete(zone1, true); + } + @Override + public void doCleanup() throws Exception { + assertEquals("Expected no startFile retries", 1, injector.generateCount); + fsWrapper.delete(file, false); + } + }).get(); + + // Test when the parent directory becomes a different EZ + fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); + final String otherKey = "otherKey"; + createKey(otherKey); + dfsAdmin.createEncryptionZone(zone1, TEST_KEY); + + executor.submit(new InjectFaultTask() { + @Override + public void doFault() throws Exception { + fsWrapper.delete(zone1, true); + fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); + dfsAdmin.createEncryptionZone(zone1, otherKey); + } + @Override + public void doCleanup() throws Exception { + assertEquals("Expected a startFile retry", 2, injector.generateCount); + fsWrapper.delete(zone1, true); + } + }).get(); + + // Test that the retry limit leads to an error + fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); + final String anotherKey = "anotherKey"; + createKey(anotherKey); + dfsAdmin.createEncryptionZone(zone1, anotherKey); + String keyToUse = otherKey; + + MyInjector injector = new MyInjector(); + EncryptionFaultInjector.instance = injector; + Future future = executor.submit(new CreateFileTask(fsWrapper, file)); + + // Flip-flop between two EZs to repeatedly fail + for (int i=0; i<10; i++) { + injector.ready.await(); + fsWrapper.delete(zone1, true); + fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); + dfsAdmin.createEncryptionZone(zone1, keyToUse); + if (keyToUse == otherKey) { + keyToUse = anotherKey; + } else { + keyToUse = otherKey; + } + injector.wait.countDown(); + injector = new MyInjector(); + EncryptionFaultInjector.instance = injector; + } + try { + future.get(); + fail("Expected exception from too many retries"); + } catch (ExecutionException e) { + assertExceptionContains( + "Too many retries because of encryption zone operations", + e.getCause()); + } } } From ab47b666d0b6580230afda951eec60c555c4c3d4 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Wed, 30 Jul 2014 20:00:24 +0000 Subject: [PATCH 087/354] HDFS-6785. Should not be able to create encryption zone using path to a non-directory file. 
(clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1614755 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt | 3 +++ .../hdfs/server/namenode/EncryptionZoneManager.java | 5 +++++ .../java/org/apache/hadoop/hdfs/TestEncryptionZones.java | 8 ++++++++ 3 files changed, 16 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 560eb5e9ad5..743873ea744 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -78,3 +78,6 @@ fs-encryption (Unreleased) HDFS-6733. Creating encryption zone results in NPE when KeyProvider is null. (clamb) + + HDFS-6785. Should not be able to create encryption zone using path + to a non-directory file. (clamb) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index 7b1331d1496..a083ea3a87c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -213,6 +213,11 @@ XAttr createEncryptionZone(String src, String keyName) } final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false); + if (srcIIP != null && + srcIIP.getLastINode() != null && + !srcIIP.getLastINode().isDirectory()) { + throw new IOException("Attempt to create an encryption zone for a file."); + } EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP); if (ezi != null) { throw new IOException("Directory " + src + " is already in an " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index c0551f2896e..78f8d8ef3a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -227,6 +227,14 @@ public void testBasicOperations() throws Exception { assertExceptionContains("create an encryption zone", e); } + /* Test failure of create EZ on a file. */ + try { + dfsAdmin.createEncryptionZone(notEmptyChild, TEST_KEY); + fail("Created EZ on a file"); + } catch (IOException e) { + assertExceptionContains("create an encryption zone for a file.", e); + } + /* Test failure of creating an EZ passing a key that doesn't exist. */ final Path zone2 = new Path("/zone2"); fsWrapper.mkdir(zone2, FsPermission.getDirDefault(), false); From 4996bf8257a64b876e0f3ace5597c6778db2801a Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 30 Jul 2014 22:16:40 +0000 Subject: [PATCH 088/354] HADOOP-10910. Increase findbugs maxHeap size. 
(wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614779 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ hadoop-project-dist/pom.xml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 9805a01bd7b..08c16fe16c9 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -816,6 +816,8 @@ Release 2.5.0 - UNRELEASED HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka via Arpit Agarwal) + HADOOP-10910. Increase findbugs maxHeap size. (wang) + BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS HADOOP-10520. Extended attributes definition and FileSystem APIs for diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml index dcb3875a12c..7b25508edbe 100644 --- a/hadoop-project-dist/pom.xml +++ b/hadoop-project-dist/pom.xml @@ -100,6 +100,8 @@ findbugs-maven-plugin ${basedir}/dev-support/findbugsExcludeFile.xml + true + 2048 From b8b8f3f5e7214d6fcfc30e1b94ff097e52868f4f Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Thu, 31 Jul 2014 06:02:46 +0000 Subject: [PATCH 089/354] HDFS-6441. Add ability to exclude/include specific datanodes while balancing. (Contributed by Benoy Antony and Yu Li) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614812 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/util/StringUtils.java | 14 + hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/server/balancer/Balancer.java | 151 +++++- .../hdfs/server/balancer/TestBalancer.java | 469 +++++++++++++++++- .../balancer/TestBalancerWithHANameNodes.java | 4 +- .../TestBalancerWithMultipleNameNodes.java | 4 +- .../balancer/TestBalancerWithNodeGroup.java | 4 +- 7 files changed, 609 insertions(+), 40 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index e7f983ac668..4e2783df88d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -27,6 +27,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Date; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; @@ -377,6 +378,19 @@ public static String[] getTrimmedStrings(String str){ return str.trim().split("\\s*,\\s*"); } + /** + * Trims all the strings in a Collection and returns a Set. + * @param strings + * @return + */ + public static Set getTrimmedStrings(Collection strings) { + Set trimmedStrings = new HashSet(); + for (String string: strings) { + trimmedStrings.add(string.trim()); + } + return trimmedStrings; + } + final public static String[] emptyStringArray = {}; final public static char COMMA = ','; final public static String COMMA_STR = ","; diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 61e48af5819..880e209f97e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -335,6 +335,9 @@ Release 2.6.0 - UNRELEASED HDFS-6570. add api that enables checking if a user has certain permissions on a file. (Jitendra Pandey via cnauroth) + HDFS-6441. 
Add ability to exclude/include specific datanodes while + balancing. (Benoy Antony and Yu Li via Arpit Agarwal) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 5dbdd643cdf..1ddb3a41993 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -45,6 +45,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -83,6 +84,7 @@ import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.HostsFileReader; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; @@ -203,12 +205,20 @@ public class Balancer { + "\n\t[-policy ]\tthe balancing policy: " + BalancingPolicy.Node.INSTANCE.getName() + " or " + BalancingPolicy.Pool.INSTANCE.getName() - + "\n\t[-threshold ]\tPercentage of disk capacity"; + + "\n\t[-threshold ]\tPercentage of disk capacity" + + "\n\t[-exclude [-f | comma-sperated list of hosts]]" + + "\tExcludes the specified datanodes." + + "\n\t[-include [-f | comma-sperated list of hosts]]" + + "\tIncludes only the specified datanodes."; private final NameNodeConnector nnc; private final BalancingPolicy policy; private final SaslDataTransferClient saslClient; private final double threshold; + // set of data nodes to be excluded from balancing operations. + Set nodesToBeExcluded; + //Restrict balancing to the following nodes. 
+ Set nodesToBeIncluded; // all data node lists private final Collection overUtilizedDatanodes @@ -869,6 +879,8 @@ private static void checkReplicationPolicyCompatibility(Configuration conf Balancer(NameNodeConnector theblockpool, Parameters p, Configuration conf) { this.threshold = p.threshold; this.policy = p.policy; + this.nodesToBeExcluded = p.nodesToBeExcluded; + this.nodesToBeIncluded = p.nodesToBeIncluded; this.nnc = theblockpool; cluster = NetworkTopology.getInstance(conf); @@ -905,8 +917,13 @@ private static void checkReplicationPolicyCompatibility(Configuration conf private long initNodes(DatanodeInfo[] datanodes) { // compute average utilization for (DatanodeInfo datanode : datanodes) { - if (datanode.isDecommissioned() || datanode.isDecommissionInProgress()) { - continue; // ignore decommissioning or decommissioned nodes + // ignore decommissioning or decommissioned nodes or + // ignore nodes in exclude list + // or nodes not in the include list (if include list is not empty) + if (datanode.isDecommissioned() || datanode.isDecommissionInProgress() || + Util.shouldBeExcluded(nodesToBeExcluded, datanode) || + !Util.shouldBeIncluded(nodesToBeIncluded, datanode)) { + continue; } policy.accumulateSpaces(datanode); } @@ -919,8 +936,16 @@ private long initNodes(DatanodeInfo[] datanodes) { */ long overLoadedBytes = 0L, underLoadedBytes = 0L; for (DatanodeInfo datanode : DFSUtil.shuffle(datanodes)) { - if (datanode.isDecommissioned() || datanode.isDecommissionInProgress()) { - continue; // ignore decommissioning or decommissioned nodes + // ignore decommissioning or decommissioned nodes or + // ignore nodes in exclude list + // or nodes not in the include list (if include list is not empty) + if (datanode.isDecommissioned() || datanode.isDecommissionInProgress() || + Util.shouldBeExcluded(nodesToBeExcluded, datanode) || + !Util.shouldBeIncluded(nodesToBeIncluded, datanode)) { + if (LOG.isTraceEnabled()) { + LOG.trace("Excluding datanode " + datanode); + } + continue; } cluster.add(datanode); BalancerDatanode datanodeS; @@ -1526,21 +1551,101 @@ private static String time2Str(long elapsedTime) { } static class Parameters { - static final Parameters DEFALUT = new Parameters( - BalancingPolicy.Node.INSTANCE, 10.0); + static final Parameters DEFAULT = new Parameters( + BalancingPolicy.Node.INSTANCE, 10.0, + Collections. emptySet(), Collections. emptySet()); final BalancingPolicy policy; final double threshold; + // exclude the nodes in this set from balancing operations + Set nodesToBeExcluded; + //include only these nodes in balancing operations + Set nodesToBeIncluded; - Parameters(BalancingPolicy policy, double threshold) { + Parameters(BalancingPolicy policy, double threshold, + Set nodesToBeExcluded, Set nodesToBeIncluded) { this.policy = policy; this.threshold = threshold; + this.nodesToBeExcluded = nodesToBeExcluded; + this.nodesToBeIncluded = nodesToBeIncluded; } @Override public String toString() { return Balancer.class.getSimpleName() + "." + getClass().getSimpleName() - + "[" + policy + ", threshold=" + threshold + "]"; + + "[" + policy + ", threshold=" + threshold + + ", number of nodes to be excluded = "+ nodesToBeExcluded.size() + + ", number of nodes to be included = "+ nodesToBeIncluded.size() +"]"; + } + } + + static class Util { + + /** + * @param datanode + * @return returns true if data node is part of the excludedNodes. 
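   * (Editor's illustration; the host names below are hypothetical, not taken
   * from the patch.) A datanode matches the set if the set contains its
   * hostname, its peer hostname or its IP address, each either bare or
   * suffixed with the transfer port, so for a node reporting hostname
   * dn1.example.com, IP 10.0.0.7 and xfer port 50010 any of
   *
   *   dn1.example.com
   *   dn1.example.com:50010
   *   10.0.0.7
   *   10.0.0.7:50010
   *
   * matches. On the command line the sets are supplied either inline or via
   * a file, e.g.
   *
   *   hdfs balancer -exclude dn1.example.com,10.0.0.7:50010
   *   hdfs balancer -include -f /tmp/included-hosts.txt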
+ */ + static boolean shouldBeExcluded(Set excludedNodes, DatanodeInfo datanode) { + return isIn(excludedNodes, datanode); + } + + /** + * @param datanode + * @return returns true if includedNodes is empty or data node is part of the includedNodes. + */ + static boolean shouldBeIncluded(Set includedNodes, DatanodeInfo datanode) { + return (includedNodes.isEmpty() || + isIn(includedNodes, datanode)); + } + /** + * Match is checked using host name , ip address with and without port number. + * @param datanodeSet + * @param datanode + * @return true if the datanode's transfer address matches the set of nodes. + */ + private static boolean isIn(Set datanodeSet, DatanodeInfo datanode) { + return isIn(datanodeSet, datanode.getPeerHostName(), datanode.getXferPort()) || + isIn(datanodeSet, datanode.getIpAddr(), datanode.getXferPort()) || + isIn(datanodeSet, datanode.getHostName(), datanode.getXferPort()); + } + + /** + * returns true if nodes contains host or host:port + * @param nodes + * @param host + * @param port + * @return + */ + private static boolean isIn(Set nodes, String host, int port) { + if (host == null) { + return false; + } + return (nodes.contains(host) || nodes.contains(host +":"+ port)); + } + + /** + * parse a comma separated string to obtain set of host names + * @param string + * @return + */ + static Set parseHostList(String string) { + String[] addrs = StringUtils.getTrimmedStrings(string); + return new HashSet(Arrays.asList(addrs)); + } + + /** + * read set of host names from a file + * @param fileName + * @return + */ + static Set getHostListFromFile(String fileName) { + Set nodes = new HashSet (); + try { + HostsFileReader.readFileToSet("nodes", fileName, nodes); + return StringUtils.getTrimmedStrings(nodes); + } catch (IOException e) { + throw new IllegalArgumentException("Unable to open file: " + fileName); + } } } @@ -1578,8 +1683,10 @@ public int run(String[] args) { /** parse command line arguments */ static Parameters parse(String[] args) { - BalancingPolicy policy = Parameters.DEFALUT.policy; - double threshold = Parameters.DEFALUT.threshold; + BalancingPolicy policy = Parameters.DEFAULT.policy; + double threshold = Parameters.DEFAULT.threshold; + Set nodesTobeExcluded = Parameters.DEFAULT.nodesToBeExcluded; + Set nodesTobeIncluded = Parameters.DEFAULT.nodesToBeIncluded; if (args != null) { try { @@ -1608,18 +1715,38 @@ static Parameters parse(String[] args) { System.err.println("Illegal policy name: " + args[i]); throw e; } + } else if ("-exclude".equalsIgnoreCase(args[i])) { + i++; + if ("-f".equalsIgnoreCase(args[i])) { + nodesTobeExcluded = Util.getHostListFromFile(args[++i]); + } else { + nodesTobeExcluded = Util.parseHostList(args[i]); + } + } else if ("-include".equalsIgnoreCase(args[i])) { + i++; + if ("-f".equalsIgnoreCase(args[i])) { + nodesTobeIncluded = Util.getHostListFromFile(args[++i]); + } else { + nodesTobeIncluded = Util.parseHostList(args[i]); + } } else { throw new IllegalArgumentException("args = " + Arrays.toString(args)); } } + if (!nodesTobeExcluded.isEmpty() && !nodesTobeIncluded.isEmpty()) { + System.err.println( + "-exclude and -include options cannot be specified together."); + throw new IllegalArgumentException( + "-exclude and -include options cannot be specified together."); + } } catch(RuntimeException e) { printUsage(System.err); throw e; } } - return new Parameters(policy, threshold); + return new Parameters(policy, threshold, nodesTobeExcluded, nodesTobeIncluded); } private static void printUsage(PrintStream out) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index fe774aaac9f..e9c86dfe3f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -18,17 +18,23 @@ package org.apache.hadoop.hdfs.server.balancer; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.io.File; import java.io.IOException; +import java.io.PrintWriter; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Random; +import java.util.Set; import java.util.concurrent.TimeoutException; +import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -48,6 +54,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli; +import org.apache.hadoop.hdfs.server.balancer.Balancer.Parameters; +import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; @@ -255,6 +263,18 @@ static void waitForHeartBeat(long expectedUsedSpace, } } } + + /** + * Wait until balanced: each datanode gives utilization within + * BALANCE_ALLOWED_VARIANCE of average + * @throws IOException + * @throws TimeoutException + */ + static void waitForBalancer(long totalUsedSpace, long totalCapacity, + ClientProtocol client, MiniDFSCluster cluster, Balancer.Parameters p) + throws IOException, TimeoutException { + waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, 0); + } /** * Wait until balanced: each datanode gives utilization within @@ -263,11 +283,17 @@ static void waitForHeartBeat(long expectedUsedSpace, * @throws TimeoutException */ static void waitForBalancer(long totalUsedSpace, long totalCapacity, - ClientProtocol client, MiniDFSCluster cluster) - throws IOException, TimeoutException { + ClientProtocol client, MiniDFSCluster cluster, Balancer.Parameters p, + int expectedExcludedNodes) throws IOException, TimeoutException { long timeout = TIMEOUT; long failtime = (timeout <= 0L) ? 
Long.MAX_VALUE : Time.now() + timeout; + if (!p.nodesToBeIncluded.isEmpty()) { + totalCapacity = p.nodesToBeIncluded.size() * CAPACITY; + } + if (!p.nodesToBeExcluded.isEmpty()) { + totalCapacity -= p.nodesToBeExcluded.size() * CAPACITY; + } final double avgUtilization = ((double)totalUsedSpace) / totalCapacity; boolean balanced; do { @@ -275,9 +301,20 @@ static void waitForBalancer(long totalUsedSpace, long totalCapacity, client.getDatanodeReport(DatanodeReportType.ALL); assertEquals(datanodeReport.length, cluster.getDataNodes().size()); balanced = true; + int actualExcludedNodeCount = 0; for (DatanodeInfo datanode : datanodeReport) { double nodeUtilization = ((double)datanode.getDfsUsed()) / datanode.getCapacity(); + if (Balancer.Util.shouldBeExcluded(p.nodesToBeExcluded, datanode)) { + assertTrue(nodeUtilization == 0); + actualExcludedNodeCount++; + continue; + } + if (!Balancer.Util.shouldBeIncluded(p.nodesToBeIncluded, datanode)) { + assertTrue(nodeUtilization == 0); + actualExcludedNodeCount++; + continue; + } if (Math.abs(avgUtilization - nodeUtilization) > BALANCE_ALLOWED_VARIANCE) { balanced = false; if (Time.now() > failtime) { @@ -294,6 +331,7 @@ static void waitForBalancer(long totalUsedSpace, long totalCapacity, break; } } + assertEquals(expectedExcludedNodes,actualExcludedNodeCount); } while (!balanced); } @@ -307,22 +345,118 @@ String long2String(long[] array) { } return b.append("]").toString(); } - /** This test start a cluster with specified number of nodes, + /** + * Class which contains information about the + * new nodes to be added to the cluster for balancing. + */ + static abstract class NewNodeInfo { + + Set nodesToBeExcluded = new HashSet(); + Set nodesToBeIncluded = new HashSet(); + + abstract String[] getNames(); + abstract int getNumberofNewNodes(); + abstract int getNumberofIncludeNodes(); + abstract int getNumberofExcludeNodes(); + + public Set getNodesToBeIncluded() { + return nodesToBeIncluded; + } + public Set getNodesToBeExcluded() { + return nodesToBeExcluded; + } + } + + /** + * The host names of new nodes are specified + */ + static class HostNameBasedNodes extends NewNodeInfo { + String[] hostnames; + + public HostNameBasedNodes(String[] hostnames, + Set nodesToBeExcluded, Set nodesToBeIncluded) { + this.hostnames = hostnames; + this.nodesToBeExcluded = nodesToBeExcluded; + this.nodesToBeIncluded = nodesToBeIncluded; + } + + @Override + String[] getNames() { + return hostnames; + } + @Override + int getNumberofNewNodes() { + return hostnames.length; + } + @Override + int getNumberofIncludeNodes() { + return nodesToBeIncluded.size(); + } + @Override + int getNumberofExcludeNodes() { + return nodesToBeExcluded.size(); + } + } + + /** + * The number of data nodes to be started are specified. + * The data nodes will have same host name, but different port numbers. 
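   * (Editor's illustration; the call below is hypothetical and not taken
   * from the original tests.) A test passes one of the NewNodeInfo
   * implementations through doTest, e.g.
   *
   *   doTest(conf, capacities, racks, newCapacity, newRack,
   *       new PortNumberBasedNodes(3, 2, 0), true, false);
   *
   * which starts three additional datanodes and records the transfer
   * addresses of the last two as excluded, so waitForBalancer later asserts
   * that those nodes stay at zero utilization.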
+ * + */ + static class PortNumberBasedNodes extends NewNodeInfo { + int newNodes; + int excludeNodes; + int includeNodes; + + public PortNumberBasedNodes(int newNodes, int excludeNodes, int includeNodes) { + this.newNodes = newNodes; + this.excludeNodes = excludeNodes; + this.includeNodes = includeNodes; + } + + @Override + String[] getNames() { + return null; + } + @Override + int getNumberofNewNodes() { + return newNodes; + } + @Override + int getNumberofIncludeNodes() { + return includeNodes; + } + @Override + int getNumberofExcludeNodes() { + return excludeNodes; + } + } + + private void doTest(Configuration conf, long[] capacities, String[] racks, + long newCapacity, String newRack, boolean useTool) throws Exception { + doTest(conf, capacities, racks, newCapacity, newRack, null, useTool, false); + } + + /** This test start a cluster with specified number of nodes, * and fills it to be 30% full (with a single file replicated identically * to all datanodes); * It then adds one new empty node and starts balancing. - * + * * @param conf - configuration * @param capacities - array of capacities of original nodes in cluster * @param racks - array of racks for original nodes in cluster * @param newCapacity - new node's capacity * @param newRack - new node's rack + * @param nodes - information about new nodes to be started. * @param useTool - if true run test via Cli with command-line argument * parsing, etc. Otherwise invoke balancer API directly. + * @param useFile - if true, the hosts to included or excluded will be stored in a + * file and then later read from the file. * @throws Exception */ - private void doTest(Configuration conf, long[] capacities, String[] racks, - long newCapacity, String newRack, boolean useTool) throws Exception { + private void doTest(Configuration conf, long[] capacities, + String[] racks, long newCapacity, String newRack, NewNodeInfo nodes, + boolean useTool, boolean useFile) throws Exception { LOG.info("capacities = " + long2String(capacities)); LOG.info("racks = " + Arrays.asList(racks)); LOG.info("newCapacity= " + newCapacity); @@ -346,17 +480,75 @@ private void doTest(Configuration conf, long[] capacities, String[] racks, long totalUsedSpace = totalCapacity*3/10; createFile(cluster, filePath, totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 0); - // start up an empty node with the same capacity and on the same rack - cluster.startDataNodes(conf, 1, true, null, - new String[]{newRack}, new long[]{newCapacity}); - totalCapacity += newCapacity; + if (nodes == null) { // there is no specification of new nodes. + // start up an empty node with the same capacity and on the same rack + cluster.startDataNodes(conf, 1, true, null, + new String[]{newRack}, null,new long[]{newCapacity}); + totalCapacity += newCapacity; + } else { + //if running a test with "include list", include original nodes as well + if (nodes.getNumberofIncludeNodes()>0) { + for (DataNode dn: cluster.getDataNodes()) + nodes.getNodesToBeIncluded().add(dn.getDatanodeId().getHostName()); + } + String[] newRacks = new String[nodes.getNumberofNewNodes()]; + long[] newCapacities = new long[nodes.getNumberofNewNodes()]; + for (int i=0; i < nodes.getNumberofNewNodes(); i++) { + newRacks[i] = newRack; + newCapacities[i] = newCapacity; + } + // if host names are specified for the new nodes to be created. 
+ if (nodes.getNames() != null) { + cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null, + newRacks, nodes.getNames(), newCapacities); + totalCapacity += newCapacity*nodes.getNumberofNewNodes(); + } else { // host names are not specified + cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null, + newRacks, null, newCapacities); + totalCapacity += newCapacity*nodes.getNumberofNewNodes(); + //populate the include nodes + if (nodes.getNumberofIncludeNodes() > 0) { + int totalNodes = cluster.getDataNodes().size(); + for (int i=0; i < nodes.getNumberofIncludeNodes(); i++) { + nodes.getNodesToBeIncluded().add (cluster.getDataNodes().get( + totalNodes-1-i).getDatanodeId().getXferAddr()); + } + } + //polulate the exclude nodes + if (nodes.getNumberofExcludeNodes() > 0) { + int totalNodes = cluster.getDataNodes().size(); + for (int i=0; i < nodes.getNumberofExcludeNodes(); i++) { + nodes.getNodesToBeExcluded().add (cluster.getDataNodes().get( + totalNodes-1-i).getDatanodeId().getXferAddr()); + } + } + } + } + // run balancer and validate results + Balancer.Parameters p = Balancer.Parameters.DEFAULT; + if (nodes != null) { + p = new Balancer.Parameters( + Balancer.Parameters.DEFAULT.policy, + Balancer.Parameters.DEFAULT.threshold, + nodes.getNodesToBeExcluded(), nodes.getNodesToBeIncluded()); + } + + int expectedExcludedNodes = 0; + if (nodes != null) { + if (!nodes.getNodesToBeExcluded().isEmpty()) { + expectedExcludedNodes = nodes.getNodesToBeExcluded().size(); + } else if (!nodes.getNodesToBeIncluded().isEmpty()) { + expectedExcludedNodes = + cluster.getDataNodes().size() - nodes.getNodesToBeIncluded().size(); + } + } // run balancer and validate results if (useTool) { - runBalancerCli(conf, totalUsedSpace, totalCapacity); + runBalancerCli(conf, totalUsedSpace, totalCapacity, p, useFile, expectedExcludedNodes); } else { - runBalancer(conf, totalUsedSpace, totalCapacity); + runBalancer(conf, totalUsedSpace, totalCapacity, p, expectedExcludedNodes); } } finally { cluster.shutdown(); @@ -365,11 +557,17 @@ private void doTest(Configuration conf, long[] capacities, String[] racks, private void runBalancer(Configuration conf, long totalUsedSpace, long totalCapacity) throws Exception { + runBalancer(conf, totalUsedSpace, totalCapacity, Balancer.Parameters.DEFAULT, 0); + } + + private void runBalancer(Configuration conf, + long totalUsedSpace, long totalCapacity, Balancer.Parameters p, + int excludedNodes) throws Exception { waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); // start rebalancing Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf); + final int r = Balancer.run(namenodes, p, conf); if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) ==0) { assertEquals(Balancer.ReturnStatus.NO_MOVE_PROGRESS.code, r); @@ -379,22 +577,66 @@ private void runBalancer(Configuration conf, } waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); LOG.info("Rebalancing with default ctor."); - waitForBalancer(totalUsedSpace, totalCapacity, client, cluster); + waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes); } - - private void runBalancerCli(Configuration conf, - long totalUsedSpace, long totalCapacity) throws Exception { - waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); - final String[] args = { "-policy", "datanode" }; + private 
void runBalancerCli(Configuration conf, + long totalUsedSpace, long totalCapacity, + Balancer.Parameters p, boolean useFile, int expectedExcludedNodes) throws Exception { + waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); + List args = new ArrayList(); + args.add("-policy"); + args.add("datanode"); + + File excludeHostsFile = null; + if (!p.nodesToBeExcluded.isEmpty()) { + args.add("-exclude"); + if (useFile) { + excludeHostsFile = new File ("exclude-hosts-file"); + PrintWriter pw = new PrintWriter(excludeHostsFile); + for (String host: p.nodesToBeExcluded) { + pw.write( host + "\n"); + } + pw.close(); + args.add("-f"); + args.add("exclude-hosts-file"); + } else { + args.add(StringUtils.join(p.nodesToBeExcluded, ',')); + } + } + + File includeHostsFile = null; + if (!p.nodesToBeIncluded.isEmpty()) { + args.add("-include"); + if (useFile) { + includeHostsFile = new File ("include-hosts-file"); + PrintWriter pw = new PrintWriter(includeHostsFile); + for (String host: p.nodesToBeIncluded){ + pw.write( host + "\n"); + } + pw.close(); + args.add("-f"); + args.add("include-hosts-file"); + } else { + args.add(StringUtils.join(p.nodesToBeIncluded, ',')); + } + } + final Tool tool = new Cli(); tool.setConf(conf); - final int r = tool.run(args); // start rebalancing + final int r = tool.run(args.toArray(new String[0])); // start rebalancing assertEquals("Tools should exit 0 on success", 0, r); waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); LOG.info("Rebalancing with default ctor."); - waitForBalancer(totalUsedSpace, totalCapacity, client, cluster); + waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, expectedExcludedNodes); + + if (excludeHostsFile != null && excludeHostsFile.exists()) { + excludeHostsFile.delete(); + } + if (includeHostsFile != null && includeHostsFile.exists()) { + includeHostsFile.delete(); + } } /** one-node cluster test*/ @@ -440,7 +682,7 @@ public void testBalancerCliParseWithThresholdOutOfBoundaries() { } } - /** Test a cluster with even distribution, + /** Test a cluster with even distribution, * then a new empty node is added to the cluster*/ @Test(timeout=100000) public void testBalancer0() throws Exception { @@ -554,7 +796,13 @@ public void testBalancerCliParseWithWrongParams() { } catch (IllegalArgumentException e) { } + parameters = new String[] {"-include", "testnode1", "-exclude", "testnode2"}; + try { + Balancer.Cli.parse(parameters); + fail("IllegalArgumentException is expected when both -exclude and -include are specified"); + } catch (IllegalArgumentException e) { + } } /** @@ -569,6 +817,183 @@ public void testExitZeroOnSuccess() throws Exception { oneNodeTest(conf, true); } + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + @Test(timeout=100000) + public void testBalancerWithExcludeList() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set excludeHosts = new HashSet(); + excludeHosts.add( "datanodeY"); + excludeHosts.add( "datanodeZ"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + excludeHosts, Parameters.DEFAULT.nodesToBeIncluded), false, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + 
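As an illustrative aside (not part of the applied diff): the exclude/include tests in this file all reduce to the same programmatic path that runBalancer() takes above — build a Balancer.Parameters with the default policy and threshold but a non-empty exclude (or include) set, then call Balancer.run(). A minimal fragment in the spirit of the test code, using only the Parameters constructor, DEFAULT fields, and Balancer.run() call shown in this patch; the helper name runBalancerExcluding is hypothetical, and it assumes a Configuration pointing at a running cluster:

import java.net.URI;
import java.util.Collection;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.balancer.Balancer;

// Hypothetical helper mirroring runBalancer() above: balance all datanodes
// except the given hosts, keeping the default policy and threshold.
static int runBalancerExcluding(Configuration conf, Set<String> excludedHosts)
    throws Exception {
  Balancer.Parameters p = new Balancer.Parameters(
      Balancer.Parameters.DEFAULT.policy,               // default balancing policy
      Balancer.Parameters.DEFAULT.threshold,            // default threshold
      excludedHosts,                                    // datanodes to leave untouched
      Balancer.Parameters.DEFAULT.nodesToBeIncluded);   // default (empty) include set: consider all datanodes
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  return Balancer.run(namenodes, p, conf);              // returns a Balancer.ReturnStatus code
}

The CLI variants exercised by runBalancerCli() build the equivalent argument list, for example -policy datanode -exclude datanodeY,datanodeZ, or -exclude -f <hosts-file> when useFile is set.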
@Test(timeout=100000) + public void testBalancerWithExcludeListWithPorts() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), false, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + @Test(timeout=100000) + public void testBalancerCliWithExcludeList() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set excludeHosts = new HashSet(); + excludeHosts.add( "datanodeY"); + excludeHosts.add( "datanodeZ"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, excludeHosts, + Parameters.DEFAULT.nodesToBeIncluded), true, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + @Test(timeout=100000) + public void testBalancerCliWithExcludeListWithPorts() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), true, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list in a file + */ + @Test(timeout=100000) + public void testBalancerCliWithExcludeListInAFile() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set excludeHosts = new HashSet(); + excludeHosts.add( "datanodeY"); + excludeHosts.add( "datanodeZ"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + excludeHosts, Parameters.DEFAULT.nodesToBeIncluded), true, true); + } + + /** + * Test a cluster with even distribution,G + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the exclude list + */ + @Test(timeout=100000) + public void testBalancerCliWithExcludeListWithPortsInAFile() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), true, true); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerWithIncludeList() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set includeHosts = new HashSet(); + includeHosts.add( "datanodeY"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + Parameters.DEFAULT.nodesToBeExcluded, includeHosts), false, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerWithIncludeListWithPorts() throws Exception { + final 
Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), false, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerCliWithIncludeList() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set includeHosts = new HashSet(); + includeHosts.add( "datanodeY"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + Parameters.DEFAULT.nodesToBeExcluded, includeHosts), true, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerCliWithIncludeListWithPorts() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, false); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerCliWithIncludeListInAFile() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + Set includeHosts = new HashSet(); + includeHosts.add( "datanodeY"); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2, + new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, + Parameters.DEFAULT.nodesToBeExcluded, includeHosts), true, true); + } + + /** + * Test a cluster with even distribution, + * then three nodes are added to the cluster, + * runs balancer with two of the nodes in the include list + */ + @Test(timeout=100000) + public void testBalancerCliWithIncludeListWithPortsInAFile() throws Exception { + final Configuration conf = new HdfsConfiguration(); + initConf(conf); + doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, + CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, true); + } + /** * @param args */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index 1a309910eb9..9652f8636a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -97,10 +97,10 @@ public void testBalancerWithHANameNodes() throws Exception { Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); assertEquals(1, namenodes.size()); assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster))); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf); + final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf); assertEquals(Balancer.ReturnStatus.SUCCESS.code, r); TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, - cluster); + 
cluster, Balancer.Parameters.DEFAULT); } finally { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index f5848041bcf..1a7ddd331b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -159,7 +159,7 @@ static void runBalancer(Suite s, // start rebalancing final Collection namenodes = DFSUtil.getNsServiceRpcUris(s.conf); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, s.conf); + final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, s.conf); Assert.assertEquals(Balancer.ReturnStatus.SUCCESS.code, r); LOG.info("BALANCER 2"); @@ -195,7 +195,7 @@ static void runBalancer(Suite s, balanced = true; for(int d = 0; d < used.length; d++) { final double p = used[d]*100.0/cap[d]; - balanced = p <= avg + Balancer.Parameters.DEFALUT.threshold; + balanced = p <= avg + Balancer.Parameters.DEFAULT.threshold; if (!balanced) { if (i % 100 == 0) { LOG.warn("datanodes " + d + " is not yet balanced: " diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java index 667204c0c9b..153ced3a243 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java @@ -175,7 +175,7 @@ private void runBalancer(Configuration conf, // start rebalancing Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf); + final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf); assertEquals(Balancer.ReturnStatus.SUCCESS.code, r); waitForHeartBeat(totalUsedSpace, totalCapacity); @@ -189,7 +189,7 @@ private void runBalancerCanFinish(Configuration conf, // start rebalancing Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); - final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf); + final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf); Assert.assertTrue(r == Balancer.ReturnStatus.SUCCESS.code || (r == Balancer.ReturnStatus.NO_MOVE_PROGRESS.code)); waitForHeartBeat(totalUsedSpace, totalCapacity); From 1d6e178144e9e3915ceea92d8c5de8b14cd02714 Mon Sep 17 00:00:00 2001 From: Zhijie Shen Date: Thu, 31 Jul 2014 09:27:43 +0000 Subject: [PATCH 090/354] YARN-2347. Consolidated RMStateVersion and NMDBSchemaVersion into Version in yarn-server-common. Contributed by Junping Du. 
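As an illustrative aside (not part of the applied diff): the consolidation below replaces two identical records with one shared org.apache.hadoop.yarn.server.records.Version, whose equals() compares both components while isCompatibleTo() compares only the major version, so state written by 1.1 still loads under 1.2 but not under 2.0. A minimal sketch of the check pattern the affected stores follow, assuming only the Version API in this patch; the method name checkSchemaVersion and the exception message are placeholders:

import java.io.IOException;
import org.apache.hadoop.yarn.server.records.Version;

// Hypothetical standalone version of the checkVersion() logic in the stores below:
// missing version data is treated as 1.0, same-major versions are accepted
// (the stores then overwrite the persisted version), different majors are refused.
static void checkSchemaVersion(Version loaded, Version current) throws IOException {
  if (loaded == null) {
    loaded = Version.newInstance(1, 0);     // nothing stored yet: treat as 1.0
  }
  if (loaded.equals(current)) {
    return;                                 // exact match, nothing to do
  }
  if (loaded.isCompatibleTo(current)) {
    return;                                 // same major version: caller re-stores the current version and continues
  }
  throw new IOException("Incompatible schema version: loaded " + loaded
      + ", current " + current);            // placeholder message
}

Version.toString() renders as major.minor (for example "1.0"), which is what the "Loaded ... version info" log lines below print.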
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614838 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/mapred/ShuffleHandler.java | 28 +++---- .../hadoop/mapred/TestShuffleHandler.java | 8 +- hadoop-yarn-project/CHANGES.txt | 3 + ...erver_resourcemanager_service_protos.proto | 5 -- .../hadoop/yarn/server/records/Version.java} | 24 +++--- .../records/impl/pb/VersionPBImpl.java} | 29 +++---- .../proto/yarn_server_common_protos.proto | 8 +- .../recovery/NMLeveldbStateStoreService.java | 28 +++---- .../impl/pb/NMDBSchemaVersionPBImpl.java | 81 ------------------- .../yarn_server_nodemanager_recovery.proto | 5 -- .../TestNMLeveldbStateStoreService.java | 12 +-- .../recovery/FileSystemRMStateStore.java | 18 ++--- .../recovery/MemoryRMStateStore.java | 6 +- .../recovery/NullRMStateStore.java | 6 +- .../recovery/RMStateStore.java | 10 +-- .../recovery/ZKRMStateStore.java | 18 ++--- .../recovery/records/RMStateVersion.java | 80 ------------------ .../recovery/RMStateStoreTestBase.java | 16 ++-- .../recovery/TestFSRMStateStore.java | 12 +-- .../recovery/TestZKRMStateStore.java | 12 +-- 20 files changed, 126 insertions(+), 283 deletions(-) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/{hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/NMDBSchemaVersion.java => hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java} (74%) rename hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/{hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java => hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java} (62%) delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/impl/pb/NMDBSchemaVersionPBImpl.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java index 1d781be1451..33220c6cbdb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java @@ -82,13 +82,13 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.NMDBSchemaVersionProto; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext; import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext; import org.apache.hadoop.yarn.server.api.AuxiliaryService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; 
-import org.apache.hadoop.yarn.server.nodemanager.recovery.records.impl.pb.NMDBSchemaVersionPBImpl; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import org.apache.hadoop.yarn.server.utils.LeveldbIterator; import org.apache.hadoop.yarn.util.ConverterUtils; import org.fusesource.leveldbjni.JniDBFactory; @@ -151,8 +151,8 @@ public class ShuffleHandler extends AuxiliaryService { private static final String STATE_DB_NAME = "mapreduce_shuffle_state"; private static final String STATE_DB_SCHEMA_VERSION_KEY = "shuffle-schema-version"; - protected static final NMDBSchemaVersion CURRENT_VERSION_INFO = - NMDBSchemaVersion.newInstance(1, 0); + protected static final Version CURRENT_VERSION_INFO = + Version.newInstance(1, 0); private int port; private ChannelFactory selector; @@ -491,21 +491,21 @@ private void startStore(Path recoveryRoot) throws IOException { } @VisibleForTesting - NMDBSchemaVersion loadVersion() throws IOException { + Version loadVersion() throws IOException { byte[] data = stateDb.get(bytes(STATE_DB_SCHEMA_VERSION_KEY)); // if version is not stored previously, treat it as 1.0. if (data == null || data.length == 0) { - return NMDBSchemaVersion.newInstance(1, 0); + return Version.newInstance(1, 0); } - NMDBSchemaVersion version = - new NMDBSchemaVersionPBImpl(NMDBSchemaVersionProto.parseFrom(data)); + Version version = + new VersionPBImpl(VersionProto.parseFrom(data)); return version; } - private void storeSchemaVersion(NMDBSchemaVersion version) throws IOException { + private void storeSchemaVersion(Version version) throws IOException { String key = STATE_DB_SCHEMA_VERSION_KEY; byte[] data = - ((NMDBSchemaVersionPBImpl) version).getProto().toByteArray(); + ((VersionPBImpl) version).getProto().toByteArray(); try { stateDb.put(bytes(key), data); } catch (DBException e) { @@ -519,11 +519,11 @@ private void storeVersion() throws IOException { // Only used for test @VisibleForTesting - void storeVersion(NMDBSchemaVersion version) throws IOException { + void storeVersion(Version version) throws IOException { storeSchemaVersion(version); } - protected NMDBSchemaVersion getCurrentVersion() { + protected Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @@ -538,7 +538,7 @@ protected NMDBSchemaVersion getCurrentVersion() { * upgrade shuffle info or remove incompatible old state. 
*/ private void checkVersion() throws IOException { - NMDBSchemaVersion loadedVersion = loadVersion(); + Version loadedVersion = loadVersion(); LOG.info("Loaded state DB schema version info " + loadedVersion); if (loadedVersion.equals(getCurrentVersion())) { return; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java index 0974cc6ab1f..70536536617 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java @@ -75,7 +75,7 @@ import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext; import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; +import org.apache.hadoop.yarn.server.records.Version; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelFuture; import org.jboss.netty.channel.ChannelHandlerContext; @@ -764,11 +764,11 @@ public void testRecoveryFromOtherVersions() throws IOException { // verify we are still authorized to shuffle to the old application rc = getShuffleResponseCode(shuffle, jt); Assert.assertEquals(HttpURLConnection.HTTP_OK, rc); - NMDBSchemaVersion version = NMDBSchemaVersion.newInstance(1, 0); + Version version = Version.newInstance(1, 0); Assert.assertEquals(version, shuffle.getCurrentVersion()); // emulate shuffle handler restart with compatible version - NMDBSchemaVersion version11 = NMDBSchemaVersion.newInstance(1, 1); + Version version11 = Version.newInstance(1, 1); // update version info before close shuffle shuffle.storeVersion(version11); Assert.assertEquals(version11, shuffle.loadVersion()); @@ -785,7 +785,7 @@ public void testRecoveryFromOtherVersions() throws IOException { Assert.assertEquals(HttpURLConnection.HTTP_OK, rc); // emulate shuffle handler restart with incompatible version - NMDBSchemaVersion version21 = NMDBSchemaVersion.newInstance(2, 1); + Version version21 = Version.newInstance(2, 1); shuffle.storeVersion(version21); Assert.assertEquals(version21, shuffle.loadVersion()); shuffle.close(); diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ad368cc95e8..50ae2aff6ec 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -74,6 +74,9 @@ Release 2.6.0 - UNRELEASED YARN-2328. FairScheduler: Verify update and continuous scheduling threads are stopped when the scheduler is stopped. (kasha) + YARN-2347. Consolidated RMStateVersion and NMDBSchemaVersion into Version in + yarn-server-common. 
(Junping Du via zjshen) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto index 2eb61487504..08c937f68d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto @@ -130,11 +130,6 @@ message ApplicationAttemptStateDataProto { optional int32 am_container_exit_status = 9 [default = -1000]; } -message RMStateVersionProto { - optional int32 major_version = 1; - optional int32 minor_version = 2; -} - message EpochProto { optional int64 epoch = 1; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/NMDBSchemaVersion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java similarity index 74% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/NMDBSchemaVersion.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java index 1ee59ea4d83..66042ea8ad2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/NMDBSchemaVersion.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java @@ -15,21 +15,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.yarn.server.nodemanager.recovery.records; -import org.apache.hadoop.classification.InterfaceAudience.Private; +package org.apache.hadoop.yarn.server.records; + +import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; /** - * The version information of DB Schema for NM. + * The version information for state get stored in YARN components, + * i.e. RMState, NMState, etc., which include: majorVersion and + * minorVersion. + * The major version update means incompatible changes happen while + * minor version update indicates compatible changes. */ -@Private +@LimitedPrivate({"YARN", "MapReduce"}) @Unstable -public abstract class NMDBSchemaVersion { +public abstract class Version { - public static NMDBSchemaVersion newInstance(int majorVersion, int minorVersion) { - NMDBSchemaVersion version = Records.newRecord(NMDBSchemaVersion.class); + public static Version newInstance(int majorVersion, int minorVersion) { + Version version = Records.newRecord(Version.class); version.setMajorVersion(majorVersion); version.setMinorVersion(minorVersion); return version; @@ -47,7 +52,7 @@ public String toString() { return getMajorVersion() + "." 
+ getMinorVersion(); } - public boolean isCompatibleTo(NMDBSchemaVersion version) { + public boolean isCompatibleTo(Version version) { return getMajorVersion() == version.getMajorVersion(); } @@ -68,7 +73,7 @@ public boolean equals(Object obj) { return false; if (getClass() != obj.getClass()) return false; - NMDBSchemaVersion other = (NMDBSchemaVersion) obj; + Version other = (Version) obj; if (this.getMajorVersion() == other.getMajorVersion() && this.getMinorVersion() == other.getMinorVersion()) { return true; @@ -76,5 +81,4 @@ public boolean equals(Object obj) { return false; } } - } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java similarity index 62% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java index f960413ce64..a99f22af5ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java @@ -16,28 +16,29 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb; +package org.apache.hadoop.yarn.server.records.impl.pb; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProtoOrBuilder; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProtoOrBuilder; -public class RMStateVersionPBImpl extends RMStateVersion { +import org.apache.hadoop.yarn.server.records.Version; - RMStateVersionProto proto = RMStateVersionProto.getDefaultInstance(); - RMStateVersionProto.Builder builder = null; +public class VersionPBImpl extends Version { + + VersionProto proto = VersionProto.getDefaultInstance(); + VersionProto.Builder builder = null; boolean viaProto = false; - public RMStateVersionPBImpl() { - builder = RMStateVersionProto.newBuilder(); + public VersionPBImpl() { + builder = VersionProto.newBuilder(); } - public RMStateVersionPBImpl(RMStateVersionProto proto) { + public VersionPBImpl(VersionProto proto) { this.proto = proto; viaProto = true; } - public RMStateVersionProto getProto() { + public VersionProto getProto() { proto = viaProto ? 
proto : builder.build(); viaProto = true; return proto; @@ -45,14 +46,14 @@ public RMStateVersionProto getProto() { private void maybeInitBuilder() { if (viaProto || builder == null) { - builder = RMStateVersionProto.newBuilder(proto); + builder = VersionProto.newBuilder(proto); } viaProto = false; } @Override public int getMajorVersion() { - RMStateVersionProtoOrBuilder p = viaProto ? proto : builder; + VersionProtoOrBuilder p = viaProto ? proto : builder; return p.getMajorVersion(); } @@ -64,7 +65,7 @@ public void setMajorVersion(int major) { @Override public int getMinorVersion() { - RMStateVersionProtoOrBuilder p = viaProto ? proto : builder; + VersionProtoOrBuilder p = viaProto ? proto : builder; return p.getMinorVersion(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto index 4f5d16895be..01fac329a12 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto @@ -47,4 +47,10 @@ message NodeHealthStatusProto { optional bool is_node_healthy = 1; optional string health_report = 2; optional int64 last_health_report_time = 3; -} \ No newline at end of file +} + +message VersionProto { + optional int32 major_version = 1; + optional int32 minor_version = 2; +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java index 008da7a2b8a..cc7656c0826 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java @@ -41,13 +41,13 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; -import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.NMDBSchemaVersionProto; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.impl.pb.NMDBSchemaVersionPBImpl; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import org.apache.hadoop.yarn.server.utils.LeveldbIterator; import org.apache.hadoop.yarn.util.ConverterUtils; import 
org.fusesource.leveldbjni.JniDBFactory; @@ -68,7 +68,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { private static final String DB_NAME = "yarn-nm-state"; private static final String DB_SCHEMA_VERSION_KEY = "nm-schema-version"; - private static final NMDBSchemaVersion CURRENT_VERSION_INFO = NMDBSchemaVersion + private static final Version CURRENT_VERSION_INFO = Version .newInstance(1, 0); private static final String DELETION_TASK_KEY_PREFIX = @@ -617,14 +617,14 @@ public void log(String message) { } - NMDBSchemaVersion loadVersion() throws IOException { + Version loadVersion() throws IOException { byte[] data = db.get(bytes(DB_SCHEMA_VERSION_KEY)); // if version is not stored previously, treat it as 1.0. if (data == null || data.length == 0) { - return NMDBSchemaVersion.newInstance(1, 0); + return Version.newInstance(1, 0); } - NMDBSchemaVersion version = - new NMDBSchemaVersionPBImpl(NMDBSchemaVersionProto.parseFrom(data)); + Version version = + new VersionPBImpl(VersionProto.parseFrom(data)); return version; } @@ -634,14 +634,14 @@ private void storeVersion() throws IOException { // Only used for test @VisibleForTesting - void storeVersion(NMDBSchemaVersion state) throws IOException { + void storeVersion(Version state) throws IOException { dbStoreVersion(state); } - private void dbStoreVersion(NMDBSchemaVersion state) throws IOException { + private void dbStoreVersion(Version state) throws IOException { String key = DB_SCHEMA_VERSION_KEY; byte[] data = - ((NMDBSchemaVersionPBImpl) state).getProto().toByteArray(); + ((VersionPBImpl) state).getProto().toByteArray(); try { db.put(bytes(key), data); } catch (DBException e) { @@ -649,7 +649,7 @@ private void dbStoreVersion(NMDBSchemaVersion state) throws IOException { } } - NMDBSchemaVersion getCurrentVersion() { + Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @@ -664,9 +664,9 @@ NMDBSchemaVersion getCurrentVersion() { * upgrade NM state or remove incompatible old state. */ private void checkVersion() throws IOException { - NMDBSchemaVersion loadedVersion = loadVersion(); + Version loadedVersion = loadVersion(); LOG.info("Loaded NM state version info " + loadedVersion); - if (loadedVersion != null && loadedVersion.equals(getCurrentVersion())) { + if (loadedVersion.equals(getCurrentVersion())) { return; } if (loadedVersion.isCompatibleTo(getCurrentVersion())) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/impl/pb/NMDBSchemaVersionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/impl/pb/NMDBSchemaVersionPBImpl.java deleted file mode 100644 index f42c1bee331..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/impl/pb/NMDBSchemaVersionPBImpl.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.yarn.server.nodemanager.recovery.records.impl.pb; - -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Evolving; -import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.NMDBSchemaVersionProto; -import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.NMDBSchemaVersionProtoOrBuilder; - -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; - -@Private -@Evolving -public class NMDBSchemaVersionPBImpl extends NMDBSchemaVersion { - - NMDBSchemaVersionProto proto = NMDBSchemaVersionProto.getDefaultInstance(); - NMDBSchemaVersionProto.Builder builder = null; - boolean viaProto = false; - - public NMDBSchemaVersionPBImpl() { - builder = NMDBSchemaVersionProto.newBuilder(); - } - - public NMDBSchemaVersionPBImpl(NMDBSchemaVersionProto proto) { - this.proto = proto; - viaProto = true; - } - - public NMDBSchemaVersionProto getProto() { - proto = viaProto ? proto : builder.build(); - viaProto = true; - return proto; - } - - private void maybeInitBuilder() { - if (viaProto || builder == null) { - builder = NMDBSchemaVersionProto.newBuilder(proto); - } - viaProto = false; - } - - @Override - public int getMajorVersion() { - NMDBSchemaVersionProtoOrBuilder p = viaProto ? proto : builder; - return p.getMajorVersion(); - } - - @Override - public void setMajorVersion(int majorVersion) { - maybeInitBuilder(); - builder.setMajorVersion(majorVersion); - } - - @Override - public int getMinorVersion() { - NMDBSchemaVersionProtoOrBuilder p = viaProto ? 
proto : builder; - return p.getMinorVersion(); - } - - @Override - public void setMinorVersion(int minorVersion) { - maybeInitBuilder(); - builder.setMinorVersion(minorVersion); - } - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto index a07e7ad6b2d..460c4932342 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto @@ -39,8 +39,3 @@ message LocalizedResourceProto { optional int64 size = 3; } -message NMDBSchemaVersionProto { - optional int32 majorVersion = 1; - optional int32 minorVersion = 2; -} - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java index 833a062d3b8..294f424b0f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java @@ -49,7 +49,7 @@ import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLocalizationState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokensState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredUserResources; -import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; @@ -114,12 +114,12 @@ public void testEmptyState() throws IOException { @Test public void testCheckVersion() throws IOException { // default version - NMDBSchemaVersion defaultVersion = stateStore.getCurrentVersion(); + Version defaultVersion = stateStore.getCurrentVersion(); Assert.assertEquals(defaultVersion, stateStore.loadVersion()); // compatible version - NMDBSchemaVersion compatibleVersion = - NMDBSchemaVersion.newInstance(defaultVersion.getMajorVersion(), + Version compatibleVersion = + Version.newInstance(defaultVersion.getMajorVersion(), defaultVersion.getMinorVersion() + 2); stateStore.storeVersion(compatibleVersion); Assert.assertEquals(compatibleVersion, stateStore.loadVersion()); @@ -128,8 +128,8 @@ public void testCheckVersion() throws IOException { Assert.assertEquals(defaultVersion, stateStore.loadVersion()); // incompatible version - NMDBSchemaVersion incompatibleVersion = - NMDBSchemaVersion.newInstance(defaultVersion.getMajorVersion() + 1, + Version incompatibleVersion = + Version.newInstance(defaultVersion.getMajorVersion() + 1, 
defaultVersion.getMinorVersion()); stateStore.storeVersion(incompatibleVersion); try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index 243c7a19912..2f8a944f666 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -44,22 +44,22 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Epoch; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.AMRMTokenSecretManagerStatePBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.EpochPBImpl; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.util.ConverterUtils; import com.google.common.annotations.VisibleForTesting; @@ -77,7 +77,7 @@ public class FileSystemRMStateStore extends RMStateStore { public static final Log LOG = LogFactory.getLog(FileSystemRMStateStore.class); protected static final String ROOT_DIR_NAME = "FSRMStateRoot"; - protected static final RMStateVersion CURRENT_VERSION_INFO = RMStateVersion + protected static final Version CURRENT_VERSION_INFO = Version .newInstance(1, 1); protected static final String AMRMTOKEN_SECRET_MANAGER_NODE = "AMRMTokenSecretManagerNode"; @@ -130,18 +130,18 @@ protected synchronized void closeInternal() throws Exception { } @Override - protected RMStateVersion 
getCurrentVersion() { + protected Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @Override - protected synchronized RMStateVersion loadVersion() throws Exception { + protected synchronized Version loadVersion() throws Exception { Path versionNodePath = getNodePath(rootDirPath, VERSION_NODE); if (fs.exists(versionNodePath)) { FileStatus status = fs.getFileStatus(versionNodePath); byte[] data = readFile(versionNodePath, status.getLen()); - RMStateVersion version = - new RMStateVersionPBImpl(RMStateVersionProto.parseFrom(data)); + Version version = + new VersionPBImpl(VersionProto.parseFrom(data)); return version; } return null; @@ -151,7 +151,7 @@ protected synchronized RMStateVersion loadVersion() throws Exception { protected synchronized void storeVersion() throws Exception { Path versionNodePath = getNodePath(rootDirPath, VERSION_NODE); byte[] data = - ((RMStateVersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); + ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); if (fs.exists(versionNodePath)) { updateFile(versionNodePath, data); } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java index 369f89a545e..f56517cd828 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java @@ -32,10 +32,10 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import com.google.common.annotations.VisibleForTesting; @@ -259,7 +259,7 @@ public synchronized void removeRMDTMasterKeyState(DelegationKey delegationKey) } @Override - protected RMStateVersion loadVersion() throws Exception { + protected Version loadVersion() throws Exception { return null; } @@ -268,7 +268,7 @@ protected void storeVersion() throws Exception { } @Override - protected RMStateVersion getCurrentVersion() { + protected Version getCurrentVersion() { return null; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java index ea7087176c9..e910c19629e 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java @@ -25,10 +25,10 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; @Unstable public class NullRMStateStore extends RMStateStore { @@ -123,7 +123,7 @@ public void checkVersion() throws Exception { } @Override - protected RMStateVersion loadVersion() throws Exception { + protected Version loadVersion() throws Exception { // Do nothing return null; } @@ -134,7 +134,7 @@ protected void storeVersion() throws Exception { } @Override - protected RMStateVersion getCurrentVersion() { + protected Version getCurrentVersion() { // Do nothing return null; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index e2c4e7e47fa..da08d80466d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -47,12 +47,12 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEvent; import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEventType; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNewSavedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; @@ -493,14 +493,14 @@ protected void serviceStop() throws Exception { * upgrade RM state. 
*/ public void checkVersion() throws Exception { - RMStateVersion loadedVersion = loadVersion(); + Version loadedVersion = loadVersion(); LOG.info("Loaded RM state version info " + loadedVersion); if (loadedVersion != null && loadedVersion.equals(getCurrentVersion())) { return; } // if there is no version info, treat it as 1.0; if (loadedVersion == null) { - loadedVersion = RMStateVersion.newInstance(1, 0); + loadedVersion = Version.newInstance(1, 0); } if (loadedVersion.isCompatibleTo(getCurrentVersion())) { LOG.info("Storing RM state version info " + getCurrentVersion()); @@ -516,7 +516,7 @@ public void checkVersion() throws Exception { * Derived class use this method to load the version information from state * store. */ - protected abstract RMStateVersion loadVersion() throws Exception; + protected abstract Version loadVersion() throws Exception; /** * Derived class use this method to store the version information. @@ -526,7 +526,7 @@ public void checkVersion() throws Exception { /** * Get the current version of the underlying state store. */ - protected abstract RMStateVersion getCurrentVersion(); + protected abstract Version getCurrentVersion(); /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index 5644ad9e34a..bb379c5b8b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -44,23 +44,23 @@ import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.AMRMTokenSecretManagerStateProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationAttemptStateDataProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ApplicationStateDataProto; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.EpochProto; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.RMZKUtils; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Epoch; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.AMRMTokenSecretManagerStatePBImpl; import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.EpochPBImpl; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; @@ -86,7 +86,7 @@ public class ZKRMStateStore extends RMStateStore { private final SecureRandom random = new SecureRandom(); protected static final String ROOT_ZNODE_NAME = "ZKRMStateRoot"; - protected static final RMStateVersion CURRENT_VERSION_INFO = RMStateVersion + protected static final Version CURRENT_VERSION_INFO = Version .newInstance(1, 1); private static final String RM_DELEGATION_TOKENS_ROOT_ZNODE_NAME = "RMDelegationTokensRoot"; @@ -377,7 +377,7 @@ protected synchronized void closeInternal() throws Exception { } @Override - protected RMStateVersion getCurrentVersion() { + protected Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @@ -385,7 +385,7 @@ protected RMStateVersion getCurrentVersion() { protected synchronized void storeVersion() throws Exception { String versionNodePath = getNodePath(zkRootNodePath, VERSION_NODE); byte[] data = - ((RMStateVersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); + ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray(); if (existsWithRetries(versionNodePath, true) != null) { setDataWithRetries(versionNodePath, data, -1); } else { @@ -394,13 +394,13 @@ protected synchronized void storeVersion() throws Exception { } @Override - protected synchronized RMStateVersion loadVersion() throws Exception { + protected synchronized Version loadVersion() throws Exception { String versionNodePath = getNodePath(zkRootNodePath, VERSION_NODE); if (existsWithRetries(versionNodePath, true) != null) { byte[] data = getDataWithRetries(versionNodePath, true); - RMStateVersion version = - new RMStateVersionPBImpl(RMStateVersionProto.parseFrom(data)); + Version version = + new VersionPBImpl(VersionProto.parseFrom(data)); return version; } return null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java deleted file mode 100644 index cfee512b5d4..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/RMStateVersion.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.yarn.server.resourcemanager.recovery.records; - -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Unstable; -import org.apache.hadoop.yarn.util.Records; - -/** - * The version information of RM state. - */ -@Private -@Unstable -public abstract class RMStateVersion { - - public static RMStateVersion newInstance(int majorVersion, int minorVersion) { - RMStateVersion version = Records.newRecord(RMStateVersion.class); - version.setMajorVersion(majorVersion); - version.setMinorVersion(minorVersion); - return version; - } - - public abstract int getMajorVersion(); - - public abstract void setMajorVersion(int majorVersion); - - public abstract int getMinorVersion(); - - public abstract void setMinorVersion(int minorVersion); - - public String toString() { - return getMajorVersion() + "." + getMinorVersion(); - } - - public boolean isCompatibleTo(RMStateVersion version) { - return getMajorVersion() == version.getMajorVersion(); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + getMajorVersion(); - result = prime * result + getMinorVersion(); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - RMStateVersion other = (RMStateVersion) obj; - if (this.getMajorVersion() == other.getMajorVersion() - && this.getMinorVersion() == other.getMinorVersion()) { - return true; - } else { - return false; - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java index a61f23f5a71..5d3e51a398e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java @@ -55,13 +55,13 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMDTSecretManagerState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.records.AMRMTokenSecretManagerState; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; @@ -111,8 +111,8 @@ public EventHandler getEventHandler() { interface RMStateStoreHelper { RMStateStore getRMStateStore() throws Exception; boolean isFinalStateValid() throws Exception; - void writeVersion(RMStateVersion version) throws Exception; - RMStateVersion getCurrentVersion() throws Exception; + void writeVersion(Version version) throws Exception; + Version getCurrentVersion() throws Exception; boolean appExists(RMApp app) throws Exception; } @@ -477,13 +477,13 @@ public void testCheckVersion(RMStateStoreHelper stateStoreHelper) store.setRMDispatcher(new TestDispatcher()); // default version - RMStateVersion defaultVersion = stateStoreHelper.getCurrentVersion(); + Version defaultVersion = stateStoreHelper.getCurrentVersion(); store.checkVersion(); Assert.assertEquals(defaultVersion, store.loadVersion()); // compatible version - RMStateVersion compatibleVersion = - RMStateVersion.newInstance(defaultVersion.getMajorVersion(), + Version compatibleVersion = + Version.newInstance(defaultVersion.getMajorVersion(), defaultVersion.getMinorVersion() + 2); stateStoreHelper.writeVersion(compatibleVersion); Assert.assertEquals(compatibleVersion, store.loadVersion()); @@ -492,8 +492,8 @@ public void testCheckVersion(RMStateStoreHelper stateStoreHelper) Assert.assertEquals(defaultVersion, store.loadVersion()); // incompatible version - RMStateVersion incompatibleVersion = - RMStateVersion.newInstance(defaultVersion.getMajorVersion() + 2, + Version incompatibleVersion = + Version.newInstance(defaultVersion.getMajorVersion() + 2, defaultVersion.getMinorVersion()); stateStoreHelper.writeVersion(incompatibleVersion); try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java index f5b3e8a8a67..88e5393f14d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java @@ -36,9 +36,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; +import org.apache.hadoop.yarn.server.records.Version; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; 
import org.apache.hadoop.yarn.util.ConverterUtils; @@ -70,7 +70,7 @@ public Path getVersionNode() { return new Path(new Path(workingDirPathURI, ROOT_DIR_NAME), VERSION_NODE); } - public RMStateVersion getCurrentVersion() { + public Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @@ -111,13 +111,13 @@ public boolean isFinalStateValid() throws Exception { } @Override - public void writeVersion(RMStateVersion version) throws Exception { - store.updateFile(store.getVersionNode(), ((RMStateVersionPBImpl) version) + public void writeVersion(Version version) throws Exception { + store.updateFile(store.getVersionNode(), ((VersionPBImpl) version) .getProto().toByteArray()); } @Override - public RMStateVersion getCurrentVersion() throws Exception { + public Version getCurrentVersion() throws Exception { return store.getCurrentVersion(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java index 1dee533ac05..3c7170adaeb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java @@ -32,9 +32,9 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.records.Version; +import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.RMStateVersionPBImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.data.Stat; @@ -69,7 +69,7 @@ public String getVersionNode() { return znodeWorkingPath + "/" + ROOT_ZNODE_NAME + "/" + VERSION_NODE; } - public RMStateVersion getCurrentVersion() { + public Version getCurrentVersion() { return CURRENT_VERSION_INFO; } @@ -96,13 +96,13 @@ public boolean isFinalStateValid() throws Exception { } @Override - public void writeVersion(RMStateVersion version) throws Exception { - client.setData(store.getVersionNode(), ((RMStateVersionPBImpl) version) + public void writeVersion(Version version) throws Exception { + client.setData(store.getVersionNode(), ((VersionPBImpl) version) .getProto().toByteArray(), -1); } @Override - public RMStateVersion getCurrentVersion() throws Exception { + public Version getCurrentVersion() throws Exception { return store.getCurrentVersion(); } From e52f67e3897a67a0b6d29e557a31cfa881738821 Mon Sep 17 00:00:00 2001 From: Xuan Gong Date: Thu, 31 Jul 2014 20:06:02 +0000 Subject: [PATCH 091/354] YARN-1994. Expose YARN/MR endpoints on multiple interfaces. 
Contributed by Craig Welch, Milan Potocnik,and Arpit Agarwal git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614981 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/conf/Configuration.java | 66 +++++++++ hadoop-mapreduce-project/CHANGES.txt | 3 + .../mapred/TaskAttemptListenerImpl.java | 4 +- .../hadoop/mapreduce/v2/app/AppContext.java | 1 + .../hadoop/mapreduce/v2/app/MRAppMaster.java | 5 + .../v2/app/client/MRClientService.java | 3 +- .../mapred/TestTaskAttemptListenerImpl.java | 13 +- .../mapreduce/v2/app/MockAppContext.java | 5 + .../v2/app/TestRuntimeEstimators.java | 5 + .../v2/jobhistory/JHAdminConfig.java | 4 +- .../mapreduce/v2/util/MRWebAppUtil.java | 9 +- .../mapreduce/v2/hs/HistoryClientService.java | 11 +- .../hadoop/mapreduce/v2/hs/JobHistory.java | 5 + .../mapreduce/v2/hs/server/HSAdminServer.java | 5 +- hadoop-yarn-project/CHANGES.txt | 3 + .../hadoop/yarn/conf/YarnConfiguration.java | 12 ++ .../hadoop/yarn/webapp/util/WebAppUtils.java | 32 +++++ .../src/main/resources/yarn-default.xml | 34 +++++ .../yarn/conf/TestYarnConfiguration.java | 128 ++++++++++++++++++ .../ApplicationHistoryClientService.java | 16 ++- .../ApplicationHistoryServer.java | 4 +- .../ContainerManagerImpl.java | 19 ++- .../ResourceLocalizationService.java | 6 +- .../server/nodemanager/webapp/WebServer.java | 4 +- .../server/resourcemanager/AdminService.java | 16 ++- .../ApplicationMasterService.java | 5 +- .../resourcemanager/ClientRMService.java | 8 +- .../resourcemanager/ResourceManager.java | 7 +- .../ResourceTrackerService.java | 8 +- .../yarn/server/resourcemanager/TestRMHA.java | 23 +++- 30 files changed, 429 insertions(+), 35 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 22ccf63e9bd..31c40f60f30 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -1843,6 +1843,38 @@ protected char[] getPasswordFromConfig(String name) { return pass; } + /** + * Get the socket address for hostProperty as a + * InetSocketAddress. If hostProperty is + * null, addressProperty will be used. This + * is useful for cases where we want to differentiate between host + * bind address and address clients should use to establish connection. + * + * @param hostProperty bind host property name. + * @param addressProperty address property name. + * @param defaultAddressValue the default value + * @param defaultPort the default port + * @return InetSocketAddress + */ + public InetSocketAddress getSocketAddr( + String hostProperty, + String addressProperty, + String defaultAddressValue, + int defaultPort) { + + InetSocketAddress bindAddr = getSocketAddr( + addressProperty, defaultAddressValue, defaultPort); + + final String host = get(hostProperty); + + if (host == null || host.isEmpty()) { + return bindAddr; + } + + return NetUtils.createSocketAddr( + host, bindAddr.getPort(), hostProperty); + } + /** * Get the socket address for name property as a * InetSocketAddress. @@ -1864,6 +1896,40 @@ public InetSocketAddress getSocketAddr( public void setSocketAddr(String name, InetSocketAddress addr) { set(name, NetUtils.getHostPortString(addr)); } + + /** + * Set the socket address a client can use to connect for the + * name property as a host:port. 
The wildcard + * address is replaced with the local host's address. If the host and address + * properties are configured the host component of the address will be combined + * with the port component of the addr to generate the address. This is to allow + * optional control over which host name is used in multi-home bind-host + * cases where a host can have multiple names + * @param hostProperty the bind-host configuration name + * @param addressProperty the service address configuration name + * @param defaultAddressValue the service default address configuration value + * @param addr InetSocketAddress of the service listener + * @return InetSocketAddress for clients to connect + */ + public InetSocketAddress updateConnectAddr( + String hostProperty, + String addressProperty, + String defaultAddressValue, + InetSocketAddress addr) { + + final String host = get(hostProperty); + final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue); + + if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) { + //not our case, fall back to original logic + return updateConnectAddr(addressProperty, addr); + } + + final String connectHost = connectHostPort.split(":")[0]; + // Create connect address using client address hostname and server port. + return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost( + connectHost, addr.getPort())); + } /** * Set the socket address a client can use to connect for the diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 5760cef3060..86ae7139b8c 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -83,6 +83,9 @@ Trunk (Unreleased) MAPREDUCE-5912. Task.calculateOutputSize does not handle Windows files after MAPREDUCE-5196. (Remus Rusanu via cnauroth) + MAPREDUCE-6019. MapReduce changes for exposing YARN/MR endpoints on multiple + interfaces. (Craig Welch, Milan Potocnik, Arpit Agarwal via xgong) + BUG FIXES MAPREDUCE-5714. Removed forceful JVM exit in shutDownJob. 
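For reference, a minimal sketch (not part of the patch) of how the two Configuration overloads added above -- getSocketAddr(hostProperty, addressProperty, defaultAddressValue, defaultPort) and updateConnectAddr(hostProperty, addressProperty, defaultAddressValue, addr) -- are meant to be used together, assuming only the behavior documented in their javadoc. The hostname rm.example.com and the choice of the RM admin-address keys are illustrative, and the running RPC server is stood in for by a plain listener address.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class BindHostExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Address clients should use to reach the service (hypothetical hostname).
    conf.set(YarnConfiguration.RM_ADMIN_ADDRESS, "rm.example.com:8033");
    // Address the server should actually bind to: all interfaces.
    conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");

    // Bind address: host taken from RM_BIND_HOST (because it is set),
    // port taken from RM_ADMIN_ADDRESS (or the default port).
    InetSocketAddress bindAddress = conf.getSocketAddr(
        YarnConfiguration.RM_BIND_HOST,
        YarnConfiguration.RM_ADMIN_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADMIN_PORT);
    // Prints 0.0.0.0:8033
    System.out.println("bind to:   " + NetUtils.getHostPortString(bindAddress));

    // After the RPC server has started, the service publishes the client-facing
    // address: hostname from RM_ADMIN_ADDRESS combined with the port the server
    // actually listens on (represented here by a plain InetSocketAddress).
    InetSocketAddress listenerAddress = new InetSocketAddress("0.0.0.0", 8033);
    InetSocketAddress connectAddress = conf.updateConnectAddr(
        YarnConfiguration.RM_BIND_HOST,
        YarnConfiguration.RM_ADMIN_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
        listenerAddress);
    // Prints rm.example.com:8033
    System.out.println("advertise: " + NetUtils.getHostPortString(connectAddress));
  }
}

This is the same pattern the services changed later in this patch follow (AdminService, ApplicationMasterService, HistoryClientService, ApplicationHistoryClientService): resolve the bind address with the bind-host overload before creating the RPC server, then derive the advertised connect address with updateConnectAddr after server.start().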
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java index 074e3f0fbb8..5f39edd72e8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java @@ -141,7 +141,9 @@ protected void startRpcServer() { } server.start(); - this.address = NetUtils.getConnectAddress(server); + this.address = NetUtils.createSocketAddrForHost( + context.getNMHostname(), + server.getListenerAddress().getPort()); } catch (IOException e) { throw new YarnRuntimeException(e); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java index 6f036c4a74a..31e282a63e9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java @@ -66,4 +66,5 @@ public interface AppContext { boolean hasSuccessfullyUnregistered(); + String getNMHostname(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 8c1892af392..59e72490496 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -1018,6 +1018,11 @@ public void markSuccessfulUnregistration() { public void resetIsLastAMRetry() { isLastAMRetry = false; } + + @Override + public String getNMHostname() { + return nmHost; + } } @SuppressWarnings("unchecked") diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index 3c0e100b5cc..11235322f6a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -131,7 +131,8 @@ protected void serviceStart() throws Exception { } server.start(); - this.bindAddress = NetUtils.getConnectAddress(server); + this.bindAddress = NetUtils.createSocketAddrForHost(appContext.getNMHostname(), + server.getListenerAddress().getPort()); LOG.info("Instantiated 
MRClientService at " + this.bindAddress); try { // Explicitly disabling SSL for map reduce task as we can't allow MR users diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java index 256f0b7bb7a..2c81cf06d2d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java @@ -61,6 +61,13 @@ public class TestTaskAttemptListenerImpl { public static class MockTaskAttemptListenerImpl extends TaskAttemptListenerImpl { + public MockTaskAttemptListenerImpl(AppContext context, + JobTokenSecretManager jobTokenSecretManager, + RMHeartbeatHandler rmHeartbeatHandler, AMPreemptionPolicy policy) { + + super(context, jobTokenSecretManager, rmHeartbeatHandler, policy); + } + public MockTaskAttemptListenerImpl(AppContext context, JobTokenSecretManager jobTokenSecretManager, RMHeartbeatHandler rmHeartbeatHandler, @@ -210,7 +217,7 @@ public void testGetMapCompletionEvents() throws IOException { when(appCtx.getEventHandler()).thenReturn(ea); CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy(); policy.init(appCtx); - TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl( + TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl( appCtx, secret, rmHeartbeatHandler, policy) { @Override protected void registerHeartbeatHandler(Configuration conf) { @@ -271,7 +278,7 @@ public void testCommitWindow() throws IOException { when(appCtx.getEventHandler()).thenReturn(ea); CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy(); policy.init(appCtx); - TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl( + TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl( appCtx, secret, rmHeartbeatHandler, policy) { @Override protected void registerHeartbeatHandler(Configuration conf) { @@ -326,7 +333,7 @@ public void testCheckpointIDTracking() when(appCtx.getEventHandler()).thenReturn(ea); CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy(); policy.init(appCtx); - TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl( + TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl( appCtx, secret, rmHeartbeatHandler, policy) { @Override protected void registerHeartbeatHandler(Configuration conf) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java index 511731a9ba1..dae0aa77fc0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java @@ -143,4 +143,9 @@ public boolean hasSuccessfullyUnregistered() { return true; } +@Override + public String getNMHostname() { + // bogus - Not Required + return null; + } } diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index 6fadf350f82..c25cf5060e9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -879,5 +879,10 @@ public boolean hasSuccessfullyUnregistered() { return true; } + @Override + public String getNMHostname() { + // bogus - Not Required + return null; + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java index 2e1a22e4310..9fa8a090a4b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java @@ -38,7 +38,9 @@ public class JHAdminConfig { public static final int DEFAULT_MR_HISTORY_PORT = 10020; public static final String DEFAULT_MR_HISTORY_ADDRESS = "0.0.0.0:" + DEFAULT_MR_HISTORY_PORT; - + public static final String MR_HISTORY_BIND_HOST = MR_HISTORY_PREFIX + + "bind-host"; + /** The address of the History server admin interface. 
*/ public static final String JHS_ADMIN_ADDRESS = MR_HISTORY_PREFIX + "admin.address"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java index 2d453f1d308..cac01191fcd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java @@ -29,6 +29,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.ipc.RPCUtil; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -105,11 +106,15 @@ public static String getJHSWebappURLWithScheme(Configuration conf) { public static InetSocketAddress getJHSWebBindAddress(Configuration conf) { if (httpPolicyInJHS == Policy.HTTPS_ONLY) { - return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS, + return conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_BIND_HOST, + JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT); } else { - return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, + return conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_BIND_HOST, + JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index 96b81054460..001608b2596 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -83,6 +83,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.webapp.WebApp; @@ -119,6 +120,7 @@ protected void serviceStart() throws Exception { YarnRPC rpc = YarnRPC.create(conf); initializeWebApp(conf); InetSocketAddress address = conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_BIND_HOST, JHAdminConfig.MR_HISTORY_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_PORT); @@ -137,9 +139,11 @@ protected void serviceStart() throws Exception { } server.start(); - this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_ADDRESS, + this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_BIND_HOST, + JHAdminConfig.MR_HISTORY_ADDRESS, + JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS, server.getListenerAddress()); - 
LOG.info("Instantiated MRClientService at " + this.bindAddress); + LOG.info("Instantiated HistoryClientService at " + this.bindAddress); super.serviceStart(); } @@ -158,8 +162,9 @@ protected void initializeWebApp(Configuration conf) { JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY) .at(NetUtils.getHostPortString(bindAddress)).start(webApp); + String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0]; MRWebAppUtil.setJHSWebappURLWithoutScheme(conf, - NetUtils.getHostPortString(webApp.getListenerAddress())); + connectHost + ":" + webApp.getListenerAddress().getPort()); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java index b7823a0c50d..194b85a5a29 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java @@ -394,4 +394,9 @@ public boolean hasSuccessfullyUnregistered() { return true; } + @Override + public String getNMHostname() { + // bogus - Not Required + return null; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java index 23a34a47b94..858d945dfe1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.logaggregation.AggregatedLogDeletionService; import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService; import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB; @@ -94,7 +95,9 @@ public void serviceInit(Configuration conf) throws Exception { WritableRpcEngine.ensureInitialized(); - clientRpcAddress = conf.getSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS, + clientRpcAddress = conf.getSocketAddr( + JHAdminConfig.MR_HISTORY_BIND_HOST, + JHAdminConfig.JHS_ADMIN_ADDRESS, JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS, JHAdminConfig.DEFAULT_JHS_ADMIN_PORT); clientRpcServer = new RPC.Builder(conf) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 50ae2aff6ec..7b7fa0ddbad 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -77,6 +77,9 @@ Release 2.6.0 - UNRELEASED YARN-2347. Consolidated RMStateVersion and NMDBSchemaVersion into Version in yarn-server-common. (Junping Du via zjshen) + YARN-1994. Expose YARN/MR endpoints on multiple interfaces. 
(Craig Welch, + Milan Potocnik, Arpit Agarwal via xgong) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index ab6b20e574e..9e08ef52008 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -126,6 +126,10 @@ public class YarnConfiguration extends Configuration { public static final String DEFAULT_RM_ADDRESS = "0.0.0.0:" + DEFAULT_RM_PORT; + /** The actual bind address for the RM.*/ + public static final String RM_BIND_HOST = + RM_PREFIX + "bind-host"; + /** The number of threads used to handle applications manager requests.*/ public static final String RM_CLIENT_THREAD_COUNT = RM_PREFIX + "client.thread-count"; @@ -545,6 +549,10 @@ public class YarnConfiguration extends Configuration { public static final String DEFAULT_NM_ADDRESS = "0.0.0.0:" + DEFAULT_NM_PORT; + /** The actual bind address or the NM.*/ + public static final String NM_BIND_HOST = + NM_PREFIX + "bind-host"; + /** who will execute(launch) the containers.*/ public static final String NM_CONTAINER_EXECUTOR = NM_PREFIX + "container-executor.class"; @@ -1132,6 +1140,10 @@ public class YarnConfiguration extends Configuration { public static final String DEFAULT_TIMELINE_SERVICE_ADDRESS = "0.0.0.0:" + DEFAULT_TIMELINE_SERVICE_PORT; + /** The listening endpoint for the timeline service application.*/ + public static final String TIMELINE_SERVICE_BIND_HOST = + TIMELINE_SERVICE_PREFIX + "bind-host"; + /** The number of threads to handle client RPC API requests. */ public static final String TIMELINE_SERVICE_HANDLER_THREAD_COUNT = TIMELINE_SERVICE_PREFIX + "handler-thread-count"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java index 29fd8c14e6d..6cbe6f94d6f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java @@ -34,6 +34,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.HAUtil; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.RMHAUtils; @Private @@ -170,6 +171,37 @@ private static String getResolvedAddress(InetSocketAddress address) { return sb.toString(); } + /** + * Get the URL to use for binding where bind hostname can be specified + * to override the hostname in the webAppURLWithoutScheme. Port specified in the + * webAppURLWithoutScheme will be used. 
+ * + * @param conf the configuration + * @param hostProperty bind host property name + * @param webAppURLWithoutScheme web app URL without scheme String + * @return String representing bind URL + */ + public static String getWebAppBindURL( + Configuration conf, + String hostProperty, + String webAppURLWithoutScheme) { + + // If the bind-host setting exists then it overrides the hostname + // portion of the corresponding webAppURLWithoutScheme + String host = conf.getTrimmed(hostProperty); + if (host != null && !host.isEmpty()) { + if (webAppURLWithoutScheme.contains(":")) { + webAppURLWithoutScheme = host + ":" + webAppURLWithoutScheme.split(":")[1]; + } + else { + throw new YarnRuntimeException("webAppURLWithoutScheme must include port specification but doesn't: " + + webAppURLWithoutScheme); + } + } + + return webAppURLWithoutScheme; + } + public static String getNMWebAppURLWithoutScheme(Configuration conf) { if (YarnConfiguration.useHttps(conf)) { return conf.get(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index edc2f8cab61..94b4d7f20ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -70,6 +70,17 @@ ${yarn.resourcemanager.hostname}:8032 + + + The actual address the server will bind to. If this optional address is + set, the RPC and webapp servers will bind to this address and the port specified in + yarn.resourcemanager.address and yarn.resourcemanager.webapp.address, respectively. This + is most useful for making RM listen to all interfaces by setting to 0.0.0.0. + + yarn.resourcemanager.bind-host + + + The number of threads used to handle applications manager requests. yarn.resourcemanager.client.thread-count @@ -635,6 +646,17 @@ ${yarn.nodemanager.hostname}:0 + + + The actual address the server will bind to. If this optional address is + set, the RPC and webapp servers will bind to this address and the port specified in + yarn.nodemanager.address and yarn.nodemanager.webapp.address, respectively. This is + most useful for making NM listen to all interfaces by setting to 0.0.0.0. + + yarn.nodemanager.bind-host + + + Environment variables that should be forwarded from the NodeManager's environment to the container's. yarn.nodemanager.admin-env @@ -1172,6 +1194,18 @@ ${yarn.timeline-service.hostname}:8190 + + + The actual address the server will bind to. If this optional address is + set, the RPC and webapp servers will bind to this address and the port specified in + yarn.timeline-service.address and yarn.timeline-service.webapp.address, respectively. + This is most useful for making the service listen to all interfaces by setting to + 0.0.0.0. + + yarn.timeline-service.bind-host + + + Store class name for timeline store. 
yarn.timeline-service.store-class diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java index 5e40e5d5000..1d925a733d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java @@ -28,6 +28,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; public class TestYarnConfiguration { @@ -75,4 +76,131 @@ public void testGetSocketAddressForNMWithHA() { YarnConfiguration.DEFAULT_NM_PORT); assertEquals(1234, addr.getPort()); } + + @Test + public void testGetSocketAddr() throws Exception { + + YarnConfiguration conf; + InetSocketAddress resourceTrackerAddress; + + //all default + conf = new YarnConfiguration(); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0], + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT), + resourceTrackerAddress); + + //with address + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.1"); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "10.0.0.1", + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT), + resourceTrackerAddress); + + //address and socket + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5001"); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "10.0.0.2", + 5001), + resourceTrackerAddress); + + //bind host only + conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_BIND_HOST, "10.0.0.3"); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "10.0.0.3", + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT), + resourceTrackerAddress); + + //bind host and address no port + conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0"); + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2"); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "0.0.0.0", + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT), + resourceTrackerAddress); + + //bind host and address with port + conf.set(YarnConfiguration.RM_BIND_HOST, 
"0.0.0.0"); + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5003"); + resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + assertEquals( + new InetSocketAddress( + "0.0.0.0", + 5003), + resourceTrackerAddress); + + } + + @Test + public void testUpdateConnectAddr() throws Exception { + YarnConfiguration conf; + InetSocketAddress resourceTrackerConnectAddress; + InetSocketAddress serverAddress; + + //no override, old behavior. Won't work on a host named "yo.yo.yo" + conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo"); + serverAddress = new InetSocketAddress( + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0], + Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1])); + + resourceTrackerConnectAddress = conf.updateConnectAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + serverAddress); + + assertFalse(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo")); + + //cause override with address + conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo"); + conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0"); + serverAddress = new InetSocketAddress( + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0], + Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1])); + + resourceTrackerConnectAddress = conf.updateConnectAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, + serverAddress); + + assertTrue(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo")); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java index e15198b13f0..6372056cb5d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java @@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; public class ApplicationHistoryClientService extends AbstractService { @@ -75,10 +76,11 @@ public ApplicationHistoryClientService(ApplicationHistoryManager history) { protected void serviceStart() throws Exception { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); - InetSocketAddress address = - conf.getSocketAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS, - 
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS, - YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT); + InetSocketAddress address = conf.getSocketAddr( + YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, + YarnConfiguration.TIMELINE_SERVICE_ADDRESS, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT); server = rpc.getServer(ApplicationHistoryProtocol.class, protocolHandler, @@ -88,8 +90,10 @@ protected void serviceStart() throws Exception { server.start(); this.bindAddress = - conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS, - server.getListenerAddress()); + conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, + YarnConfiguration.TIMELINE_SERVICE_ADDRESS, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS, + server.getListenerAddress()); LOG.info("Instantiated ApplicationHistoryClientService at " + this.bindAddress); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java index 02a3bb12fc0..ce05d503986 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java @@ -192,7 +192,9 @@ protected void startWebApp() { TimelineAuthenticationFilterInitializer.class.getName() + initializers); } - String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(conf); + String bindAddress = WebAppUtils.getWebAppBindURL(conf, + YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, + WebAppUtils.getAHSWebAppURLWithoutScheme(conf)); LOG.info("Instantiating AHSWebApp at " + bindAddress); try { AHSWebApp ahsWebApp = AHSWebApp.getInstance(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 1e155d27b84..0625df0edde 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -275,6 +275,7 @@ protected void serviceStart() throws Exception { YarnRPC rpc = YarnRPC.create(conf); InetSocketAddress initialAddress = conf.getSocketAddr( + YarnConfiguration.NM_BIND_HOST, YarnConfiguration.NM_ADDRESS, YarnConfiguration.DEFAULT_NM_ADDRESS, YarnConfiguration.DEFAULT_NM_PORT); @@ -296,7 +297,22 @@ protected void serviceStart() throws Exception { " server is still starting."); this.setBlockNewContainerRequests(true); server.start(); - InetSocketAddress connectAddress = NetUtils.getConnectAddress(server); + + 
InetSocketAddress connectAddress; + String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST); + String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS); + if (bindHost == null || bindHost.isEmpty() || + nmAddress == null || nmAddress.isEmpty()) { + connectAddress = NetUtils.getConnectAddress(server); + } else { + //a bind-host case with an address, to support overriding the first hostname + //found when querying for our hostname with the specified address, combine + //the specified address with the actual port listened on by the server + connectAddress = NetUtils.getConnectAddress( + new InetSocketAddress(nmAddress.split(":")[0], + server.getListenerAddress().getPort())); + } + NodeId nodeId = NodeId.newInstance( connectAddress.getAddress().getCanonicalHostName(), connectAddress.getPort()); @@ -304,6 +320,7 @@ protected void serviceStart() throws Exception { this.context.getNMTokenSecretManager().setNodeId(nodeId); this.context.getContainerTokenSecretManager().setNodeId(nodeId); LOG.info("ContainerManager started at " + connectAddress); + LOG.info("ContainerManager bound to " + initialAddress); super.serviceStart(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 554b368dd5f..64a0b37cc31 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -81,6 +81,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; @@ -251,6 +252,7 @@ public void serviceInit(Configuration conf) throws Exception { cacheCleanupPeriod = conf.getLong(YarnConfiguration.NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS, YarnConfiguration.DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS); localizationServerAddress = conf.getSocketAddr( + YarnConfiguration.NM_BIND_HOST, YarnConfiguration.NM_LOCALIZER_ADDRESS, YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS, YarnConfiguration.DEFAULT_NM_LOCALIZER_PORT); @@ -341,7 +343,9 @@ public void serviceStart() throws Exception { server = createServer(); server.start(); localizationServerAddress = - getConfig().updateConnectAddr(YarnConfiguration.NM_LOCALIZER_ADDRESS, + getConfig().updateConnectAddr(YarnConfiguration.NM_BIND_HOST, + YarnConfiguration.NM_LOCALIZER_ADDRESS, + YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS, server.getListenerAddress()); LOG.info("Localizer started on port " + server.getPort()); super.serviceStart(); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java index 2f78ec4cb27..ca2f239e223 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java @@ -55,7 +55,9 @@ public WebServer(Context nmContext, ResourceView resourceView, @Override protected void serviceStart() throws Exception { - String bindAddress = WebAppUtils.getNMWebAppURLWithoutScheme(getConfig()); + String bindAddress = WebAppUtils.getWebAppBindURL(getConfig(), + YarnConfiguration.NM_BIND_HOST, + WebAppUtils.getNMWebAppURLWithoutScheme(getConfig())); LOG.info("Instantiating NMWebApp at " + bindAddress); try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index 1d2f376d325..c47f49e207e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -90,7 +90,9 @@ public class AdminService extends CompositeService implements private EmbeddedElectorService embeddedElector; private Server server; - private InetSocketAddress masterServiceAddress; + + // Address to use for binding. May be a wildcard address. 
+ private InetSocketAddress masterServiceBindAddress; private AccessControlList adminAcl; private final RecordFactory recordFactory = @@ -114,10 +116,12 @@ public void serviceInit(Configuration conf) throws Exception { } } - masterServiceAddress = conf.getSocketAddr( + masterServiceBindAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, YarnConfiguration.RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_PORT); + adminAcl = new AccessControlList(conf.get( YarnConfiguration.YARN_ADMIN_ACL, YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)); @@ -141,7 +145,7 @@ protected void startServer() throws Exception { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); this.server = (Server) rpc.getServer( - ResourceManagerAdministrationProtocol.class, this, masterServiceAddress, + ResourceManagerAdministrationProtocol.class, this, masterServiceBindAddress, conf, null, conf.getInt(YarnConfiguration.RM_ADMIN_CLIENT_THREAD_COUNT, YarnConfiguration.DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT)); @@ -170,8 +174,10 @@ protected void startServer() throws Exception { } this.server.start(); - conf.updateConnectAddr(YarnConfiguration.RM_ADMIN_ADDRESS, - server.getListenerAddress()); + conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_ADMIN_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, + server.getListenerAddress()); } protected void stopServer() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index e60add44bb1..eda4c7b658e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -127,6 +127,7 @@ protected void serviceStart() throws Exception { YarnRPC rpc = YarnRPC.create(conf); InetSocketAddress masterServiceAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); @@ -159,7 +160,9 @@ protected void serviceStart() throws Exception { this.server.start(); this.bindAddress = - conf.updateConnectAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS, + conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_SCHEDULER_ADDRESS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, server.getListenerAddress()); super.serviceStart(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 974376091b0..71f873c26a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -199,7 +199,9 @@ protected void serviceStart() throws Exception { } this.server.start(); - clientBindAddress = conf.updateConnectAddr(YarnConfiguration.RM_ADDRESS, + clientBindAddress = conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_ADDRESS, + YarnConfiguration.DEFAULT_RM_ADDRESS, server.getListenerAddress()); super.serviceStart(); } @@ -213,7 +215,9 @@ protected void serviceStop() throws Exception { } InetSocketAddress getBindAddress(Configuration conf) { - return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS, + return conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 4b5d94875ad..40e346c680a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -155,7 +155,8 @@ public class ResourceManager extends CompositeService implements Recoverable { private AppReportFetcher fetcher = null; protected ResourceTrackerService resourceTracker; - private String webAppAddress; + @VisibleForTesting + protected String webAppAddress; private ConfigurationProvider configurationProvider = null; /** End of Active services */ @@ -230,7 +231,9 @@ protected void serviceInit(Configuration conf) throws Exception { } createAndInitActiveServices(); - webAppAddress = WebAppUtils.getRMWebAppURLWithoutScheme(this.conf); + webAppAddress = WebAppUtils.getWebAppBindURL(this.conf, + YarnConfiguration.RM_BIND_HOST, + WebAppUtils.getRMWebAppURLWithoutScheme(this.conf)); this.rmLoginUGI = UserGroupInformation.getCurrentUser(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index f2a83763bc8..6eebb4d7bff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.api.ResourceTracker; import 
org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; @@ -121,6 +122,7 @@ public ResourceTrackerService(RMContext rmContext, @Override protected void serviceInit(Configuration conf) throws Exception { resourceTrackerAddress = conf.getSocketAddr( + YarnConfiguration.RM_BIND_HOST, YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); @@ -175,9 +177,11 @@ protected void serviceStart() throws Exception { } refreshServiceAcls(conf, RMPolicyProvider.getInstance()); } - + this.server.start(); - conf.updateConnectAddr(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + conf.updateConnectAddr(YarnConfiguration.RM_BIND_HOST, + YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, + YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, server.getListenerAddress()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java index 610023b73dc..b8290de5127 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java @@ -380,7 +380,19 @@ public void testHAIDLookup() { } @Test - public void testHAWithRMHostName() { + public void testHAWithRMHostName() throws Exception { + innerTestHAWithRMHostName(false); + configuration.clear(); + setUp(); + innerTestHAWithRMHostName(true); + } + + public void innerTestHAWithRMHostName(boolean includeBindHost) { + //this is run two times, with and without a bind host configured + if (includeBindHost) { + configuration.set(YarnConfiguration.RM_BIND_HOST, "9.9.9.9"); + } + //test if both RM_HOSTBANE_{rm_id} and RM_RPCADDRESS_{rm_id} are set //We should only read rpc addresses from RM_RPCADDRESS_{rm_id} configuration configuration.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, @@ -400,6 +412,15 @@ public void testHAWithRMHostName() { RM2_ADDRESS, conf.get(HAUtil.addSuffix(confKey, RM2_NODE_ID))); assertEquals("RPC address not set for " + confKey, RM3_ADDRESS, conf.get(HAUtil.addSuffix(confKey, RM3_NODE_ID))); + if (includeBindHost) { + assertEquals("Web address misconfigured WITH bind-host", + rm.webAppAddress.substring(0, 7), "9.9.9.9"); + } else { + //YarnConfiguration tries to figure out which rm host it's on by binding to it, + //which doesn't happen for any of these fake addresses, so we end up with 0.0.0.0 + assertEquals("Web address misconfigured WITHOUT bind-host", + rm.webAppAddress.substring(0, 7), "0.0.0.0"); + } } } catch (YarnRuntimeException e) { fail("Should not throw any exceptions."); From beebb36595317153b240ab7aa6bc46ead8112560 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Thu, 31 Jul 2014 22:45:08 +0000 Subject: [PATCH 092/354] Fix some merge errors that slipped through. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1615013 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/hdfs/protocol/ClientProtocol.java | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index b3b9573d462..effc10d9fe5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1336,28 +1336,11 @@ public List listXAttrs(String src) /** * Remove xattr of a file or directory.Value in xAttr parameter is ignored. -<<<<<<< .working - * Name must be prefixed with user/trusted/security/system/raw. -======= * The name must be prefixed with the namespace followed by ".". For example, * "user.attr". ->>>>>>> .merge-right.r1614550 *
-<<<<<<< .working - * A regular user only can remove xattr of "user" namespace. - * A super user can remove xattr of "user" and "trusted" namespace. - * XAttr of "security" and "system" namespace is only used/exposed - * internally to the FS impl. - * The xattrs of the "raw" namespace are only used/exposed when accessed in - * the /.reserved/raw HDFS directory hierarchy. These attributes can only be - * accessed by the superuser. - *
- * @see - * http://en.wikipedia.org/wiki/Extended_file_attributes -======= * Refer to the HDFS extended attributes user documentation for details. * ->>>>>>> .merge-right.r1614550 * @param src file or directory * @param xAttr XAttr to remove * @throws IOException From b8597e6a10b2e8df1bee4e8ce0c8be345f7e007d Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Fri, 1 Aug 2014 01:05:33 +0000 Subject: [PATCH 093/354] HDFS-6685. Balancer should preserve storage type of replicas. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615015 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../org/apache/hadoop/hdfs/StorageType.java | 10 + .../hadoop/hdfs/protocolPB/PBHelper.java | 8 +- .../hadoop/hdfs/server/balancer/Balancer.java | 682 ++++++++++-------- .../hdfs/server/balancer/BalancingPolicy.java | 86 ++- .../server/blockmanagement/BlockManager.java | 5 +- .../server/protocol/BlocksWithLocations.java | 25 +- .../apache/hadoop/hdfs/util/EnumCounters.java | 9 +- .../apache/hadoop/hdfs/util/EnumDoubles.java | 128 ++++ .../hadoop-hdfs/src/main/proto/hdfs.proto | 1 + .../hadoop/hdfs/protocolPB/TestPBHelper.java | 4 +- 11 files changed, 619 insertions(+), 341 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 880e209f97e..72268d30554 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -338,6 +338,8 @@ Release 2.6.0 - UNRELEASED HDFS-6441. Add ability to exclude/include specific datanodes while balancing. (Benoy Antony and Yu Li via Arpit Agarwal) + HDFS-6685. Balancer should preserve storage type of replicas. (szetszwo) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. 
(wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java index 408f678d650..3d8133c7ce5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hdfs; +import java.util.Arrays; +import java.util.List; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -32,4 +35,11 @@ public enum StorageType { SSD; public static final StorageType DEFAULT = DISK; + public static final StorageType[] EMPTY_ARRAY = {}; + + private static final StorageType[] VALUES = values(); + + public static List asList() { + return Arrays.asList(VALUES); + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 9ca93a5ae29..7e98a88bf72 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -352,15 +352,19 @@ public static BlockWithLocationsProto convert(BlockWithLocations blk) { return BlockWithLocationsProto.newBuilder() .setBlock(convert(blk.getBlock())) .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids())) - .addAllStorageUuids(Arrays.asList(blk.getStorageIDs())).build(); + .addAllStorageUuids(Arrays.asList(blk.getStorageIDs())) + .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes())) + .build(); } public static BlockWithLocations convert(BlockWithLocationsProto b) { final List datanodeUuids = b.getDatanodeUuidsList(); final List storageUuids = b.getStorageUuidsList(); + final List storageTypes = b.getStorageTypesList(); return new BlockWithLocations(convert(b.getBlock()), datanodeUuids.toArray(new String[datanodeUuids.size()]), - storageUuids.toArray(new String[storageUuids.size()])); + storageUuids.toArray(new String[storageUuids.size()]), + convertStorageTypes(storageTypes, storageUuids.size())); } public static BlocksWithLocationsProto convert(BlocksWithLocations blks) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 1ddb3a41993..dad39494861 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -38,6 +38,7 @@ import java.util.Collection; import java.util.Collections; import java.util.Date; +import java.util.EnumMap; import java.util.Formatter; import java.util.HashMap; import java.util.HashSet; @@ -79,6 +80,8 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import 
org.apache.hadoop.net.NetworkTopology; @@ -90,6 +93,8 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import com.google.common.base.Preconditions; + /**
The balancer is a tool that balances disk space usage on an HDFS cluster * when some datanodes become full or when new empty nodes join the cluster. * The tool is deployed as an application program that can be run by the @@ -190,7 +195,9 @@ @InterfaceAudience.Private public class Balancer { static final Log LOG = LogFactory.getLog(Balancer.class); - final private static long MAX_BLOCKS_SIZE_TO_FETCH = 2*1024*1024*1024L; //2GB + final private static long GB = 1L << 30; //1GB + final private static long MAX_SIZE_TO_MOVE = 10*GB; + final private static long MAX_BLOCKS_SIZE_TO_FETCH = 2*GB; private static long WIN_WIDTH = 5400*1000L; // 1.5 hour /** The maximum number of concurrent blocks moves for @@ -221,26 +228,22 @@ public class Balancer { Set nodesToBeIncluded; // all data node lists - private final Collection overUtilizedDatanodes - = new LinkedList(); - private final Collection aboveAvgUtilizedDatanodes - = new LinkedList(); - private final Collection belowAvgUtilizedDatanodes - = new LinkedList(); - private final Collection underUtilizedDatanodes - = new LinkedList(); + private final Collection overUtilized = new LinkedList(); + private final Collection aboveAvgUtilized = new LinkedList(); + private final Collection belowAvgUtilized + = new LinkedList(); + private final Collection underUtilized + = new LinkedList(); - private final Collection sources - = new HashSet(); - private final Collection targets - = new HashSet(); + private final Collection sources = new HashSet(); + private final Collection targets + = new HashSet(); private final Map globalBlockList = new HashMap(); private final MovedBlocks movedBlocks = new MovedBlocks(); - /** Map (datanodeUuid -> BalancerDatanodes) */ - private final Map datanodeMap - = new HashMap(); + /** Map (datanodeUuid,storageType -> StorageGroup) */ + private final StorageGroupMap storageGroupMap = new StorageGroupMap(); private NetworkTopology cluster; @@ -248,12 +251,39 @@ public class Balancer { private final ExecutorService dispatcherExecutor; private final int maxConcurrentMovesPerNode; + + private static class StorageGroupMap { + private static String toKey(String datanodeUuid, StorageType storageType) { + return datanodeUuid + ":" + storageType; + } + + private final Map map + = new HashMap(); + + BalancerDatanode.StorageGroup get(String datanodeUuid, StorageType storageType) { + return map.get(toKey(datanodeUuid, storageType)); + } + + void put(BalancerDatanode.StorageGroup g) { + final String key = toKey(g.getDatanode().getDatanodeUuid(), g.storageType); + final BalancerDatanode.StorageGroup existing = map.put(key, g); + Preconditions.checkState(existing == null); + } + + int size() { + return map.size(); + } + + void clear() { + map.clear(); + } + } /* This class keeps track of a scheduled block move */ private class PendingBlockMove { private BalancerBlock block; private Source source; private BalancerDatanode proxySource; - private BalancerDatanode target; + private BalancerDatanode.StorageGroup target; /** constructor */ private PendingBlockMove() { @@ -264,7 +294,7 @@ public String toString() { final Block b = block.getBlock(); return b + " with size=" + b.getNumBytes() + " from " + source.getDisplayName() + " to " + target.getDisplayName() - + " through " + proxySource.getDisplayName(); + + " through " + proxySource.datanode; } /* choose a block & a proxy source for this pendingMove @@ -316,20 +346,20 @@ private boolean chooseProxySource() { final DatanodeInfo targetDN = target.getDatanode(); // if node group is supported, first try 
add nodes in the same node group if (cluster.isNodeGroupAware()) { - for (BalancerDatanode loc : block.getLocations()) { + for (BalancerDatanode.StorageGroup loc : block.getLocations()) { if (cluster.isOnSameNodeGroup(loc.getDatanode(), targetDN) && addTo(loc)) { return true; } } } // check if there is replica which is on the same rack with the target - for (BalancerDatanode loc : block.getLocations()) { + for (BalancerDatanode.StorageGroup loc : block.getLocations()) { if (cluster.isOnSameRack(loc.getDatanode(), targetDN) && addTo(loc)) { return true; } } // find out a non-busy replica - for (BalancerDatanode loc : block.getLocations()) { + for (BalancerDatanode.StorageGroup loc : block.getLocations()) { if (addTo(loc)) { return true; } @@ -337,8 +367,9 @@ private boolean chooseProxySource() { return false; } - // add a BalancerDatanode as proxy source for specific block movement - private boolean addTo(BalancerDatanode bdn) { + /** add to a proxy source for specific block movement */ + private boolean addTo(BalancerDatanode.StorageGroup g) { + final BalancerDatanode bdn = g.getBalancerDatanode(); if (bdn.addPendingBlock(this)) { proxySource = bdn; return true; @@ -354,7 +385,7 @@ private void dispatch() { DataInputStream in = null; try { sock.connect( - NetUtils.createSocketAddr(target.datanode.getXferAddr()), + NetUtils.createSocketAddr(target.getDatanode().getXferAddr()), HdfsServerConstants.READ_TIMEOUT); /* Unfortunately we don't have a good way to know if the Datanode is * taking a really long time to move a block, OR something has @@ -371,7 +402,7 @@ private void dispatch() { ExtendedBlock eb = new ExtendedBlock(nnc.blockpoolID, block.getBlock()); Token accessToken = nnc.getAccessToken(eb); IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut, - unbufIn, nnc, accessToken, target.datanode); + unbufIn, nnc, accessToken, target.getDatanode()); unbufOut = saslStreams.out; unbufIn = saslStreams.in; out = new DataOutputStream(new BufferedOutputStream(unbufOut, @@ -391,14 +422,14 @@ private void dispatch() { * gets out of sync with work going on in datanode. */ proxySource.activateDelay(DELAY_AFTER_ERROR); - target.activateDelay(DELAY_AFTER_ERROR); + target.getBalancerDatanode().activateDelay(DELAY_AFTER_ERROR); } finally { IOUtils.closeStream(out); IOUtils.closeStream(in); IOUtils.closeSocket(sock); proxySource.removePendingBlock(this); - target.removePendingBlock(this); + target.getBalancerDatanode().removePendingBlock(this); synchronized (this ) { reset(); @@ -414,7 +445,7 @@ private void sendRequest(DataOutputStream out, ExtendedBlock eb, StorageType storageType, Token accessToken) throws IOException { new Sender(out).replaceBlock(eb, storageType, accessToken, - source.getStorageID(), proxySource.getDatanode()); + source.getDatanode().getDatanodeUuid(), proxySource.datanode); } /* Receive a block copy response from the input stream */ @@ -454,8 +485,9 @@ public void run() { /* A class for keeping track of blocks in the Balancer */ static private class BalancerBlock { private final Block block; // the block - private final List locations - = new ArrayList(3); // its locations + /** The locations of the replicas of the block. 
*/ + private final List locations + = new ArrayList(3); /* Constructor */ private BalancerBlock(Block block) { @@ -468,20 +500,19 @@ private synchronized void clearLocations() { } /* add a location */ - private synchronized void addLocation(BalancerDatanode datanode) { - if (!locations.contains(datanode)) { - locations.add(datanode); + private synchronized void addLocation(BalancerDatanode.StorageGroup g) { + if (!locations.contains(g)) { + locations.add(g); } } - /* Return if the block is located on datanode */ - private synchronized boolean isLocatedOnDatanode( - BalancerDatanode datanode) { - return locations.contains(datanode); + /** @return if the block is located on the given storage group. */ + private synchronized boolean isLocatedOn(BalancerDatanode.StorageGroup g) { + return locations.contains(g); } /* Return its locations */ - private synchronized List getLocations() { + private synchronized List getLocations() { return locations; } @@ -498,37 +529,84 @@ private long getNumBytes() { /* The class represents a desired move of bytes between two nodes * and the target. - * An object of this class is stored in a source node. + * An object of this class is stored in a source. */ - static private class NodeTask { - private final BalancerDatanode datanode; //target node + static private class Task { + private final BalancerDatanode.StorageGroup target; private long size; //bytes scheduled to move /* constructor */ - private NodeTask(BalancerDatanode datanode, long size) { - this.datanode = datanode; + private Task(BalancerDatanode.StorageGroup target, long size) { + this.target = target; this.size = size; } - - /* Get the node */ - private BalancerDatanode getDatanode() { - return datanode; - } - - /* Get the number of bytes that need to be moved */ - private long getSize() { - return size; - } } /* A class that keeps track of a datanode in Balancer */ private static class BalancerDatanode { - final private static long MAX_SIZE_TO_MOVE = 10*1024*1024*1024L; //10GB + + /** A group of storages in a datanode with the same storage type. */ + private class StorageGroup { + final StorageType storageType; + final double utilization; + final long maxSize2Move; + private long scheduledSize = 0L; + + private StorageGroup(StorageType storageType, double utilization, + long maxSize2Move) { + this.storageType = storageType; + this.utilization = utilization; + this.maxSize2Move = maxSize2Move; + } + + BalancerDatanode getBalancerDatanode() { + return BalancerDatanode.this; + } + + DatanodeInfo getDatanode() { + return BalancerDatanode.this.datanode; + } + + /** Decide if still need to move more bytes */ + protected synchronized boolean hasSpaceForScheduling() { + return availableSizeToMove() > 0L; + } + + /** @return the total number of bytes that need to be moved */ + synchronized long availableSizeToMove() { + return maxSize2Move - scheduledSize; + } + + /** increment scheduled size */ + synchronized void incScheduledSize(long size) { + scheduledSize += size; + } + + /** @return scheduled size */ + synchronized long getScheduledSize() { + return scheduledSize; + } + + /** Reset scheduled size to zero. 
*/ + synchronized void resetScheduledSize() { + scheduledSize = 0L; + } + + /** @return the name for display */ + String getDisplayName() { + return datanode + ":" + storageType; + } + + @Override + public String toString() { + return "" + utilization; + } + } + final DatanodeInfo datanode; - final double utilization; - final long maxSize2Move; - private long scheduledSize = 0L; + final EnumMap storageMap + = new EnumMap(StorageType.class); protected long delayUntil = 0L; // blocks being moved but not confirmed yet private final List pendingBlocks; @@ -536,78 +614,38 @@ private static class BalancerDatanode { @Override public String toString() { - return getClass().getSimpleName() + "[" + datanode - + ", utilization=" + utilization + "]"; + return getClass().getSimpleName() + ":" + datanode + ":" + storageMap; } /* Constructor * Depending on avgutil & threshold, calculate maximum bytes to move */ - private BalancerDatanode(DatanodeInfo node, BalancingPolicy policy, double threshold, - int maxConcurrentMoves) { - datanode = node; - utilization = policy.getUtilization(node); - final double avgUtil = policy.getAvgUtilization(); - long maxSizeToMove; - - if (utilization >= avgUtil+threshold - || utilization <= avgUtil-threshold) { - maxSizeToMove = (long)(threshold*datanode.getCapacity()/100); - } else { - maxSizeToMove = - (long)(Math.abs(avgUtil-utilization)*datanode.getCapacity()/100); - } - if (utilization < avgUtil ) { - maxSizeToMove = Math.min(datanode.getRemaining(), maxSizeToMove); - } - this.maxSize2Move = Math.min(MAX_SIZE_TO_MOVE, maxSizeToMove); + private BalancerDatanode(DatanodeStorageReport report, + double threshold, int maxConcurrentMoves) { + this.datanode = report.getDatanodeInfo(); this.maxConcurrentMoves = maxConcurrentMoves; this.pendingBlocks = new ArrayList(maxConcurrentMoves); } - /** Get the datanode */ - protected DatanodeInfo getDatanode() { - return datanode; - } - - /** Get the name of the datanode */ - protected String getDisplayName() { - return datanode.toString(); - } - - /* Get the storage id of the datanode */ - protected String getStorageID() { - return datanode.getDatanodeUuid(); - } - - /** Decide if still need to move more bytes */ - protected synchronized boolean hasSpaceForScheduling() { - return scheduledSize nodeTasks = new ArrayList(2); + private final List tasks = new ArrayList(2); private long blocksToReceive = 0L; /* source blocks point to balancerBlocks in the global list because * we want to keep one copy of a block in balancer and be aware that @@ -673,17 +711,17 @@ public void run() { = new ArrayList(); /* constructor */ - private Source(DatanodeInfo node, BalancingPolicy policy, double threshold, - int maxConcurrentMoves) { - super(node, policy, threshold, maxConcurrentMoves); + private Source(StorageType storageType, double utilization, + long maxSize2Move, BalancerDatanode dn) { + dn.super(storageType, utilization, maxSize2Move); } - /** Add a node task */ - private void addNodeTask(NodeTask task) { - assert (task.datanode != this) : - "Source and target are the same " + datanode; - incScheduledSize(task.getSize()); - nodeTasks.add(task); + /** Add a task */ + private void addTask(Task task) { + Preconditions.checkState(task.target != this, + "Source and target are the same storage group " + getDisplayName()); + incScheduledSize(task.size); + tasks.add(task); } /* Return an iterator to this source's blocks */ @@ -696,8 +734,10 @@ private Iterator getBlockIterator() { * Return the total size of the received blocks in the number of bytes. 
*/ private long getBlockList() throws IOException { - BlockWithLocations[] newBlocks = nnc.namenode.getBlocks(datanode, - Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive)).getBlocks(); + final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive); + final BlockWithLocations[] newBlocks = nnc.namenode.getBlocks( + getDatanode(), size).getBlocks(); + long bytesReceived = 0; for (BlockWithLocations blk : newBlocks) { bytesReceived += blk.getBlock().getNumBytes(); @@ -713,10 +753,13 @@ private long getBlockList() throws IOException { synchronized (block) { // update locations - for (String datanodeUuid : blk.getDatanodeUuids()) { - final BalancerDatanode d = datanodeMap.get(datanodeUuid); - if (d != null) { // not an unknown datanode - block.addLocation(d); + final String[] datanodeUuids = blk.getDatanodeUuids(); + final StorageType[] storageTypes = blk.getStorageTypes(); + for (int i = 0; i < datanodeUuids.length; i++) { + final BalancerDatanode.StorageGroup g = storageGroupMap.get( + datanodeUuids[i], storageTypes[i]); + if (g != null) { // not unknown + block.addLocation(g); } } } @@ -731,8 +774,8 @@ private long getBlockList() throws IOException { /* Decide if the given block is a good candidate to move or not */ private boolean isGoodBlockCandidate(BalancerBlock block) { - for (NodeTask nodeTask : nodeTasks) { - if (Balancer.this.isGoodBlockCandidate(this, nodeTask.datanode, block)) { + for (Task t : tasks) { + if (Balancer.this.isGoodBlockCandidate(this, t.target, block)) { return true; } } @@ -747,20 +790,20 @@ private boolean isGoodBlockCandidate(BalancerBlock block) { * The block should be dispatched immediately after this method is returned. */ private PendingBlockMove chooseNextBlockToMove() { - for ( Iterator tasks=nodeTasks.iterator(); tasks.hasNext(); ) { - NodeTask task = tasks.next(); - BalancerDatanode target = task.getDatanode(); + for (Iterator i = tasks.iterator(); i.hasNext();) { + final Task task = i.next(); + final BalancerDatanode target = task.target.getBalancerDatanode(); PendingBlockMove pendingBlock = new PendingBlockMove(); if (target.addPendingBlock(pendingBlock)) { // target is not busy, so do a tentative block allocation pendingBlock.source = this; - pendingBlock.target = target; + pendingBlock.target = task.target; if ( pendingBlock.chooseBlockAndProxy() ) { long blockSize = pendingBlock.block.getNumBytes(); - decScheduledSize(blockSize); + incScheduledSize(-blockSize); task.size -= blockSize; if (task.size == 0) { - tasks.remove(); + i.remove(); } return pendingBlock; } else { @@ -834,7 +877,7 @@ private void dispatchBlocks() { // in case no blocks can be moved for source node's task, // jump out of while-loop after 5 iterations. if (noPendingBlockIteration >= MAX_NO_PENDING_BLOCK_ITERATIONS) { - setScheduledSize(0); + resetScheduledSize(); } } @@ -901,108 +944,154 @@ private static void checkReplicationPolicyCompatibility(Configuration conf IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT)); } - /* Given a data node set, build a network topology and decide - * over-utilized datanodes, above average utilized datanodes, - * below average utilized datanodes, and underutilized datanodes. - * The input data node set is shuffled before the datanodes - * are put into the over-utilized datanodes, above average utilized - * datanodes, below average utilized datanodes, and - * underutilized datanodes lists. This will add some randomness - * to the node matching later on. 
- * + + private static long getCapacity(DatanodeStorageReport report, StorageType t) { + long capacity = 0L; + for(StorageReport r : report.getStorageReports()) { + if (r.getStorage().getStorageType() == t) { + capacity += r.getCapacity(); + } + } + return capacity; + } + + private static long getRemaining(DatanodeStorageReport report, StorageType t) { + long remaining = 0L; + for(StorageReport r : report.getStorageReports()) { + if (r.getStorage().getStorageType() == t) { + remaining += r.getRemaining(); + } + } + return remaining; + } + + private boolean shouldIgnore(DatanodeInfo dn) { + //ignore decommissioned nodes + final boolean decommissioned = dn.isDecommissioned(); + //ignore decommissioning nodes + final boolean decommissioning = dn.isDecommissionInProgress(); + // ignore nodes in exclude list + final boolean excluded = Util.shouldBeExcluded(nodesToBeExcluded, dn); + // ignore nodes not in the include list (if include list is not empty) + final boolean notIncluded = !Util.shouldBeIncluded(nodesToBeIncluded, dn); + + if (decommissioned || decommissioning || excluded || notIncluded) { + if (LOG.isTraceEnabled()) { + LOG.trace("Excluding datanode " + dn + ": " + decommissioned + ", " + + decommissioning + ", " + excluded + ", " + notIncluded); + } + return true; + } + return false; + } + + /** + * Given a datanode storage set, build a network topology and decide + * over-utilized storages, above average utilized storages, + * below average utilized storages, and underutilized storages. + * The input datanode storage set is shuffled in order to randomize + * to the storage matching later on. + * * @return the total number of bytes that are * needed to move to make the cluster balanced. - * @param datanodes a set of datanodes + * @param reports a set of datanode storage reports */ - private long initNodes(DatanodeInfo[] datanodes) { + private long init(DatanodeStorageReport[] reports) { // compute average utilization - for (DatanodeInfo datanode : datanodes) { - // ignore decommissioning or decommissioned nodes or - // ignore nodes in exclude list - // or nodes not in the include list (if include list is not empty) - if (datanode.isDecommissioned() || datanode.isDecommissionInProgress() || - Util.shouldBeExcluded(nodesToBeExcluded, datanode) || - !Util.shouldBeIncluded(nodesToBeIncluded, datanode)) { - continue; + for (DatanodeStorageReport r : reports) { + if (shouldIgnore(r.getDatanodeInfo())) { + continue; } - policy.accumulateSpaces(datanode); + policy.accumulateSpaces(r); } policy.initAvgUtilization(); - /*create network topology and all data node lists: - * overloaded, above-average, below-average, and underloaded - * we alternates the accessing of the given datanodes array either by - * an increasing order or a decreasing order. - */ + // create network topology and classify utilization collections: + // over-utilized, above-average, below-average and under-utilized. 
long overLoadedBytes = 0L, underLoadedBytes = 0L; - for (DatanodeInfo datanode : DFSUtil.shuffle(datanodes)) { - // ignore decommissioning or decommissioned nodes or - // ignore nodes in exclude list - // or nodes not in the include list (if include list is not empty) - if (datanode.isDecommissioned() || datanode.isDecommissionInProgress() || - Util.shouldBeExcluded(nodesToBeExcluded, datanode) || - !Util.shouldBeIncluded(nodesToBeIncluded, datanode)) { - if (LOG.isTraceEnabled()) { - LOG.trace("Excluding datanode " + datanode); - } - continue; + for(DatanodeStorageReport r : DFSUtil.shuffle(reports)) { + final DatanodeInfo datanode = r.getDatanodeInfo(); + if (shouldIgnore(datanode)) { + continue; // ignore decommissioning or decommissioned nodes } cluster.add(datanode); - BalancerDatanode datanodeS; - final double avg = policy.getAvgUtilization(); - if (policy.getUtilization(datanode) >= avg) { - datanodeS = new Source(datanode, policy, threshold, maxConcurrentMovesPerNode); - if (isAboveAvgUtilized(datanodeS)) { - this.aboveAvgUtilizedDatanodes.add((Source)datanodeS); - } else { - assert(isOverUtilized(datanodeS)) : - datanodeS.getDisplayName()+ "is not an overUtilized node"; - this.overUtilizedDatanodes.add((Source)datanodeS); - overLoadedBytes += (long)((datanodeS.utilization-avg - -threshold)*datanodeS.datanode.getCapacity()/100.0); + + final BalancerDatanode dn = new BalancerDatanode(r, underLoadedBytes, + maxConcurrentMovesPerNode); + for(StorageType t : StorageType.asList()) { + final Double utilization = policy.getUtilization(r, t); + if (utilization == null) { // datanode does not have such storage type + continue; } - } else { - datanodeS = new BalancerDatanode(datanode, policy, threshold, - maxConcurrentMovesPerNode); - if ( isBelowOrEqualAvgUtilized(datanodeS)) { - this.belowAvgUtilizedDatanodes.add(datanodeS); + + final long capacity = getCapacity(r, t); + final double utilizationDiff = utilization - policy.getAvgUtilization(t); + final double thresholdDiff = Math.abs(utilizationDiff) - threshold; + final long maxSize2Move = computeMaxSize2Move(capacity, + getRemaining(r, t), utilizationDiff, threshold); + + final BalancerDatanode.StorageGroup g; + if (utilizationDiff > 0) { + final Source s = dn.addSource(t, utilization, maxSize2Move, this); + if (thresholdDiff <= 0) { // within threshold + aboveAvgUtilized.add(s); + } else { + overLoadedBytes += precentage2bytes(thresholdDiff, capacity); + overUtilized.add(s); + } + g = s; } else { - assert isUnderUtilized(datanodeS) : "isUnderUtilized(" - + datanodeS.getDisplayName() + ")=" + isUnderUtilized(datanodeS) - + ", utilization=" + datanodeS.utilization; - this.underUtilizedDatanodes.add(datanodeS); - underLoadedBytes += (long)((avg-threshold- - datanodeS.utilization)*datanodeS.datanode.getCapacity()/100.0); + g = dn.addStorageGroup(t, utilization, maxSize2Move); + if (thresholdDiff <= 0) { // within threshold + belowAvgUtilized.add(g); + } else { + underLoadedBytes += precentage2bytes(thresholdDiff, capacity); + underUtilized.add(g); + } } + storageGroupMap.put(g); } - datanodeMap.put(datanode.getDatanodeUuid(), datanodeS); } - //logging - logNodes(); + logUtilizationCollections(); - assert (this.datanodeMap.size() == - overUtilizedDatanodes.size()+underUtilizedDatanodes.size()+ - aboveAvgUtilizedDatanodes.size()+belowAvgUtilizedDatanodes.size()) - : "Mismatched number of datanodes"; + Preconditions.checkState(storageGroupMap.size() == overUtilized.size() + + underUtilized.size() + aboveAvgUtilized.size() + 
belowAvgUtilized.size(), + "Mismatched number of storage groups"); // return number of bytes to be moved in order to make the cluster balanced return Math.max(overLoadedBytes, underLoadedBytes); } - /* log the over utilized & under utilized nodes */ - private void logNodes() { - logNodes("over-utilized", overUtilizedDatanodes); - if (LOG.isTraceEnabled()) { - logNodes("above-average", aboveAvgUtilizedDatanodes); - logNodes("below-average", belowAvgUtilizedDatanodes); + private static long computeMaxSize2Move(final long capacity, final long remaining, + final double utilizationDiff, final double threshold) { + final double diff = Math.min(threshold, Math.abs(utilizationDiff)); + long maxSizeToMove = precentage2bytes(diff, capacity); + if (utilizationDiff < 0) { + maxSizeToMove = Math.min(remaining, maxSizeToMove); } - logNodes("underutilized", underUtilizedDatanodes); + return Math.min(MAX_SIZE_TO_MOVE, maxSizeToMove); } - private static void logNodes( - String name, Collection nodes) { - LOG.info(nodes.size() + " " + name + ": " + nodes); + private static long precentage2bytes(double precentage, long capacity) { + Preconditions.checkArgument(precentage >= 0, + "precentage = " + precentage + " < 0"); + return (long)(precentage * capacity / 100.0); + } + + /* log the over utilized & under utilized nodes */ + private void logUtilizationCollections() { + logUtilizationCollection("over-utilized", overUtilized); + if (LOG.isTraceEnabled()) { + logUtilizationCollection("above-average", aboveAvgUtilized); + logUtilizationCollection("below-average", belowAvgUtilized); + } + logUtilizationCollection("underutilized", underUtilized); + } + + private static + void logUtilizationCollection(String name, Collection items) { + LOG.info(items.size() + " " + name + ": " + items); } /** A matcher interface for matching nodes. */ @@ -1038,26 +1127,24 @@ public boolean match(NetworkTopology cluster, Node left, Node right) { /** * Decide all pairs and * the number of bytes to move from a source to a target - * Maximum bytes to be moved per node is - * Min(1 Band worth of bytes, MAX_SIZE_TO_MOVE). - * Return total number of bytes to move in this iteration + * Maximum bytes to be moved per storage group is + * min(1 Band worth of bytes, MAX_SIZE_TO_MOVE). + * @return total number of bytes to move in this iteration */ - private long chooseNodes() { + private long chooseStorageGroups() { // First, match nodes on the same node group if cluster is node group aware if (cluster.isNodeGroupAware()) { - chooseNodes(SAME_NODE_GROUP); + chooseStorageGroups(SAME_NODE_GROUP); } // Then, match nodes on the same rack - chooseNodes(SAME_RACK); + chooseStorageGroups(SAME_RACK); // At last, match all remaining nodes - chooseNodes(ANY_OTHER); + chooseStorageGroups(ANY_OTHER); - assert (datanodeMap.size() >= sources.size()+targets.size()) - : "Mismatched number of datanodes (" + - datanodeMap.size() + " total, " + - sources.size() + " sources, " + - targets.size() + " targets)"; + Preconditions.checkState(storageGroupMap.size() >= sources.size() + targets.size(), + "Mismatched number of datanodes (" + storageGroupMap.size() + " < " + + sources.size() + " sources, " + targets.size() + " targets)"); long bytesToMove = 0L; for (Source src : sources) { @@ -1067,25 +1154,25 @@ private long chooseNodes() { } /** Decide all pairs according to the matcher. 
*/ - private void chooseNodes(final Matcher matcher) { + private void chooseStorageGroups(final Matcher matcher) { /* first step: match each overUtilized datanode (source) to * one or more underUtilized datanodes (targets). */ - chooseDatanodes(overUtilizedDatanodes, underUtilizedDatanodes, matcher); + chooseStorageGroups(overUtilized, underUtilized, matcher); /* match each remaining overutilized datanode (source) to * below average utilized datanodes (targets). * Note only overutilized datanodes that haven't had that max bytes to move * satisfied in step 1 are selected */ - chooseDatanodes(overUtilizedDatanodes, belowAvgUtilizedDatanodes, matcher); + chooseStorageGroups(overUtilized, belowAvgUtilized, matcher); /* match each remaining underutilized datanode (target) to * above average utilized datanodes (source). * Note only underutilized datanodes that have not had that max bytes to * move satisfied in step 1 are selected. */ - chooseDatanodes(underUtilizedDatanodes, aboveAvgUtilizedDatanodes, matcher); + chooseStorageGroups(underUtilized, aboveAvgUtilized, matcher); } /** @@ -1093,13 +1180,14 @@ private void chooseNodes(final Matcher matcher) { * datanodes or the candidates are source nodes with (utilization > Avg), and * the others are target nodes with (utilization < Avg). */ - private void - chooseDatanodes(Collection datanodes, Collection candidates, + private + void chooseStorageGroups(Collection groups, Collection candidates, Matcher matcher) { - for (Iterator i = datanodes.iterator(); i.hasNext();) { - final D datanode = i.next(); - for(; chooseForOneDatanode(datanode, candidates, matcher); ); - if (!datanode.hasSpaceForScheduling()) { + for(final Iterator i = groups.iterator(); i.hasNext();) { + final G g = i.next(); + for(; choose4One(g, candidates, matcher); ); + if (!g.hasSpaceForScheduling()) { i.remove(); } } @@ -1109,18 +1197,19 @@ private void chooseNodes(final Matcher matcher) { * For the given datanode, choose a candidate and then schedule it. * @return true if a candidate is chosen; false if no candidates is chosen. 
*/ - private boolean chooseForOneDatanode( - BalancerDatanode dn, Collection candidates, Matcher matcher) { + private + boolean choose4One(BalancerDatanode.StorageGroup g, + Collection candidates, Matcher matcher) { final Iterator i = candidates.iterator(); - final C chosen = chooseCandidate(dn, i, matcher); - + final C chosen = chooseCandidate(g, i, matcher); + if (chosen == null) { return false; } - if (dn instanceof Source) { - matchSourceWithTargetToMove((Source)dn, chosen); + if (g instanceof Source) { + matchSourceWithTargetToMove((Source)g, chosen); } else { - matchSourceWithTargetToMove((Source)chosen, dn); + matchSourceWithTargetToMove((Source)chosen, g); } if (!chosen.hasSpaceForScheduling()) { i.remove(); @@ -1128,27 +1217,28 @@ private boolean chooseForOneDatanode( return true; } - private void matchSourceWithTargetToMove( - Source source, BalancerDatanode target) { + private void matchSourceWithTargetToMove(Source source, + BalancerDatanode.StorageGroup target) { long size = Math.min(source.availableSizeToMove(), target.availableSizeToMove()); - NodeTask nodeTask = new NodeTask(target, size); - source.addNodeTask(nodeTask); - target.incScheduledSize(nodeTask.getSize()); + final Task task = new Task(target, size); + source.addTask(task); + target.incScheduledSize(task.size); sources.add(source); targets.add(target); LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from " - +source.datanode.getName() + " to " + target.datanode.getName()); + + source.getDisplayName() + " to " + target.getDisplayName()); } /** Choose a candidate for the given datanode. */ - private - C chooseCandidate(D dn, Iterator candidates, Matcher matcher) { - if (dn.hasSpaceForScheduling()) { + private + C chooseCandidate(G g, Iterator candidates, Matcher matcher) { + if (g.hasSpaceForScheduling()) { for(; candidates.hasNext(); ) { final C c = candidates.next(); if (!c.hasSpaceForScheduling()) { candidates.remove(); - } else if (matcher.match(cluster, dn.getDatanode(), c.getDatanode())) { + } else if (matcher.match(cluster, g.getDatanode(), c.getDatanode())) { return c; } } @@ -1202,9 +1292,10 @@ private void waitForMoveCompletion() { boolean shouldWait; do { shouldWait = false; - for (BalancerDatanode target : targets) { - if (!target.isPendingQEmpty()) { + for (BalancerDatanode.StorageGroup target : targets) { + if (!target.getBalancerDatanode().isPendingQEmpty()) { shouldWait = true; + break; } } if (shouldWait) { @@ -1273,12 +1364,15 @@ synchronized private void cleanup() { * 3. 
doing the move does not reduce the number of racks that the block has */ private boolean isGoodBlockCandidate(Source source, - BalancerDatanode target, BalancerBlock block) { + BalancerDatanode.StorageGroup target, BalancerBlock block) { + if (source.storageType != target.storageType) { + return false; + } // check if the block is moved or not if (movedBlocks.contains(block)) { - return false; + return false; } - if (block.isLocatedOnDatanode(target)) { + if (block.isLocatedOn(target)) { return false; } if (cluster.isNodeGroupAware() && @@ -1293,8 +1387,8 @@ private boolean isGoodBlockCandidate(Source source, } else { boolean notOnSameRack = true; synchronized (block) { - for (BalancerDatanode loc : block.locations) { - if (cluster.isOnSameRack(loc.datanode, target.datanode)) { + for (BalancerDatanode.StorageGroup loc : block.locations) { + if (cluster.isOnSameRack(loc.getDatanode(), target.getDatanode())) { notOnSameRack = false; break; } @@ -1305,9 +1399,9 @@ private boolean isGoodBlockCandidate(Source source, goodBlock = true; } else { // good if source is on the same rack as on of the replicas - for (BalancerDatanode loc : block.locations) { + for (BalancerDatanode.StorageGroup loc : block.locations) { if (loc != source && - cluster.isOnSameRack(loc.datanode, source.datanode)) { + cluster.isOnSameRack(loc.getDatanode(), source.getDatanode())) { goodBlock = true; break; } @@ -1328,25 +1422,26 @@ private boolean isGoodBlockCandidate(Source source, * @return true if there are any replica (other than source) on the same node * group with target */ - private boolean isOnSameNodeGroupWithReplicas(BalancerDatanode target, + private boolean isOnSameNodeGroupWithReplicas(BalancerDatanode.StorageGroup target, BalancerBlock block, Source source) { - for (BalancerDatanode loc : block.locations) { + final DatanodeInfo targetDn = target.getDatanode(); + for (BalancerDatanode.StorageGroup loc : block.locations) { if (loc != source && - cluster.isOnSameNodeGroup(loc.getDatanode(), target.getDatanode())) { - return true; - } + cluster.isOnSameNodeGroup(loc.getDatanode(), targetDn)) { + return true; } + } return false; } /* reset all fields in a balancer preparing for the next iteration */ private void resetData(Configuration conf) { this.cluster = NetworkTopology.getInstance(conf); - this.overUtilizedDatanodes.clear(); - this.aboveAvgUtilizedDatanodes.clear(); - this.belowAvgUtilizedDatanodes.clear(); - this.underUtilizedDatanodes.clear(); - this.datanodeMap.clear(); + this.overUtilized.clear(); + this.aboveAvgUtilized.clear(); + this.belowAvgUtilized.clear(); + this.underUtilized.clear(); + this.storageGroupMap.clear(); this.sources.clear(); this.targets.clear(); this.policy.reset(); @@ -1366,32 +1461,6 @@ private void cleanGlobalBlockList() { } } } - - /* Return true if the given datanode is overUtilized */ - private boolean isOverUtilized(BalancerDatanode datanode) { - return datanode.utilization > (policy.getAvgUtilization()+threshold); - } - - /* Return true if the given datanode is above or equal to average utilized - * but not overUtilized */ - private boolean isAboveAvgUtilized(BalancerDatanode datanode) { - final double avg = policy.getAvgUtilization(); - return (datanode.utilization <= (avg+threshold)) - && (datanode.utilization >= avg); - } - - /* Return true if the given datanode is underUtilized */ - private boolean isUnderUtilized(BalancerDatanode datanode) { - return datanode.utilization < (policy.getAvgUtilization()-threshold); - } - - /* Return true if the given datanode is below 
average utilized - * but not underUtilized */ - private boolean isBelowOrEqualAvgUtilized(BalancerDatanode datanode) { - final double avg = policy.getAvgUtilization(); - return (datanode.utilization >= (avg-threshold)) - && (datanode.utilization <= avg); - } // Exit status enum ReturnStatus { @@ -1419,7 +1488,8 @@ private ReturnStatus run(int iteration, Formatter formatter, /* get all live datanodes of a cluster and their disk usage * decide the number of bytes need to be moved */ - final long bytesLeftToMove = initNodes(nnc.client.getDatanodeReport(DatanodeReportType.LIVE)); + final long bytesLeftToMove = init( + nnc.client.getDatanodeStorageReport(DatanodeReportType.LIVE)); if (bytesLeftToMove == 0) { System.out.println("The cluster is balanced. Exiting..."); return ReturnStatus.SUCCESS; @@ -1433,7 +1503,7 @@ private ReturnStatus run(int iteration, Formatter formatter, * in this iteration. Maximum bytes to be moved per node is * Min(1 Band worth of bytes, MAX_SIZE_TO_MOVE). */ - final long bytesToMove = chooseNodes(); + final long bytesToMove = chooseStorageGroups(); if (bytesToMove == 0) { System.out.println("No block can be moved. Exiting..."); return ReturnStatus.NO_MOVE_BLOCK; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java index 3297a250a4e..646abd4ef48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java @@ -18,7 +18,11 @@ package org.apache.hadoop.hdfs.server.balancer; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.StorageType; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.hadoop.hdfs.util.EnumCounters; +import org.apache.hadoop.hdfs.util.EnumDoubles; /** * Balancing policy. @@ -28,31 +32,43 @@ */ @InterfaceAudience.Private abstract class BalancingPolicy { - long totalCapacity; - long totalUsedSpace; - private double avgUtilization; + final EnumCounters totalCapacities + = new EnumCounters(StorageType.class); + final EnumCounters totalUsedSpaces + = new EnumCounters(StorageType.class); + final EnumDoubles avgUtilizations + = new EnumDoubles(StorageType.class); void reset() { - totalCapacity = 0L; - totalUsedSpace = 0L; - avgUtilization = 0.0; + totalCapacities.reset(); + totalUsedSpaces.reset(); + avgUtilizations.reset(); } /** Get the policy name. */ abstract String getName(); /** Accumulate used space and capacity. 
*/ - abstract void accumulateSpaces(DatanodeInfo d); + abstract void accumulateSpaces(DatanodeStorageReport r); void initAvgUtilization() { - this.avgUtilization = totalUsedSpace*100.0/totalCapacity; - } - double getAvgUtilization() { - return avgUtilization; + for(StorageType t : StorageType.asList()) { + final long capacity = totalCapacities.get(t); + if (capacity > 0L) { + final double avg = totalUsedSpaces.get(t)*100.0/capacity; + avgUtilizations.set(t, avg); + } + } } - /** Return the utilization of a datanode */ - abstract double getUtilization(DatanodeInfo d); + double getAvgUtilization(StorageType t) { + return avgUtilizations.get(t); + } + + /** @return the utilization of a particular storage type of a datanode; + * or return null if the datanode does not have such storage type. + */ + abstract Double getUtilization(DatanodeStorageReport r, StorageType t); @Override public String toString() { @@ -84,14 +100,25 @@ String getName() { } @Override - void accumulateSpaces(DatanodeInfo d) { - totalCapacity += d.getCapacity(); - totalUsedSpace += d.getDfsUsed(); + void accumulateSpaces(DatanodeStorageReport r) { + for(StorageReport s : r.getStorageReports()) { + final StorageType t = s.getStorage().getStorageType(); + totalCapacities.add(t, s.getCapacity()); + totalUsedSpaces.add(t, s.getDfsUsed()); + } } @Override - double getUtilization(DatanodeInfo d) { - return d.getDfsUsed()*100.0/d.getCapacity(); + Double getUtilization(DatanodeStorageReport r, final StorageType t) { + long capacity = 0L; + long dfsUsed = 0L; + for(StorageReport s : r.getStorageReports()) { + if (s.getStorage().getStorageType() == t) { + capacity += s.getCapacity(); + dfsUsed += s.getDfsUsed(); + } + } + return capacity == 0L? null: dfsUsed*100.0/capacity; } } @@ -108,14 +135,25 @@ String getName() { } @Override - void accumulateSpaces(DatanodeInfo d) { - totalCapacity += d.getCapacity(); - totalUsedSpace += d.getBlockPoolUsed(); + void accumulateSpaces(DatanodeStorageReport r) { + for(StorageReport s : r.getStorageReports()) { + final StorageType t = s.getStorage().getStorageType(); + totalCapacities.add(t, s.getCapacity()); + totalUsedSpaces.add(t, s.getBlockPoolUsed()); + } } @Override - double getUtilization(DatanodeInfo d) { - return d.getBlockPoolUsed()*100.0/d.getCapacity(); + Double getUtilization(DatanodeStorageReport r, final StorageType t) { + long capacity = 0L; + long blockPoolUsed = 0L; + for(StorageReport s : r.getStorageReports()) { + if (s.getStorage().getStorageType() == t) { + capacity += s.getCapacity(); + blockPoolUsed += s.getBlockPoolUsed(); + } + } + return capacity == 0L? 
null: blockPoolUsed*100.0/capacity; } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 4fe6de3246e..c4837db4d81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2826,12 +2826,15 @@ private long addBlock(Block block, List results) { } else { final String[] datanodeUuids = new String[locations.size()]; final String[] storageIDs = new String[datanodeUuids.length]; + final StorageType[] storageTypes = new StorageType[datanodeUuids.length]; for(int i = 0; i < locations.size(); i++) { final DatanodeStorageInfo s = locations.get(i); datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid(); storageIDs[i] = s.getStorageID(); + storageTypes[i] = s.getStorageType(); } - results.add(new BlockWithLocations(block, datanodeUuids, storageIDs)); + results.add(new BlockWithLocations(block, datanodeUuids, storageIDs, + storageTypes)); return block.getNumBytes(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java index bc446ac7541..c907f3be5b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import java.util.Arrays; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; /** @@ -39,12 +38,15 @@ public static class BlockWithLocations { final Block block; final String[] datanodeUuids; final String[] storageIDs; + final StorageType[] storageTypes; /** constructor */ - public BlockWithLocations(Block block, String[] datanodeUuids, String[] storageIDs) { + public BlockWithLocations(Block block, String[] datanodeUuids, + String[] storageIDs, StorageType[] storageTypes) { this.block = block; this.datanodeUuids = datanodeUuids; this.storageIDs = storageIDs; + this.storageTypes = storageTypes; } /** get the block */ @@ -61,7 +63,12 @@ public String[] getDatanodeUuids() { public String[] getStorageIDs() { return storageIDs; } - + + /** @return the storage types */ + public StorageType[] getStorageTypes() { + return storageTypes; + } + @Override public String toString() { final StringBuilder b = new StringBuilder(); @@ -70,12 +77,18 @@ public String toString() { return b.append("[]").toString(); } - b.append(storageIDs[0]).append('@').append(datanodeUuids[0]); + appendString(0, b.append("[")); for(int i = 1; i < datanodeUuids.length; i++) { - b.append(", ").append(storageIDs[i]).append("@").append(datanodeUuids[i]); + appendString(i, b.append(",")); } return b.append("]").toString(); } + + private StringBuilder appendString(int i, StringBuilder b) { + return b.append("[").append(storageTypes[i]).append("]") + .append(storageIDs[i]) + .append("@").append(datanodeUuids[i]); + } } private final BlockWithLocations[] blocks; diff --git 
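// Editorial sketch, not part of the patch: how the per-storage-type accounting introduced in
// BalancingPolicy above fits together, assuming the usual generic signatures
// EnumCounters<E extends Enum<E>> and EnumDoubles<E extends Enum<E>>. The "reports" variable
// stands in for the DatanodeStorageReport list the balancer fetches from the NameNode.
EnumCounters<StorageType> capacities = new EnumCounters<StorageType>(StorageType.class);
EnumCounters<StorageType> used       = new EnumCounters<StorageType>(StorageType.class);
EnumDoubles<StorageType>  average    = new EnumDoubles<StorageType>(StorageType.class);

for (DatanodeStorageReport r : reports) {
  for (StorageReport s : r.getStorageReports()) {
    final StorageType t = s.getStorage().getStorageType();
    capacities.add(t, s.getCapacity());      // accumulateSpaces(): capacity per storage type
    used.add(t, s.getDfsUsed());             // ...and DFS (or block-pool) usage per type
  }
}
for (StorageType t : StorageType.asList()) { // initAvgUtilization(): one average per storage type
  final long capacity = capacities.get(t);
  if (capacity > 0L) {
    average.set(t, used.get(t) * 100.0 / capacity);
  }
}
// A storage group's DISK utilization is then compared against average.get(StorageType.DISK)
// plus or minus the threshold to classify it as over-, above-, below-, or under-utilized.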
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java index e3975f6d68a..8bdea1fd59e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java @@ -37,7 +37,7 @@ public class EnumCounters> { /** The class of the enum. */ private final Class enumClass; - /** The counter array, counters[i] corresponds to the enumConstants[i]. */ + /** An array of longs corresponding to the enum type. */ private final long[] counters; /** @@ -75,6 +75,13 @@ public final void set(final EnumCounters that) { } } + /** Reset all counters to zero. */ + public final void reset() { + for(int i = 0; i < counters.length; i++) { + this.counters[i] = 0L; + } + } + /** Add the given value to counter e. */ public final void add(final E e, final long value) { counters[e.ordinal()] += value; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java new file mode 100644 index 00000000000..126070aa016 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import java.util.Arrays; + +import com.google.common.base.Preconditions; + +/** + * Similar to {@link EnumCounters} except that the value type is double. + * + * @param the enum type + */ +public class EnumDoubles> { + /** The class of the enum. */ + private final Class enumClass; + /** An array of doubles corresponding to the enum type. */ + private final double[] doubles; + + /** + * Construct doubles for the given enum constants. + * @param enumClass the enum class. + */ + public EnumDoubles(final Class enumClass) { + final E[] enumConstants = enumClass.getEnumConstants(); + Preconditions.checkNotNull(enumConstants); + this.enumClass = enumClass; + this.doubles = new double[enumConstants.length]; + } + + /** @return the value corresponding to e. */ + public final double get(final E e) { + return doubles[e.ordinal()]; + } + + /** Negate all values. */ + public final void negation() { + for(int i = 0; i < doubles.length; i++) { + doubles[i] = -doubles[i]; + } + } + + /** Set e to the given value. */ + public final void set(final E e, final double value) { + doubles[e.ordinal()] = value; + } + + /** Set the values of this object to that object. 
*/ + public final void set(final EnumDoubles that) { + for(int i = 0; i < doubles.length; i++) { + this.doubles[i] = that.doubles[i]; + } + } + + /** Reset all values to zero. */ + public final void reset() { + for(int i = 0; i < doubles.length; i++) { + this.doubles[i] = 0.0; + } + } + + /** Add the given value to e. */ + public final void add(final E e, final double value) { + doubles[e.ordinal()] += value; + } + + /** Add the values of that object to this. */ + public final void add(final EnumDoubles that) { + for(int i = 0; i < doubles.length; i++) { + this.doubles[i] += that.doubles[i]; + } + } + + /** Subtract the given value from e. */ + public final void subtract(final E e, final double value) { + doubles[e.ordinal()] -= value; + } + + /** Subtract the values of this object from that object. */ + public final void subtract(final EnumDoubles that) { + for(int i = 0; i < doubles.length; i++) { + this.doubles[i] -= that.doubles[i]; + } + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } else if (obj == null || !(obj instanceof EnumDoubles)) { + return false; + } + final EnumDoubles that = (EnumDoubles)obj; + return this.enumClass == that.enumClass + && Arrays.equals(this.doubles, that.doubles); + } + + @Override + public int hashCode() { + return Arrays.hashCode(doubles); + } + + @Override + public String toString() { + final E[] enumConstants = enumClass.getEnumConstants(); + final StringBuilder b = new StringBuilder(); + for(int i = 0; i < doubles.length; i++) { + final String name = enumConstants[i].name(); + b.append(name).append("=").append(doubles[i]).append(", "); + } + return b.substring(0, b.length() - 2); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 04fcc500e84..32c54b00b3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -404,6 +404,7 @@ message BlockWithLocationsProto { required BlockProto block = 1; // Block repeated string datanodeUuids = 2; // Datanodes with replicas of the block repeated string storageUuids = 3; // Storages with replicas of the block + repeated StorageTypeProto storageTypes = 4; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index 440b4f3a6cc..cb85c7deb61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -184,8 +184,10 @@ public void testConvertBlock() { private static BlockWithLocations getBlockWithLocations(int bid) { final String[] datanodeUuids = {"dn1", "dn2", "dn3"}; final String[] storageIDs = {"s1", "s2", "s3"}; + final StorageType[] storageTypes = { + StorageType.DISK, StorageType.DISK, StorageType.DISK}; return new BlockWithLocations(new Block(bid, 0, 1), - datanodeUuids, storageIDs); + datanodeUuids, storageIDs, storageTypes); } private void compare(BlockWithLocations locs1, BlockWithLocations locs2) { From b5b862e3afd0797dc8f940204622e174c1382f5e Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Fri, 1 Aug 2014 01:13:42 +0000 Subject: [PATCH 094/354] HDFS-6797. DataNode logs wrong layoutversion during upgrade. 
(Contributed by Benoy Antony) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615017 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 72268d30554..d6253c2c939 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -406,6 +406,9 @@ Release 2.6.0 - UNRELEASED XmlEditsVisitor.java is JVM vendor specific. Breaks IBM JAVA. (Amir Sanjar via stevel) + HDFS-6797. DataNode logs wrong layoutversion during upgrade. (Benoy Antony + via Arpit Agarwal) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index c8c3a7ec822..dec0d55004d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -312,7 +312,7 @@ void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException { } LOG.info("Upgrading block pool storage directory " + bpSd.getRoot() + ".\n old LV = " + this.getLayoutVersion() + "; old CTime = " - + this.getCTime() + ".\n new LV = " + nsInfo.getLayoutVersion() + + this.getCTime() + ".\n new LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION + "; new CTime = " + nsInfo.getCTime()); // get /previous directory String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath()); From 8bca3c689ae77c39bf1065b6fb78508a24ebf537 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Fri, 1 Aug 2014 01:16:06 +0000 Subject: [PATCH 095/354] HDFS-3482. Update CHANGES.txt. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615019 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d6253c2c939..785c82523e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -184,9 +184,6 @@ Trunk (Unreleased) HDFS-3549. Fix dist tar build fails in hadoop-hdfs-raid project. (Jason Lowe via daryn) - HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException - if option is specified without values. ( Madhukara Phatak via umamahesh) - HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049. (acmurthy via eli) @@ -406,6 +403,9 @@ Release 2.6.0 - UNRELEASED XmlEditsVisitor.java is JVM vendor specific. Breaks IBM JAVA. (Amir Sanjar via stevel) + HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException + if option is specified without values. ( Madhukara Phatak via umamahesh) + HDFS-6797. DataNode logs wrong layoutversion during upgrade. (Benoy Antony via Arpit Agarwal) From 1141edc424a2975ab48871108e5da4bc3def49f6 Mon Sep 17 00:00:00 2001 From: Junping Du Date: Fri, 1 Aug 2014 04:16:08 +0000 Subject: [PATCH 096/354] YARN-2051. Fix bug in PBimpls and add more unit tests with reflection. 
(Contributed by Binglin Chang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615025 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../GetApplicationsRequest.java | 18 + .../yarn/api/records/ResourceOption.java | 2 + .../impl/pb/GetApplicationsRequestPBImpl.java | 10 + .../impl/pb/ApplicationReportPBImpl.java | 1 + .../ApplicationSubmissionContextPBImpl.java | 1 + .../pb/ResourceBlacklistRequestPBImpl.java | 13 +- .../records/impl/pb/ResourceOptionPBImpl.java | 15 + .../yarn/api/records/impl/pb/TokenPBImpl.java | 2 +- .../pb/UpdateNodeResourceRequestPBImpl.java | 16 +- .../hadoop/yarn/api/TestPBImplRecords.java | 895 ++++++++++++++++++ 11 files changed, 972 insertions(+), 4 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 7b7fa0ddbad..3458b2ceba7 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -120,6 +120,9 @@ Release 2.6.0 - UNRELEASED YARN-2354. DistributedShell may allocate more containers than client specified after AM restarts. (Li Lu via jianhe) + YARN-2051. Fix bug in PBimpls and add more unit tests with reflection. + (Binglin Chang via junping_du) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java index 4cc0b70e4af..7fc58d67aef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java @@ -305,6 +305,15 @@ public static GetApplicationsRequest newInstance( @Unstable public abstract LongRange getStartRange(); + /** + * Set the range of start times to filter applications on + * + * @param range + */ + @Private + @Unstable + public abstract void setStartRange(LongRange range); + /** * Set the range of start times to filter applications on * @@ -326,6 +335,15 @@ public abstract void setStartRange(long begin, long end) @Unstable public abstract LongRange getFinishRange(); + /** + * Set the range of finish times to filter applications on + * + * @param range + */ + @Private + @Unstable + public abstract void setFinishRange(LongRange range); + /** * Set the range of finish times to filter applications on * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java index d6393505f81..380f38d74a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java @@ -31,6 +31,8 @@ public static ResourceOption newInstance(Resource resource, int overCommitTimeout){ ResourceOption resourceOption = Records.newRecord(ResourceOption.class); resourceOption.setResource(resource); + resourceOption.setOverCommitTimeout(overCommitTimeout); + resourceOption.build(); 
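// Editorial sketch, not part of the patch: the LongRange overloads added to
// GetApplicationsRequest above mirror the LongRange-returning getters, which lets the
// reflection-based round-trip test later in this patch pair every getter with a setter of the
// same type. Illustrative use (LongRange is org.apache.commons.lang.math.LongRange; the bounds
// are arbitrary, and both overloads are @Private/@Unstable rather than public client API):
GetApplicationsRequest request = GetApplicationsRequest.newInstance();
request.setStartRange(new LongRange(0L, System.currentTimeMillis()));  // started any time up to now
request.setFinishRange(new LongRange(0L, Long.MAX_VALUE));             // any finish time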
return resourceOption; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java index 4fd49bcee8d..a8996f0298a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java @@ -326,6 +326,11 @@ public LongRange getStartRange() { return this.start; } + @Override + public void setStartRange(LongRange range) { + this.start = range; + } + @Override public void setStartRange(long begin, long end) throws IllegalArgumentException { @@ -349,6 +354,11 @@ public LongRange getFinishRange() { return this.finish; } + @Override + public void setFinishRange(LongRange range) { + this.finish = range; + } + @Override public void setFinishRange(long begin, long end) { if (begin > end) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java index 7e19d8fa2f8..dd3e2bc2136 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java @@ -479,6 +479,7 @@ private void mergeLocalToBuilder() { builder.setAmRmToken(convertToProtoFormat(this.amRmToken)); } if (this.applicationTags != null && !this.applicationTags.isEmpty()) { + builder.clearApplicationTags(); builder.addAllApplicationTags(this.applicationTags); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index c4a3a721990..c2f3268073e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -107,6 +107,7 @@ private void mergeLocalToBuilder() { builder.setResource(convertToProtoFormat(this.resource)); } if (this.applicationTags != null && !this.applicationTags.isEmpty()) { + builder.clearApplicationTags(); builder.addAllApplicationTags(this.applicationTags); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java index 743e5d12c3f..45d89488ac2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java @@ -90,7 +90,7 @@ private void addBlacklistAdditionsToProto() { private void addBlacklistRemovalsToProto() { maybeInitBuilder(); - builder.clearBlacklistAdditions(); + builder.clearBlacklistRemovals(); if (this.blacklistRemovals == null) { return; } @@ -159,5 +159,14 @@ public void setBlacklistRemovals(List resourceNames) { public int hashCode() { return getProto().hashCode(); } - + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java index 5440a8491e7..79f479ee99d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java @@ -86,4 +86,19 @@ protected void build() { builder = null; } + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java index 2835cbb65f6..7aeb460d525 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java @@ -48,7 +48,7 @@ public TokenPBImpl(TokenProto proto) { } public synchronized TokenProto getProto() { - mergeLocalToProto(); + mergeLocalToProto(); proto = viaProto ? 
proto : builder.build(); viaProto = true; return proto; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java index 413e4a00c15..d44599664a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java @@ -162,5 +162,19 @@ public void remove() { }; this.builder.addAllNodeResourceMap(values); } - + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java new file mode 100644 index 00000000000..c6572e9f387 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -0,0 +1,895 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
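// Editorial sketch, not part of the patch: the failure mode the clearApplicationTags() calls
// above guard against. Re-serializing a record re-runs mergeLocalToBuilder() on a builder that
// may already contain the previously merged values, so without the clear a repeated field can
// keep growing; the clearBlacklistRemovals() change fixes a related merge bug where the wrong
// repeated field was being cleared. The tag values below are arbitrary.
ApplicationSubmissionContext ctx = Records.newRecord(ApplicationSubmissionContext.class);
ctx.setApplicationTags(Sets.newHashSet("tagA", "tagB"));
ApplicationSubmissionContextPBImpl pb = (ApplicationSubmissionContextPBImpl) ctx;
pb.getProto();   // merges the two tags into the builder and builds the proto
pb.getProto();   // before the fix this could merge them again, yielding duplicate tag entries
// The proto-delegating equals()/hashCode() methods added above exist for the same reason: the
// round-trip test needs whole records to compare equal after serialization.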
+ */ +package org.apache.hadoop.yarn.api; +import java.io.IOException; +import java.lang.reflect.Array; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.nio.ByteBuffer; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; +import java.util.Set; + +import org.apache.commons.lang.math.LongRange; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.security.proto.SecurityProtos.*; +import org.apache.hadoop.yarn.api.protocolrecords.*; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.*; +import org.apache.hadoop.yarn.api.records.*; +import org.apache.hadoop.yarn.api.records.impl.pb.*; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*; +import org.apache.hadoop.yarn.proto.YarnProtos.*; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.*; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.*; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +public class TestPBImplRecords { + static final Log LOG = LogFactory.getLog(TestPBImplRecords.class); + + private static HashMap typeValueCache = new HashMap(); + private static Random rand = new Random(); + private static byte [] bytes = new byte[] {'1', '2', '3', '4'}; + + @SuppressWarnings({"rawtypes", "unchecked"}) + private static Object genTypeValue(Type type) { + Object ret = typeValueCache.get(type); + if (ret != null) { + return ret; + } + // only use positive primitive values + if (type.equals(boolean.class)) { + return rand.nextBoolean(); + } else if (type.equals(byte.class)) { + return bytes[rand.nextInt(4)]; + } else if (type.equals(int.class)) { + return rand.nextInt(1000000); + } else if (type.equals(long.class)) { + return Long.valueOf(rand.nextInt(1000000)); + } else if (type.equals(float.class)) { + return rand.nextFloat(); + } else if (type.equals(double.class)) { + return rand.nextDouble(); + } else if (type.equals(String.class)) { + return String.format("%c%c%c", + 'a' + rand.nextInt(26), + 'a' + rand.nextInt(26), + 'a' + rand.nextInt(26)); + } else if (type instanceof Class) { + Class clazz = (Class)type; + if (clazz.isArray()) { + Class compClass = clazz.getComponentType(); + if (compClass != null) { + ret = Array.newInstance(compClass, 2); + Array.set(ret, 0, genTypeValue(compClass)); + Array.set(ret, 1, genTypeValue(compClass)); + } + } else if (clazz.isEnum()) { + Object [] values = clazz.getEnumConstants(); + ret = values[rand.nextInt(values.length)]; + } else if (clazz.equals(ByteBuffer.class)) { + // return new ByteBuffer every time + // to prevent potential side effects + ByteBuffer buff = ByteBuffer.allocate(4); + rand.nextBytes(buff.array()); + return buff; + } + } else if (type instanceof ParameterizedType) { + ParameterizedType pt = (ParameterizedType)type; + Type rawType = pt.getRawType(); + Type [] params = pt.getActualTypeArguments(); + // only support EnumSet, List, Set, Map + if (rawType.equals(EnumSet.class)) { + if (params[0] instanceof Class) { + Class c = (Class)(params[0]); + return EnumSet.allOf(c); + } + } if (rawType.equals(List.class)) { + 
ret = Lists.newArrayList(genTypeValue(params[0])); + } else if (rawType.equals(Set.class)) { + ret = Sets.newHashSet(genTypeValue(params[0])); + } else if (rawType.equals(Map.class)) { + Map map = Maps.newHashMap(); + map.put(genTypeValue(params[0]), genTypeValue(params[1])); + ret = map; + } + } + if (ret == null) { + throw new IllegalArgumentException("type " + type + " is not supported"); + } + typeValueCache.put(type, ret); + return ret; + } + + /** + * this method generate record instance by calling newIntance + * using reflection, add register the generated value to typeValueCache + */ + @SuppressWarnings("rawtypes") + private static Object generateByNewInstance(Class clazz) throws Exception { + Object ret = typeValueCache.get(clazz); + if (ret != null) { + return ret; + } + Method newInstance = null; + Type [] paramTypes = new Type[0]; + // get newInstance method with most parameters + for (Method m : clazz.getMethods()) { + int mod = m.getModifiers(); + if (m.getDeclaringClass().equals(clazz) && + Modifier.isPublic(mod) && + Modifier.isStatic(mod) && + m.getName().equals("newInstance")) { + Type [] pts = m.getGenericParameterTypes(); + if (newInstance == null + || (pts.length > paramTypes.length)) { + newInstance = m; + paramTypes = pts; + } + } + } + if (newInstance == null) { + throw new IllegalArgumentException("type " + clazz.getName() + + " does not have newInstance method"); + } + Object [] args = new Object[paramTypes.length]; + for (int i=0;i Map getGetSetPairs(Class recordClass) + throws Exception { + Map ret = new HashMap(); + Method [] methods = recordClass.getDeclaredMethods(); + // get all get methods + for (int i = 0; i < methods.length; i++) { + Method m = methods[i]; + int mod = m.getModifiers(); + if (m.getDeclaringClass().equals(recordClass) && + Modifier.isPublic(mod) && + (!Modifier.isStatic(mod))) { + String name = m.getName(); + if (name.equals("getProto")) { + continue; + } + if ((name.length() > 3) && name.startsWith("get") && + (m.getParameterTypes().length == 0)) { + String propertyName = name.substring(3); + Type valueType = m.getGenericReturnType(); + GetSetPair p = ret.get(propertyName); + if (p == null) { + p = new GetSetPair(); + p.propertyName = propertyName; + p.type = valueType; + p.getMethod = m; + ret.put(propertyName, p); + } else { + Assert.fail("Multiple get method with same name: " + recordClass + + p.propertyName); + } + } + } + } + // match get methods with set methods + for (int i = 0; i < methods.length; i++) { + Method m = methods[i]; + int mod = m.getModifiers(); + if (m.getDeclaringClass().equals(recordClass) && + Modifier.isPublic(mod) && + (!Modifier.isStatic(mod))) { + String name = m.getName(); + if (name.startsWith("set") && (m.getParameterTypes().length == 1)) { + String propertyName = name.substring(3); + Type valueType = m.getGenericParameterTypes()[0]; + GetSetPair p = ret.get(propertyName); + if (p != null && p.type.equals(valueType)) { + p.setMethod = m; + } + } + } + } + // exclude incomplete get/set pair, and generate test value + Iterator> itr = ret.entrySet().iterator(); + while (itr.hasNext()) { + Entry cur = itr.next(); + GetSetPair gsp = cur.getValue(); + if ((gsp.getMethod == null) || + (gsp.setMethod == null)) { + LOG.info(String.format("Exclude protential property: %s\n", gsp.propertyName)); + itr.remove(); + } else { + LOG.info(String.format("New property: %s type: %s", gsp.toString(), gsp.type)); + gsp.testValue = genTypeValue(gsp.type); + LOG.info(String.format(" testValue: %s\n", gsp.testValue)); + } + } + 
return ret; + } + + private void validatePBImplRecord(Class recordClass, + Class

protoClass) + throws Exception { + LOG.info(String.format("Validate %s %s\n", recordClass.getName(), + protoClass.getName())); + Constructor emptyConstructor = recordClass.getConstructor(); + Constructor pbConstructor = recordClass.getConstructor(protoClass); + Method getProto = recordClass.getDeclaredMethod("getProto"); + Map getSetPairs = getGetSetPairs(recordClass); + R origRecord = emptyConstructor.newInstance(); + for (GetSetPair gsp : getSetPairs.values()) { + gsp.setMethod.invoke(origRecord, gsp.testValue); + } + Object ret = getProto.invoke(origRecord); + Assert.assertNotNull(recordClass.getName() + "#getProto returns null", ret); + if (!(protoClass.isAssignableFrom(ret.getClass()))) { + Assert.fail("Illegal getProto method return type: " + ret.getClass()); + } + R deserRecord = pbConstructor.newInstance(ret); + Assert.assertEquals("whole " + recordClass + " records should be equal", + origRecord, deserRecord); + for (GetSetPair gsp : getSetPairs.values()) { + Object origValue = gsp.getMethod.invoke(origRecord); + Object deserValue = gsp.getMethod.invoke(deserRecord); + Assert.assertEquals("property " + recordClass.getName() + "#" + + gsp.propertyName + " should be equal", origValue, deserValue); + } + } + + @Test + public void testAllocateRequestPBImpl() throws Exception { + validatePBImplRecord(AllocateRequestPBImpl.class, AllocateRequestProto.class); + } + + @Test + public void testAllocateResponsePBImpl() throws Exception { + validatePBImplRecord(AllocateResponsePBImpl.class, AllocateResponseProto.class); + } + + @Test + public void testCancelDelegationTokenRequestPBImpl() throws Exception { + validatePBImplRecord(CancelDelegationTokenRequestPBImpl.class, + CancelDelegationTokenRequestProto.class); + } + + @Test + public void testCancelDelegationTokenResponsePBImpl() throws Exception { + validatePBImplRecord(CancelDelegationTokenResponsePBImpl.class, + CancelDelegationTokenResponseProto.class); + } + + @Test + public void testFinishApplicationMasterRequestPBImpl() throws Exception { + validatePBImplRecord(FinishApplicationMasterRequestPBImpl.class, + FinishApplicationMasterRequestProto.class); + } + + @Test + public void testFinishApplicationMasterResponsePBImpl() throws Exception { + validatePBImplRecord(FinishApplicationMasterResponsePBImpl.class, + FinishApplicationMasterResponseProto.class); + } + + @Test + public void testGetApplicationAttemptReportRequestPBImpl() throws Exception { + validatePBImplRecord(GetApplicationAttemptReportRequestPBImpl.class, + GetApplicationAttemptReportRequestProto.class); + } + + @Test + public void testGetApplicationAttemptReportResponsePBImpl() throws Exception { + validatePBImplRecord(GetApplicationAttemptReportResponsePBImpl.class, + GetApplicationAttemptReportResponseProto.class); + } + + @Test + public void testGetApplicationAttemptsRequestPBImpl() throws Exception { + validatePBImplRecord(GetApplicationAttemptsRequestPBImpl.class, + GetApplicationAttemptsRequestProto.class); + } + + @Test + public void testGetApplicationAttemptsResponsePBImpl() throws Exception { + validatePBImplRecord(GetApplicationAttemptsResponsePBImpl.class, + GetApplicationAttemptsResponseProto.class); + } + + @Test + public void testGetApplicationReportRequestPBImpl() throws Exception { + validatePBImplRecord(GetApplicationReportRequestPBImpl.class, + GetApplicationReportRequestProto.class); + } + + @Test + public void testGetApplicationReportResponsePBImpl() throws Exception { + validatePBImplRecord(GetApplicationReportResponsePBImpl.class, + 
GetApplicationReportResponseProto.class); + } + + @Test + public void testGetApplicationsRequestPBImpl() throws Exception { + validatePBImplRecord(GetApplicationsRequestPBImpl.class, + GetApplicationsRequestProto.class); + } + + @Test + public void testGetApplicationsResponsePBImpl() throws Exception { + validatePBImplRecord(GetApplicationsResponsePBImpl.class, + GetApplicationsResponseProto.class); + } + + @Test + public void testGetClusterMetricsRequestPBImpl() throws Exception { + validatePBImplRecord(GetClusterMetricsRequestPBImpl.class, + GetClusterMetricsRequestProto.class); + } + + @Test + public void testGetClusterMetricsResponsePBImpl() throws Exception { + validatePBImplRecord(GetClusterMetricsResponsePBImpl.class, + GetClusterMetricsResponseProto.class); + } + + @Test + public void testGetClusterNodesRequestPBImpl() throws Exception { + validatePBImplRecord(GetClusterNodesRequestPBImpl.class, + GetClusterNodesRequestProto.class); + } + + @Test + public void testGetClusterNodesResponsePBImpl() throws Exception { + validatePBImplRecord(GetClusterNodesResponsePBImpl.class, + GetClusterNodesResponseProto.class); + } + + @Test + public void testGetContainerReportRequestPBImpl() throws Exception { + validatePBImplRecord(GetContainerReportRequestPBImpl.class, + GetContainerReportRequestProto.class); + } + + @Test + public void testGetContainerReportResponsePBImpl() throws Exception { + validatePBImplRecord(GetContainerReportResponsePBImpl.class, + GetContainerReportResponseProto.class); + } + + @Test + public void testGetContainersRequestPBImpl() throws Exception { + validatePBImplRecord(GetContainersRequestPBImpl.class, + GetContainersRequestProto.class); + } + + @Test + public void testGetContainersResponsePBImpl() throws Exception { + validatePBImplRecord(GetContainersResponsePBImpl.class, + GetContainersResponseProto.class); + } + + @Test + public void testGetContainerStatusesRequestPBImpl() throws Exception { + validatePBImplRecord(GetContainerStatusesRequestPBImpl.class, + GetContainerStatusesRequestProto.class); + } + + @Test + public void testGetContainerStatusesResponsePBImpl() throws Exception { + validatePBImplRecord(GetContainerStatusesResponsePBImpl.class, + GetContainerStatusesResponseProto.class); + } + + @Test + public void testGetDelegationTokenRequestPBImpl() throws Exception { + validatePBImplRecord(GetDelegationTokenRequestPBImpl.class, + GetDelegationTokenRequestProto.class); + } + + @Test + public void testGetDelegationTokenResponsePBImpl() throws Exception { + validatePBImplRecord(GetDelegationTokenResponsePBImpl.class, + GetDelegationTokenResponseProto.class); + } + + @Test + public void testGetNewApplicationRequestPBImpl() throws Exception { + validatePBImplRecord(GetNewApplicationRequestPBImpl.class, + GetNewApplicationRequestProto.class); + } + + @Test + public void testGetNewApplicationResponsePBImpl() throws Exception { + validatePBImplRecord(GetNewApplicationResponsePBImpl.class, + GetNewApplicationResponseProto.class); + } + + @Test + public void testGetQueueInfoRequestPBImpl() throws Exception { + validatePBImplRecord(GetQueueInfoRequestPBImpl.class, + GetQueueInfoRequestProto.class); + } + + @Test + public void testGetQueueInfoResponsePBImpl() throws Exception { + validatePBImplRecord(GetQueueInfoResponsePBImpl.class, + GetQueueInfoResponseProto.class); + } + + @Test + public void testGetQueueUserAclsInfoRequestPBImpl() throws Exception { + validatePBImplRecord(GetQueueUserAclsInfoRequestPBImpl.class, + GetQueueUserAclsInfoRequestProto.class); + } + + 
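// Editorial sketch, not part of the patch: what the reflective validatePBImplRecord(...)
// above amounts to for a single record type, written out by hand. The harness discovers the
// get/set pairs and generates values via genTypeValue(...); the priority value here is
// arbitrary, and Priority is simply a conveniently small record.
PriorityPBImpl original = new PriorityPBImpl();
original.setPriority(3);                                   // set every discovered property
PriorityProto proto = original.getProto();                 // serialize through the protobuf builder
PriorityPBImpl roundTripped = new PriorityPBImpl(proto);   // rebuild the record from the proto
Assert.assertEquals(original, roundTripped);               // whole-record equality
Assert.assertEquals(original.getPriority(), roundTripped.getPriority());  // per-property equality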
@Test + public void testGetQueueUserAclsInfoResponsePBImpl() throws Exception { + validatePBImplRecord(GetQueueUserAclsInfoResponsePBImpl.class, + GetQueueUserAclsInfoResponseProto.class); + } + + @Test + public void testKillApplicationRequestPBImpl() throws Exception { + validatePBImplRecord(KillApplicationRequestPBImpl.class, + KillApplicationRequestProto.class); + } + + @Test + public void testKillApplicationResponsePBImpl() throws Exception { + validatePBImplRecord(KillApplicationResponsePBImpl.class, + KillApplicationResponseProto.class); + } + + @Test + public void testMoveApplicationAcrossQueuesRequestPBImpl() throws Exception { + validatePBImplRecord(MoveApplicationAcrossQueuesRequestPBImpl.class, + MoveApplicationAcrossQueuesRequestProto.class); + } + + @Test + public void testMoveApplicationAcrossQueuesResponsePBImpl() throws Exception { + validatePBImplRecord(MoveApplicationAcrossQueuesResponsePBImpl.class, + MoveApplicationAcrossQueuesResponseProto.class); + } + + @Test + public void testRegisterApplicationMasterRequestPBImpl() throws Exception { + validatePBImplRecord(RegisterApplicationMasterRequestPBImpl.class, + RegisterApplicationMasterRequestProto.class); + } + + @Test + public void testRegisterApplicationMasterResponsePBImpl() throws Exception { + validatePBImplRecord(RegisterApplicationMasterResponsePBImpl.class, + RegisterApplicationMasterResponseProto.class); + } + + @Test + public void testRenewDelegationTokenRequestPBImpl() throws Exception { + validatePBImplRecord(RenewDelegationTokenRequestPBImpl.class, + RenewDelegationTokenRequestProto.class); + } + + @Test + public void testRenewDelegationTokenResponsePBImpl() throws Exception { + validatePBImplRecord(RenewDelegationTokenResponsePBImpl.class, + RenewDelegationTokenResponseProto.class); + } + + @Test + public void testStartContainerRequestPBImpl() throws Exception { + validatePBImplRecord(StartContainerRequestPBImpl.class, + StartContainerRequestProto.class); + } + + @Test + public void testStartContainersRequestPBImpl() throws Exception { + validatePBImplRecord(StartContainersRequestPBImpl.class, + StartContainersRequestProto.class); + } + + @Test + public void testStartContainersResponsePBImpl() throws Exception { + validatePBImplRecord(StartContainersResponsePBImpl.class, + StartContainersResponseProto.class); + } + + @Test + public void testStopContainersRequestPBImpl() throws Exception { + validatePBImplRecord(StopContainersRequestPBImpl.class, + StopContainersRequestProto.class); + } + + @Test + public void testStopContainersResponsePBImpl() throws Exception { + validatePBImplRecord(StopContainersResponsePBImpl.class, + StopContainersResponseProto.class); + } + + @Test + public void testSubmitApplicationRequestPBImpl() throws Exception { + validatePBImplRecord(SubmitApplicationRequestPBImpl.class, + SubmitApplicationRequestProto.class); + } + + @Test + public void testSubmitApplicationResponsePBImpl() throws Exception { + validatePBImplRecord(SubmitApplicationResponsePBImpl.class, + SubmitApplicationResponseProto.class); + } + + @Test + @Ignore + // ignore cause ApplicationIdPBImpl is immutable + public void testApplicationAttemptIdPBImpl() throws Exception { + validatePBImplRecord(ApplicationAttemptIdPBImpl.class, + ApplicationAttemptIdProto.class); + } + + @Test + public void testApplicationAttemptReportPBImpl() throws Exception { + validatePBImplRecord(ApplicationAttemptReportPBImpl.class, + ApplicationAttemptReportProto.class); + } + + @Test + @Ignore + // ignore cause ApplicationIdPBImpl is immutable 
+ public void testApplicationIdPBImpl() throws Exception { + validatePBImplRecord(ApplicationIdPBImpl.class, ApplicationIdProto.class); + } + + @Test + public void testApplicationReportPBImpl() throws Exception { + validatePBImplRecord(ApplicationReportPBImpl.class, + ApplicationReportProto.class); + } + + @Test + public void testApplicationResourceUsageReportPBImpl() throws Exception { + validatePBImplRecord(ApplicationResourceUsageReportPBImpl.class, + ApplicationResourceUsageReportProto.class); + } + + @Test + public void testApplicationSubmissionContextPBImpl() throws Exception { + validatePBImplRecord(ApplicationSubmissionContextPBImpl.class, + ApplicationSubmissionContextProto.class); + } + + @Test + @Ignore + // ignore cause ApplicationIdPBImpl is immutable + public void testContainerIdPBImpl() throws Exception { + validatePBImplRecord(ContainerIdPBImpl.class, ContainerIdProto.class); + } + + @Test + public void testContainerLaunchContextPBImpl() throws Exception { + validatePBImplRecord(ContainerLaunchContextPBImpl.class, + ContainerLaunchContextProto.class); + } + + @Test + public void testContainerPBImpl() throws Exception { + validatePBImplRecord(ContainerPBImpl.class, ContainerProto.class); + } + + @Test + public void testContainerReportPBImpl() throws Exception { + validatePBImplRecord(ContainerReportPBImpl.class, ContainerReportProto.class); + } + + @Test + public void testContainerResourceDecreasePBImpl() throws Exception { + validatePBImplRecord(ContainerResourceDecreasePBImpl.class, + ContainerResourceDecreaseProto.class); + } + + @Test + public void testContainerResourceIncreasePBImpl() throws Exception { + validatePBImplRecord(ContainerResourceIncreasePBImpl.class, + ContainerResourceIncreaseProto.class); + } + + @Test + public void testContainerResourceIncreaseRequestPBImpl() throws Exception { + validatePBImplRecord(ContainerResourceIncreaseRequestPBImpl.class, + ContainerResourceIncreaseRequestProto.class); + } + + @Test + public void testContainerStatusPBImpl() throws Exception { + validatePBImplRecord(ContainerStatusPBImpl.class, ContainerStatusProto.class); + } + + @Test + public void testLocalResourcePBImpl() throws Exception { + validatePBImplRecord(LocalResourcePBImpl.class, LocalResourceProto.class); + } + + @Test + public void testNMTokenPBImpl() throws Exception { + validatePBImplRecord(NMTokenPBImpl.class, NMTokenProto.class); + } + + @Test + @Ignore + // ignore cause ApplicationIdPBImpl is immutable + public void testNodeIdPBImpl() throws Exception { + validatePBImplRecord(NodeIdPBImpl.class, NodeIdProto.class); + } + + @Test + public void testNodeReportPBImpl() throws Exception { + validatePBImplRecord(NodeReportPBImpl.class, NodeReportProto.class); + } + + @Test + public void testPreemptionContainerPBImpl() throws Exception { + validatePBImplRecord(PreemptionContainerPBImpl.class, + PreemptionContainerProto.class); + } + + @Test + public void testPreemptionContractPBImpl() throws Exception { + validatePBImplRecord(PreemptionContractPBImpl.class, + PreemptionContractProto.class); + } + + @Test + public void testPreemptionMessagePBImpl() throws Exception { + validatePBImplRecord(PreemptionMessagePBImpl.class, + PreemptionMessageProto.class); + } + + @Test + public void testPreemptionResourceRequestPBImpl() throws Exception { + validatePBImplRecord(PreemptionResourceRequestPBImpl.class, + PreemptionResourceRequestProto.class); + } + + @Test + public void testPriorityPBImpl() throws Exception { + validatePBImplRecord(PriorityPBImpl.class, 
PriorityProto.class); + } + + @Test + public void testQueueInfoPBImpl() throws Exception { + validatePBImplRecord(QueueInfoPBImpl.class, QueueInfoProto.class); + } + + @Test + public void testQueueUserACLInfoPBImpl() throws Exception { + validatePBImplRecord(QueueUserACLInfoPBImpl.class, + QueueUserACLInfoProto.class); + } + + @Test + public void testResourceBlacklistRequestPBImpl() throws Exception { + validatePBImplRecord(ResourceBlacklistRequestPBImpl.class, + ResourceBlacklistRequestProto.class); + } + + @Test + @Ignore + // ignore as ResourceOptionPBImpl is immutable + public void testResourceOptionPBImpl() throws Exception { + validatePBImplRecord(ResourceOptionPBImpl.class, ResourceOptionProto.class); + } + + @Test + public void testResourcePBImpl() throws Exception { + validatePBImplRecord(ResourcePBImpl.class, ResourceProto.class); + } + + @Test + public void testResourceRequestPBImpl() throws Exception { + validatePBImplRecord(ResourceRequestPBImpl.class, ResourceRequestProto.class); + } + + @Test + public void testSerializedExceptionPBImpl() throws Exception { + validatePBImplRecord(SerializedExceptionPBImpl.class, + SerializedExceptionProto.class); + } + + @Test + public void testStrictPreemptionContractPBImpl() throws Exception { + validatePBImplRecord(StrictPreemptionContractPBImpl.class, + StrictPreemptionContractProto.class); + } + + @Test + public void testTokenPBImpl() throws Exception { + validatePBImplRecord(TokenPBImpl.class, TokenProto.class); + } + + @Test + public void testURLPBImpl() throws Exception { + validatePBImplRecord(URLPBImpl.class, URLProto.class); + } + + @Test + public void testYarnClusterMetricsPBImpl() throws Exception { + validatePBImplRecord(YarnClusterMetricsPBImpl.class, + YarnClusterMetricsProto.class); + } + + @Test + public void testRefreshAdminAclsRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshAdminAclsRequestPBImpl.class, + RefreshAdminAclsRequestProto.class); + } + + @Test + public void testRefreshAdminAclsResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshAdminAclsResponsePBImpl.class, + RefreshAdminAclsResponseProto.class); + } + + @Test + public void testRefreshNodesRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshNodesRequestPBImpl.class, + RefreshNodesRequestProto.class); + } + + @Test + public void testRefreshNodesResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshNodesResponsePBImpl.class, + RefreshNodesResponseProto.class); + } + + @Test + public void testRefreshQueuesRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshQueuesRequestPBImpl.class, + RefreshQueuesRequestProto.class); + } + + @Test + public void testRefreshQueuesResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshQueuesResponsePBImpl.class, + RefreshQueuesResponseProto.class); + } + + @Test + public void testRefreshServiceAclsRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshServiceAclsRequestPBImpl.class, + RefreshServiceAclsRequestProto.class); + } + + @Test + public void testRefreshServiceAclsResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshServiceAclsResponsePBImpl.class, + RefreshServiceAclsResponseProto.class); + } + + @Test + public void testRefreshSuperUserGroupsConfigurationRequestPBImpl() + throws Exception { + validatePBImplRecord(RefreshSuperUserGroupsConfigurationRequestPBImpl.class, + RefreshSuperUserGroupsConfigurationRequestProto.class); + } + + @Test + public void testRefreshSuperUserGroupsConfigurationResponsePBImpl() + throws 
Exception { + validatePBImplRecord(RefreshSuperUserGroupsConfigurationResponsePBImpl.class, + RefreshSuperUserGroupsConfigurationResponseProto.class); + } + + @Test + public void testRefreshUserToGroupsMappingsRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshUserToGroupsMappingsRequestPBImpl.class, + RefreshUserToGroupsMappingsRequestProto.class); + } + + @Test + public void testRefreshUserToGroupsMappingsResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshUserToGroupsMappingsResponsePBImpl.class, + RefreshUserToGroupsMappingsResponseProto.class); + } + + @Test + public void testUpdateNodeResourceRequestPBImpl() throws Exception { + validatePBImplRecord(UpdateNodeResourceRequestPBImpl.class, + UpdateNodeResourceRequestProto.class); + } + + @Test + public void testUpdateNodeResourceResponsePBImpl() throws Exception { + validatePBImplRecord(UpdateNodeResourceResponsePBImpl.class, + UpdateNodeResourceResponseProto.class); + } +} From 397c88236233ad037234315dbb1f96a8948ea13d Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Fri, 1 Aug 2014 07:12:39 +0000 Subject: [PATCH 097/354] HDFS-6798. Add test case for incorrect data node condition during balancing. (Contributed by Benoy Antony) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615044 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/server/balancer/TestBalancer.java | 65 +++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 785c82523e6..daf4fd31ed3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -337,6 +337,9 @@ Release 2.6.0 - UNRELEASED HDFS-6685. Balancer should preserve storage type of replicas. (szetszwo) + HDFS-6798. Add test case for incorrect data node condition during + balancing. (Benoy Antony via Arpit Agarwal) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index e9c86dfe3f0..39abdc50fc9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -658,6 +658,71 @@ public void integrationTest(Configuration conf) throws Exception { oneNodeTest(conf, false); } + /* we first start a cluster and fill the cluster up to a certain size. + * then redistribute blocks according the required distribution. + * Then we start an empty datanode. + * Afterwards a balancer is run to balance the cluster. + * A partially filled datanode is excluded during balancing. + * This triggers a situation where one of the block's location is unknown. 
+ */ + @Test(timeout=100000) + public void testUnknownDatanode() throws Exception { + Configuration conf = new HdfsConfiguration(); + initConf(conf); + long distribution[] = new long[] {50*CAPACITY/100, 70*CAPACITY/100, 0*CAPACITY/100}; + long capacities[] = new long[]{CAPACITY, CAPACITY, CAPACITY}; + String racks[] = new String[] {RACK0, RACK1, RACK1}; + + int numDatanodes = distribution.length; + if (capacities.length != numDatanodes || racks.length != numDatanodes) { + throw new IllegalArgumentException("Array length is not the same"); + } + + // calculate total space that need to be filled + final long totalUsedSpace = sum(distribution); + + // fill the cluster + ExtendedBlock[] blocks = generateBlocks(conf, totalUsedSpace, + (short) numDatanodes); + + // redistribute blocks + Block[][] blocksDN = distributeBlocks( + blocks, (short)(numDatanodes-1), distribution); + + // restart the cluster: do NOT format the cluster + conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) + .format(false) + .racks(racks) + .simulatedCapacities(capacities) + .build(); + try { + cluster.waitActive(); + client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), + ClientProtocol.class).getProxy(); + + for(int i = 0; i < 3; i++) { + cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null); + } + + cluster.startDataNodes(conf, 1, true, null, + new String[]{RACK0}, null,new long[]{CAPACITY}); + cluster.triggerHeartbeats(); + + Collection namenodes = DFSUtil.getNsServiceRpcUris(conf); + Set datanodes = new HashSet(); + datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName()); + Balancer.Parameters p = new Balancer.Parameters( + Balancer.Parameters.DEFAULT.policy, + Balancer.Parameters.DEFAULT.threshold, + datanodes, Balancer.Parameters.DEFAULT.nodesToBeIncluded); + final int r = Balancer.run(namenodes, p, conf); + assertEquals(Balancer.ReturnStatus.SUCCESS.code, r); + } finally { + cluster.shutdown(); + } + } + /** * Test parse method in Balancer#Cli class with threshold value out of * boundaries. From 7e12b1912f8cdbe6d88ac0b8eb71d7c4dc1bf78e Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Fri, 1 Aug 2014 14:12:35 +0000 Subject: [PATCH 098/354] HDFS-6796. Improve the argument check during balancer command line parsing. Contributed by Benoy Antony git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615107 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hadoop/hdfs/server/balancer/Balancer.java | 33 +++++++++++-------- .../hdfs/server/balancer/TestBalancer.java | 31 ++++++++++++++++- 3 files changed, 53 insertions(+), 14 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index daf4fd31ed3..75833efbb1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -340,6 +340,9 @@ Release 2.6.0 - UNRELEASED HDFS-6798. Add test case for incorrect data node condition during balancing. (Benoy Antony via Arpit Agarwal) + HDFS-6796. Improve the argument check during balancer command line parsing. + (Benoy Antony via szetszwo) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. 
(wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index dad39494861..e5ff544ee75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -1761,9 +1761,9 @@ static Parameters parse(String[] args) { if (args != null) { try { for(int i = 0; i < args.length; i++) { - checkArgument(args.length >= 2, "args = " + Arrays.toString(args)); if ("-threshold".equalsIgnoreCase(args[i])) { - i++; + checkArgument(++i < args.length, + "Threshold value is missing: args = " + Arrays.toString(args)); try { threshold = Double.parseDouble(args[i]); if (threshold < 1 || threshold > 100) { @@ -1778,7 +1778,8 @@ static Parameters parse(String[] args) { throw e; } } else if ("-policy".equalsIgnoreCase(args[i])) { - i++; + checkArgument(++i < args.length, + "Policy value is missing: args = " + Arrays.toString(args)); try { policy = BalancingPolicy.parse(args[i]); } catch(IllegalArgumentException e) { @@ -1786,16 +1787,26 @@ static Parameters parse(String[] args) { throw e; } } else if ("-exclude".equalsIgnoreCase(args[i])) { - i++; + checkArgument(++i < args.length, + "List of nodes to exclude | -f is missing: args = " + + Arrays.toString(args)); if ("-f".equalsIgnoreCase(args[i])) { - nodesTobeExcluded = Util.getHostListFromFile(args[++i]); + checkArgument(++i < args.length, + "File containing nodes to exclude is not specified: args = " + + Arrays.toString(args)); + nodesTobeExcluded = Util.getHostListFromFile(args[i]); } else { nodesTobeExcluded = Util.parseHostList(args[i]); } } else if ("-include".equalsIgnoreCase(args[i])) { - i++; + checkArgument(++i < args.length, + "List of nodes to include | -f is missing: args = " + + Arrays.toString(args)); if ("-f".equalsIgnoreCase(args[i])) { - nodesTobeIncluded = Util.getHostListFromFile(args[++i]); + checkArgument(++i < args.length, + "File containing nodes to include is not specified: args = " + + Arrays.toString(args)); + nodesTobeIncluded = Util.getHostListFromFile(args[i]); } else { nodesTobeIncluded = Util.parseHostList(args[i]); } @@ -1804,12 +1815,8 @@ static Parameters parse(String[] args) { + Arrays.toString(args)); } } - if (!nodesTobeExcluded.isEmpty() && !nodesTobeIncluded.isEmpty()) { - System.err.println( - "-exclude and -include options cannot be specified together."); - throw new IllegalArgumentException( - "-exclude and -include options cannot be specified together."); - } + checkArgument(nodesTobeExcluded.isEmpty() || nodesTobeIncluded.isEmpty(), + "-exclude and -include options cannot be specified together."); } catch(RuntimeException e) { printUsage(System.err); throw e; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 39abdc50fc9..5da3cd177a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -854,13 +854,42 @@ public void testBalancerCliParseWithWrongParams() { } catch (IllegalArgumentException e) { } - parameters = new String[] { "-threshold 1 -policy" }; + parameters = 
new String[] {"-threshold", "1", "-policy"}; try { Balancer.Cli.parse(parameters); fail(reason); } catch (IllegalArgumentException e) { } + parameters = new String[] {"-threshold", "1", "-include"}; + try { + Balancer.Cli.parse(parameters); + fail(reason); + } catch (IllegalArgumentException e) { + + } + parameters = new String[] {"-threshold", "1", "-exclude"}; + try { + Balancer.Cli.parse(parameters); + fail(reason); + } catch (IllegalArgumentException e) { + + } + parameters = new String[] {"-include", "-f"}; + try { + Balancer.Cli.parse(parameters); + fail(reason); + } catch (IllegalArgumentException e) { + + } + parameters = new String[] {"-exclude", "-f"}; + try { + Balancer.Cli.parse(parameters); + fail(reason); + } catch (IllegalArgumentException e) { + + } + parameters = new String[] {"-include", "testnode1", "-exclude", "testnode2"}; try { Balancer.Cli.parse(parameters); From 45db4d204b796eee6dd0e39d3cc94b70c47028d4 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Fri, 1 Aug 2014 16:58:06 +0000 Subject: [PATCH 099/354] HDFS-6794. Update BlockManager methods to use DatanodeStorageInfo where possible. (Arpit Agarwal) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615169 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../server/blockmanagement/BlockManager.java | 159 ++++++++++-------- .../blockmanagement/DatanodeStorageInfo.java | 2 +- .../PendingDataNodeMessages.java | 30 ++-- .../hdfs/server/namenode/FSNamesystem.java | 9 +- .../server/namenode/NameNodeRpcServer.java | 4 +- .../blockmanagement/TestBlockManager.java | 10 +- .../TestDatanodeDescriptor.java | 6 +- .../TestPendingDataNodeMessages.java | 7 +- .../TestReplicationPolicy.java | 5 +- 10 files changed, 129 insertions(+), 106 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 75833efbb1b..9ae0002260e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -343,6 +343,9 @@ Release 2.6.0 - UNRELEASED HDFS-6796. Improve the argument check during balancer command line parsing. (Benoy Antony via szetszwo) + HDFS-6794. Update BlockManager methods to use DatanodeStorageInfo + where possible (Arpit Agarwal) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index c4837db4d81..41118183658 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -1079,6 +1079,7 @@ private void addToInvalidates(Block b) { * Mark the block belonging to datanode as corrupt * @param blk Block to be marked as corrupt * @param dn Datanode which holds the corrupt replica + * @param storageID if known, null otherwise. 
* @param reason a textual reason why the block should be marked corrupt, * for logging purposes */ @@ -1095,19 +1096,29 @@ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk, + blk + " not found"); return; } - markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, - blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED), - dn, storageID); - } - private void markBlockAsCorrupt(BlockToMarkCorrupt b, - DatanodeInfo dn, String storageID) throws IOException { DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { - throw new IOException("Cannot mark " + b + throw new IOException("Cannot mark " + blk + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid() + ") does not exist"); } + + markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, + blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED), + storageID == null ? null : node.getStorageInfo(storageID), + node); + } + + /** + * + * @param b + * @param storageInfo storage that contains the block, if known. null otherwise. + * @throws IOException + */ + private void markBlockAsCorrupt(BlockToMarkCorrupt b, + DatanodeStorageInfo storageInfo, + DatanodeDescriptor node) throws IOException { BlockCollection bc = b.corrupted.getBlockCollection(); if (bc == null) { @@ -1118,7 +1129,9 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b, } // Add replica to the data-node if it is not already there - node.addBlock(storageID, b.stored); + if (storageInfo != null) { + storageInfo.addBlock(b.stored); + } // Add this replica to corruptReplicas Map corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason, @@ -1457,7 +1470,7 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { * @throws IOException * if the number of targets < minimum replication. * @see BlockPlacementPolicy#chooseTarget(String, int, Node, - * List, boolean, Set, long) + * List, boolean, Set, long, StorageType) */ public DatanodeStorageInfo[] chooseTarget(final String src, final int numOfReplicas, final DatanodeDescriptor client, @@ -1694,7 +1707,7 @@ public String toString() { * @throws IOException */ public boolean processReport(final DatanodeID nodeID, - final DatanodeStorage storage, final String poolId, + final DatanodeStorage storage, final BlockListAsLongs newReport) throws IOException { namesystem.writeLock(); final long startTime = Time.now(); //after acquiring write lock @@ -1726,9 +1739,9 @@ public boolean processReport(final DatanodeID nodeID, if (storageInfo.numBlocks() == 0) { // The first block report can be processed a lot more efficiently than // ordinary block reports. This shortens restart times. - processFirstBlockReport(node, storage.getStorageID(), newReport); + processFirstBlockReport(storageInfo, newReport); } else { - processReport(node, storage, newReport); + processReport(storageInfo, newReport); } // Now that we have an up-to-date block report, we know that any @@ -1790,9 +1803,8 @@ private void rescanPostponedMisreplicatedBlocks() { } } - private void processReport(final DatanodeDescriptor node, - final DatanodeStorage storage, - final BlockListAsLongs report) throws IOException { + private void processReport(final DatanodeStorageInfo storageInfo, + final BlockListAsLongs report) throws IOException { // Normal case: // Modify the (block-->datanode) map, according to the difference // between the old and new block report. 
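The hunks above and below all apply the same HDFS-6794 refactoring: instead of threading a (DatanodeDescriptor, storage ID) pair through BlockManager, the caller resolves the DatanodeStorageInfo once and passes that object down, recovering the descriptor via getDatanodeDescriptor() where the node itself is still needed. A minimal sketch of the resulting calling convention follows; the helper name and error message are illustrative, not part of the patch:

  // Sketch only: resolve the storage once, then hand the DatanodeStorageInfo
  // around instead of a (node, storageID) pair.
  private void addReplicaToStorage(DatanodeDescriptor node, String storageID,
      BlockInfo storedBlock) throws IOException {
    if (node == null) {
      throw new IOException("Cannot add replica: datanode does not exist");
    }
    // storageID may be null (storage not known to the reporter); only resolve
    // it to a DatanodeStorageInfo when it is present.
    DatanodeStorageInfo storageInfo =
        (storageID == null) ? null : node.getStorageInfo(storageID);
    if (storageInfo != null) {
      storageInfo.addBlock(storedBlock);  // replaces node.addBlock(storageID, storedBlock)
    }
  }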
@@ -1802,19 +1814,20 @@ private void processReport(final DatanodeDescriptor node, Collection toInvalidate = new LinkedList(); Collection toCorrupt = new LinkedList(); Collection toUC = new LinkedList(); - reportDiff(node, storage, report, + reportDiff(storageInfo, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); - + + DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); // Process the blocks on each queue for (StatefulBlockInfo b : toUC) { - addStoredBlockUnderConstruction(b, node, storage.getStorageID()); + addStoredBlockUnderConstruction(b, storageInfo); } for (Block b : toRemove) { removeStoredBlock(b, node); } int numBlocksLogged = 0; for (BlockInfo b : toAdd) { - addStoredBlock(b, node, storage.getStorageID(), null, numBlocksLogged < maxNumBlocksToLog); + addStoredBlock(b, storageInfo, null, numBlocksLogged < maxNumBlocksToLog); numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { @@ -1828,7 +1841,7 @@ private void processReport(final DatanodeDescriptor node, addToInvalidates(b, node); } for (BlockToMarkCorrupt b : toCorrupt) { - markBlockAsCorrupt(b, node, storage.getStorageID()); + markBlockAsCorrupt(b, storageInfo, node); } } @@ -1839,16 +1852,16 @@ private void processReport(final DatanodeDescriptor node, * a toRemove list (since there won't be any). It also silently discards * any invalid blocks, thereby deferring their processing until * the next block report. - * @param node - DatanodeDescriptor of the node that sent the report + * @param storageInfo - DatanodeStorageInfo that sent the report * @param report - the initial block report, to be processed * @throws IOException */ - private void processFirstBlockReport(final DatanodeDescriptor node, - final String storageID, + private void processFirstBlockReport( + final DatanodeStorageInfo storageInfo, final BlockListAsLongs report) throws IOException { if (report == null) return; assert (namesystem.hasWriteLock()); - assert (node.getStorageInfo(storageID).numBlocks() == 0); + assert (storageInfo.numBlocks() == 0); BlockReportIterator itBR = report.getBlockReportIterator(); while(itBR.hasNext()) { @@ -1857,7 +1870,7 @@ private void processFirstBlockReport(final DatanodeDescriptor node, if (shouldPostponeBlocksFromFuture && namesystem.isGenStampInFuture(iblk)) { - queueReportedBlock(node, storageID, iblk, reportedState, + queueReportedBlock(storageInfo, iblk, reportedState, QUEUE_REASON_FUTURE_GENSTAMP); continue; } @@ -1869,15 +1882,16 @@ private void processFirstBlockReport(final DatanodeDescriptor node, // If block is corrupt, mark it and continue to next block. BlockUCState ucState = storedBlock.getBlockUCState(); BlockToMarkCorrupt c = checkReplicaCorrupt( - iblk, reportedState, storedBlock, ucState, node); + iblk, reportedState, storedBlock, ucState, + storageInfo.getDatanodeDescriptor()); if (c != null) { if (shouldPostponeBlocksFromFuture) { // In the Standby, we may receive a block report for a file that we // just have an out-of-date gen-stamp or state for, for example. 
- queueReportedBlock(node, storageID, iblk, reportedState, + queueReportedBlock(storageInfo, iblk, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { - markBlockAsCorrupt(c, node, storageID); + markBlockAsCorrupt(c, storageInfo, storageInfo.getDatanodeDescriptor()); } continue; } @@ -1885,7 +1899,7 @@ private void processFirstBlockReport(final DatanodeDescriptor node, // If block is under construction, add this replica to its list if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent( - node.getStorageInfo(storageID), iblk, reportedState); + storageInfo, iblk, reportedState); // OpenFileBlocks only inside snapshots also will be added to safemode // threshold. So we need to update such blocks to safemode // refer HDFS-5283 @@ -1898,12 +1912,12 @@ private void processFirstBlockReport(final DatanodeDescriptor node, } //add replica if appropriate if (reportedState == ReplicaState.FINALIZED) { - addStoredBlockImmediate(storedBlock, node, storageID); + addStoredBlockImmediate(storedBlock, storageInfo); } } } - private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage, + private void reportDiff(DatanodeStorageInfo storageInfo, BlockListAsLongs newReport, Collection toAdd, // add to DatanodeDescriptor Collection toRemove, // remove from DatanodeDescriptor @@ -1911,8 +1925,6 @@ private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage, Collection toCorrupt, // add to corrupt replicas list Collection toUC) { // add to under-construction list - final DatanodeStorageInfo storageInfo = dn.getStorageInfo(storage.getStorageID()); - // place a delimiter in the list which separates blocks // that have been reported from those that have not BlockInfo delimiter = new BlockInfo(new Block(), 1); @@ -1929,7 +1941,7 @@ private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage, while(itBR.hasNext()) { Block iblk = itBR.next(); ReplicaState iState = itBR.getCurrentReplicaState(); - BlockInfo storedBlock = processReportedBlock(dn, storage.getStorageID(), + BlockInfo storedBlock = processReportedBlock(storageInfo, iblk, iState, toAdd, toInvalidate, toCorrupt, toUC); // move block to the head of the list @@ -1966,7 +1978,7 @@ private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage, * BlockInfoUnderConstruction's list of replicas. * * - * @param dn descriptor for the datanode that made the report + * @param storageInfo DatanodeStorageInfo that sent the report. * @param block reported block replica * @param reportedState reported replica state * @param toAdd add to DatanodeDescriptor @@ -1978,14 +1990,16 @@ private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage, * @return the up-to-date stored block, if it should be kept. * Otherwise, null. 
*/ - private BlockInfo processReportedBlock(final DatanodeDescriptor dn, - final String storageID, + private BlockInfo processReportedBlock( + final DatanodeStorageInfo storageInfo, final Block block, final ReplicaState reportedState, final Collection toAdd, final Collection toInvalidate, final Collection toCorrupt, final Collection toUC) { + DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor(); + if(LOG.isDebugEnabled()) { LOG.debug("Reported block " + block + " on " + dn + " size " + block.getNumBytes() @@ -1994,7 +2008,7 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn, if (shouldPostponeBlocksFromFuture && namesystem.isGenStampInFuture(block)) { - queueReportedBlock(dn, storageID, block, reportedState, + queueReportedBlock(storageInfo, block, reportedState, QUEUE_REASON_FUTURE_GENSTAMP); return null; } @@ -2034,7 +2048,7 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn, // TODO: Pretty confident this should be s/storedBlock/block below, // since we should be postponing the info of the reported block, not // the stored block. See HDFS-6289 for more context. - queueReportedBlock(dn, storageID, storedBlock, reportedState, + queueReportedBlock(storageInfo, storedBlock, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { toCorrupt.add(c); @@ -2063,17 +2077,17 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn, * standby node. @see PendingDataNodeMessages. * @param reason a textual reason to report in the debug logs */ - private void queueReportedBlock(DatanodeDescriptor dn, String storageID, Block block, + private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState, String reason) { assert shouldPostponeBlocksFromFuture; if (LOG.isDebugEnabled()) { LOG.debug("Queueing reported block " + block + " in state " + reportedState + - " from datanode " + dn + " for later processing " + - "because " + reason + "."); + " from datanode " + storageInfo.getDatanodeDescriptor() + + " for later processing because " + reason + "."); } - pendingDNMessages.enqueueReportedBlock(dn, storageID, block, reportedState); + pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState); } /** @@ -2096,7 +2110,7 @@ private void processQueuedMessages(Iterable rbis) if (LOG.isDebugEnabled()) { LOG.debug("Processing previouly queued message " + rbi); } - processAndHandleReportedBlock(rbi.getNode(), rbi.getStorageID(), + processAndHandleReportedBlock(rbi.getStorageInfo(), rbi.getBlock(), rbi.getReportedState(), null); } } @@ -2216,19 +2230,20 @@ private boolean isBlockUnderConstruction(BlockInfo storedBlock, } void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock, - DatanodeDescriptor node, String storageID) throws IOException { + DatanodeStorageInfo storageInfo) throws IOException { BlockInfoUnderConstruction block = ucBlock.storedBlock; - block.addReplicaIfNotPresent(node.getStorageInfo(storageID), - ucBlock.reportedBlock, ucBlock.reportedState); + block.addReplicaIfNotPresent( + storageInfo, ucBlock.reportedBlock, ucBlock.reportedState); - if (ucBlock.reportedState == ReplicaState.FINALIZED && block.findDatanode(node) < 0) { - addStoredBlock(block, node, storageID, null, true); + if (ucBlock.reportedState == ReplicaState.FINALIZED && + block.findDatanode(storageInfo.getDatanodeDescriptor()) < 0) { + addStoredBlock(block, storageInfo, null, true); } } /** * Faster version of - * {@link #addStoredBlock(BlockInfo, DatanodeDescriptor, String, DatanodeDescriptor, boolean)} + * {@link 
#addStoredBlock(BlockInfo, DatanodeStorageInfo, DatanodeDescriptor, boolean)} * , intended for use with initial block report at startup. If not in startup * safe mode, will call standard addStoredBlock(). Assumes this method is * called "immediately" so there is no need to refresh the storedBlock from @@ -2239,17 +2254,17 @@ void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock, * @throws IOException */ private void addStoredBlockImmediate(BlockInfo storedBlock, - DatanodeDescriptor node, String storageID) + DatanodeStorageInfo storageInfo) throws IOException { assert (storedBlock != null && namesystem.hasWriteLock()); if (!namesystem.isInStartupSafeMode() || namesystem.isPopulatingReplQueues()) { - addStoredBlock(storedBlock, node, storageID, null, false); + addStoredBlock(storedBlock, storageInfo, null, false); return; } // just add it - node.addBlock(storageID, storedBlock); + storageInfo.addBlock(storedBlock); // Now check for completion of blocks and safe block count int numCurrentReplica = countLiveNodes(storedBlock); @@ -2271,13 +2286,13 @@ private void addStoredBlockImmediate(BlockInfo storedBlock, * @return the block that is stored in blockMap. */ private Block addStoredBlock(final BlockInfo block, - DatanodeDescriptor node, - String storageID, + DatanodeStorageInfo storageInfo, DatanodeDescriptor delNodeHint, boolean logEveryBlock) throws IOException { assert block != null && namesystem.hasWriteLock(); BlockInfo storedBlock; + DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); if (block instanceof BlockInfoUnderConstruction) { //refresh our copy in case the block got completed in another thread storedBlock = blocksMap.getStoredBlock(block); @@ -2297,7 +2312,7 @@ private Block addStoredBlock(final BlockInfo block, assert bc != null : "Block must belong to a file"; // add block to the datanode - boolean added = node.addBlock(storageID, storedBlock); + boolean added = storageInfo.addBlock(storedBlock); int curReplicaDelta; if (added) { @@ -2843,8 +2858,9 @@ private long addBlock(Block block, List results) { * The given node is reporting that it received a certain block. */ @VisibleForTesting - void addBlock(DatanodeDescriptor node, String storageID, Block block, String delHint) + void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint) throws IOException { + DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); // Decrement number of blocks scheduled to this datanode. // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with // RECEIVED_BLOCK), we currently also decrease the approximate number. @@ -2864,12 +2880,12 @@ void addBlock(DatanodeDescriptor node, String storageID, Block block, String del // Modify the blocks->datanode map and node's map. 
// pendingReplications.decrement(block, node); - processAndHandleReportedBlock(node, storageID, block, ReplicaState.FINALIZED, + processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED, delHintNode); } - private void processAndHandleReportedBlock(DatanodeDescriptor node, - String storageID, Block block, + private void processAndHandleReportedBlock( + DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState, DatanodeDescriptor delHintNode) throws IOException { // blockReceived reports a finalized block @@ -2877,7 +2893,9 @@ private void processAndHandleReportedBlock(DatanodeDescriptor node, Collection toInvalidate = new LinkedList(); Collection toCorrupt = new LinkedList(); Collection toUC = new LinkedList(); - processReportedBlock(node, storageID, block, reportedState, + final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); + + processReportedBlock(storageInfo, block, reportedState, toAdd, toInvalidate, toCorrupt, toUC); // the block is only in one of the to-do lists // if it is in none then data-node already has it @@ -2885,11 +2903,11 @@ private void processAndHandleReportedBlock(DatanodeDescriptor node, : "The block should be only in one of the lists."; for (StatefulBlockInfo b : toUC) { - addStoredBlockUnderConstruction(b, node, storageID); + addStoredBlockUnderConstruction(b, storageInfo); } long numBlocksLogged = 0; for (BlockInfo b : toAdd) { - addStoredBlock(b, node, storageID, delHintNode, numBlocksLogged < maxNumBlocksToLog); + addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog); numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { @@ -2903,7 +2921,7 @@ private void processAndHandleReportedBlock(DatanodeDescriptor node, addToInvalidates(b, node); } for (BlockToMarkCorrupt b : toCorrupt) { - markBlockAsCorrupt(b, node, storageID); + markBlockAsCorrupt(b, storageInfo, node); } } @@ -2930,13 +2948,15 @@ public void processIncrementalBlockReport(final DatanodeID nodeID, "Got incremental block report from unregistered or dead node"); } - if (node.getStorageInfo(srdb.getStorage().getStorageID()) == null) { + DatanodeStorageInfo storageInfo = + node.getStorageInfo(srdb.getStorage().getStorageID()); + if (storageInfo == null) { // The DataNode is reporting an unknown storage. Usually the NN learns // about new storages from heartbeats but during NN restart we may // receive a block report or incremental report before the heartbeat. // We must handle this for protocol compatibility. This issue was // uncovered by HDFS-6094. 
- node.updateStorage(srdb.getStorage()); + storageInfo = node.updateStorage(srdb.getStorage()); } for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) { @@ -2946,14 +2966,13 @@ public void processIncrementalBlockReport(final DatanodeID nodeID, deleted++; break; case RECEIVED_BLOCK: - addBlock(node, srdb.getStorage().getStorageID(), - rdbi.getBlock(), rdbi.getDelHints()); + addBlock(storageInfo, rdbi.getBlock(), rdbi.getDelHints()); received++; break; case RECEIVING_BLOCK: receiving++; - processAndHandleReportedBlock(node, srdb.getStorage().getStorageID(), - rdbi.getBlock(), ReplicaState.RBW, null); + processAndHandleReportedBlock(storageInfo, rdbi.getBlock(), + ReplicaState.RBW, null); break; default: String msg = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java index fa4b0e533bd..791fc3157d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java @@ -207,7 +207,7 @@ long getBlockPoolUsed() { return blockPoolUsed; } - boolean addBlock(BlockInfo b) { + public boolean addBlock(BlockInfo b) { if(!b.addStorage(this)) return false; // add to the head of the data-node list diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java index 0a1ba65f125..5f59f0267a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; /** * In the Standby Node, we can receive messages about blocks @@ -41,14 +42,12 @@ class PendingDataNodeMessages { static class ReportedBlockInfo { private final Block block; - private final DatanodeDescriptor dn; - private final String storageID; + private final DatanodeStorageInfo storageInfo; private final ReplicaState reportedState; - ReportedBlockInfo(DatanodeDescriptor dn, String storageID, Block block, + ReportedBlockInfo(DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState) { - this.dn = dn; - this.storageID = storageID; + this.storageInfo = storageInfo; this.block = block; this.reportedState = reportedState; } @@ -57,21 +56,18 @@ Block getBlock() { return block; } - DatanodeDescriptor getNode() { - return dn; - } - - String getStorageID() { - return storageID; - } - ReplicaState getReportedState() { return reportedState; } + + DatanodeStorageInfo getStorageInfo() { + return storageInfo; + } @Override public String toString() { - return "ReportedBlockInfo [block=" + block + ", dn=" + dn + return "ReportedBlockInfo [block=" + block + ", dn=" + + storageInfo.getDatanodeDescriptor() + ", reportedState=" + reportedState + "]"; } } @@ -87,7 +83,7 @@ void removeAllMessagesForDatanode(DatanodeDescriptor dn) { Queue oldQueue = 
entry.getValue(); while (!oldQueue.isEmpty()) { ReportedBlockInfo rbi = oldQueue.remove(); - if (!rbi.getNode().equals(dn)) { + if (!rbi.getStorageInfo().getDatanodeDescriptor().equals(dn)) { newQueue.add(rbi); } else { count--; @@ -97,11 +93,11 @@ void removeAllMessagesForDatanode(DatanodeDescriptor dn) { } } - void enqueueReportedBlock(DatanodeDescriptor dn, String storageID, Block block, + void enqueueReportedBlock(DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState) { block = new Block(block); getBlockQueue(block).add( - new ReportedBlockInfo(dn, storageID, block, reportedState)); + new ReportedBlockInfo(storageInfo, block, reportedState)); count++; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 37f8c4b23d0..edfdfc1c894 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4355,8 +4355,11 @@ void commitBlockSynchronization(ExtendedBlock lastblock, // Otherwise fsck will report these blocks as MISSING, especially if the // blocksReceived from Datanodes take a long time to arrive. for (int i = 0; i < trimmedTargets.size(); i++) { - trimmedTargets.get(i).addBlock( - trimmedStorages.get(i), storedBlock); + DatanodeStorageInfo storageInfo = + trimmedTargets.get(i).getStorageInfo(trimmedStorages.get(i)); + if (storageInfo != null) { + storageInfo.addBlock(storedBlock); + } } } @@ -5835,7 +5838,7 @@ NamenodeCommand startCheckpoint(NamenodeRegistration backupNode, } public void processIncrementalBlockReport(final DatanodeID nodeID, - final String poolId, final StorageReceivedDeletedBlocks srdb) + final StorageReceivedDeletedBlocks srdb) throws IOException { writeLock(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 6800fcde174..199d7288897 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1065,7 +1065,7 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg, // for the same node and storage, so the value returned by the last // call of this loop is the final updated value for noStaleStorage. 
// - noStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks); + noStaleStorages = bm.processReport(nodeReg, r.getStorage(), blocks); metrics.incrStorageBlockReportOps(); } @@ -1101,7 +1101,7 @@ public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId, +" blocks."); } for(StorageReceivedDeletedBlocks r : receivedAndDeletedBlocks) { - namesystem.processIncrementalBlockReport(nodeReg, poolId, r); + namesystem.processIncrementalBlockReport(nodeReg, r); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index e632ed1ca97..41af2370d14 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -368,7 +368,7 @@ private void fulfillPipeline(BlockInfo blockInfo, DatanodeStorageInfo[] pipeline) throws IOException { for (int i = 1; i < pipeline.length; i++) { DatanodeStorageInfo storage = pipeline[i]; - bm.addBlock(storage.getDatanodeDescriptor(), storage.getStorageID(), blockInfo, null); + bm.addBlock(storage, blockInfo, null); blockInfo.addStorage(storage); } } @@ -549,12 +549,12 @@ public void testSafeModeIBR() throws Exception { // send block report, should be processed reset(node); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool", + bm.processReport(node, new DatanodeStorage(ds.getStorageID()), new BlockListAsLongs(null, null)); assertEquals(1, ds.getBlockReportCount()); // send block report again, should NOT be processed reset(node); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool", + bm.processReport(node, new DatanodeStorage(ds.getStorageID()), new BlockListAsLongs(null, null)); assertEquals(1, ds.getBlockReportCount()); @@ -566,7 +566,7 @@ public void testSafeModeIBR() throws Exception { assertEquals(0, ds.getBlockReportCount()); // ready for report again // send block report, should be processed after restart reset(node); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool", + bm.processReport(node, new DatanodeStorage(ds.getStorageID()), new BlockListAsLongs(null, null)); assertEquals(1, ds.getBlockReportCount()); } @@ -595,7 +595,7 @@ public void testSafeModeIBRAfterIncremental() throws Exception { // send block report while pretending to already have blocks reset(node); doReturn(1).when(node).numBlocks(); - bm.processReport(node, new DatanodeStorage(ds.getStorageID()), "pool", + bm.processReport(node, new DatanodeStorage(ds.getStorageID()), new BlockListAsLongs(null, null)); assertEquals(1, ds.getBlockReportCount()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java index 12674eb318a..2d7eaf3dcfe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeDescriptor.java @@ -63,16 +63,16 @@ public void testBlocksCounter() throws Exception { assertTrue(storages.length > 0); final String storageID = 
storages[0].getStorageID(); // add first block - assertTrue(dd.addBlock(storageID, blk)); + assertTrue(storages[0].addBlock(blk)); assertEquals(1, dd.numBlocks()); // remove a non-existent block assertFalse(dd.removeBlock(blk1)); assertEquals(1, dd.numBlocks()); // add an existent block - assertFalse(dd.addBlock(storageID, blk)); + assertFalse(storages[0].addBlock(blk)); assertEquals(1, dd.numBlocks()); // add second block - assertTrue(dd.addBlock(storageID, blk1)); + assertTrue(storages[0].addBlock(blk1)); assertEquals(2, dd.numBlocks()); // remove first block assertTrue(dd.removeBlock(blk)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java index a17d32e6672..981ae76a10a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.junit.Test; import com.google.common.base.Joiner; @@ -43,8 +44,10 @@ public class TestPendingDataNodeMessages { @Test public void testQueues() { DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor(); - msgs.enqueueReportedBlock(fakeDN, "STORAGE_ID", block1Gs1, ReplicaState.FINALIZED); - msgs.enqueueReportedBlock(fakeDN, "STORAGE_ID", block1Gs2, ReplicaState.FINALIZED); + DatanodeStorage storage = new DatanodeStorage("STORAGE_ID"); + DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(fakeDN, storage); + msgs.enqueueReportedBlock(storageInfo, block1Gs1, ReplicaState.FINALIZED); + msgs.enqueueReportedBlock(storageInfo, block1Gs2, ReplicaState.FINALIZED); assertEquals(2, msgs.count()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 73c3ec86498..e575ceeb7ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -82,7 +82,7 @@ public class TestReplicationPolicy { private static NameNode namenode; private static BlockPlacementPolicy replicator; private static final String filename = "/dummyfile.txt"; - private static DatanodeDescriptor dataNodes[]; + private static DatanodeDescriptor[] dataNodes; private static DatanodeStorageInfo[] storages; // The interval for marking a datanode as stale, private static final long staleInterval = @@ -1118,8 +1118,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication() // Adding this block will increase its current replication, and that will // remove it from the queue. 
bm.addStoredBlockUnderConstruction(new StatefulBlockInfo(info, info, - ReplicaState.FINALIZED), TestReplicationPolicy.dataNodes[0], - "STORAGE"); + ReplicaState.FINALIZED), TestReplicationPolicy.storages[0]); // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block // from QUEUE_VERY_UNDER_REPLICATED. From 0d66f1f19ce176885120de1666cce12801b1f9b7 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 1 Aug 2014 17:13:37 +0000 Subject: [PATCH 100/354] HADOOP-10902. Deletion of directories with snapshots will not output reason for trash move failure. Contributed by Stephen Chu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615171 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/fs/shell/Delete.java | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 08c16fe16c9..0df665a3a9f 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -466,6 +466,9 @@ Release 2.6.0 - UNRELEASED HADOOP-8069. Enable TCP_NODELAY by default for IPC. (Todd Lipcon via Arpit Agarwal) + HADOOP-10902. Deletion of directories with snapshots will not output + reason for trash move failure. (Stephen Chu via wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java index fcb0690d8d4..6798fbee438 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java @@ -118,7 +118,11 @@ private boolean moveToTrash(PathData item) throws IOException { } catch(FileNotFoundException fnfe) { throw fnfe; } catch (IOException ioe) { - throw new IOException(ioe.getMessage() + ". Consider using -skipTrash option", ioe); + String msg = ioe.getMessage(); + if (ioe.getCause() != null) { + msg += ": " + ioe.getCause().getMessage(); + } + throw new IOException(msg + ". Consider using -skipTrash option", ioe); } } return success; From 5918e991512658aa9d2af45a15862a8d405590fb Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 1 Aug 2014 17:43:17 +0000 Subject: [PATCH 101/354] HADOOP-10900. CredentialShell args should use single-dash style. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615177 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../security/alias/CredentialShell.java | 40 +++++++++---------- .../hadoop/security/alias/TestCredShell.java | 20 +++++----- 3 files changed, 32 insertions(+), 30 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 0df665a3a9f..9397d578f2d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -469,6 +469,8 @@ Release 2.6.0 - UNRELEASED HADOOP-10902. Deletion of directories with snapshots will not output reason for trash move failure. (Stephen Chu via wang) + HADOOP-10900. CredentialShell args should use single-dash style. 
(wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java index bb35ce51d48..2c1a792d5a3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java @@ -79,9 +79,9 @@ public int run(String[] args) throws Exception { /** * Parse the command line arguments and initialize the data *
-   * % hadoop alias create alias [--provider providerPath]
+   * % hadoop alias create alias [-provider providerPath]
    * % hadoop alias list [-provider providerPath]
-   * % hadoop alias delete alias [--provider providerPath] [-i]
+   * % hadoop alias delete alias [-provider providerPath] [-i]
    * 
* @param args * @return @@ -92,28 +92,28 @@ private int init(String[] args) throws IOException { if (args[i].equals("create")) { String alias = args[++i]; command = new CreateCommand(alias); - if (alias.equals("--help")) { + if (alias.equals("-help")) { printCredShellUsage(); return -1; } } else if (args[i].equals("delete")) { String alias = args[++i]; command = new DeleteCommand(alias); - if (alias.equals("--help")) { + if (alias.equals("-help")) { printCredShellUsage(); return -1; } } else if (args[i].equals("list")) { command = new ListCommand(); - } else if (args[i].equals("--provider")) { + } else if (args[i].equals("-provider")) { userSuppliedProvider = true; getConf().set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, args[++i]); - } else if (args[i].equals("-i") || (args[i].equals("--interactive"))) { + } else if (args[i].equals("-i") || (args[i].equals("-interactive"))) { interactive = true; - } else if (args[i].equals("-v") || (args[i].equals("--value"))) { + } else if (args[i].equals("-v") || (args[i].equals("-value"))) { value = args[++i]; - } else if (args[i].equals("--help")) { + } else if (args[i].equals("-help")) { printCredShellUsage(); return -1; } else { @@ -188,20 +188,20 @@ protected void warnIfTransientProvider() { } private class ListCommand extends Command { - public static final String USAGE = "list [--provider] [--help]"; + public static final String USAGE = "list [-provider] [-help]"; public static final String DESC = "The list subcommand displays the aliases contained within \n" + "a particular provider - as configured in core-site.xml or " + - "indicated\nthrough the --provider argument."; + "indicated\nthrough the -provider argument."; public boolean validate() { boolean rc = true; provider = getCredentialProvider(); if (provider == null) { out.println("There are no non-transient CredentialProviders configured.\n" - + "Consider using the --provider option to indicate the provider\n" + + "Consider using the -provider option to indicate the provider\n" + "to use. If you want to list a transient provider then you\n" - + "you MUST use the --provider argument."); + + "you MUST use the -provider argument."); rc = false; } return rc; @@ -229,11 +229,11 @@ public String getUsage() { } private class DeleteCommand extends Command { - public static final String USAGE = "delete [--provider] [--help]"; + public static final String USAGE = "delete [-provider] [-help]"; public static final String DESC = "The delete subcommand deletes the credenital\n" + "specified as the argument from within the provider\n" + - "indicated through the --provider argument"; + "indicated through the -provider argument"; String alias = null; boolean cont = true; @@ -248,13 +248,13 @@ public boolean validate() { if (provider == null) { out.println("There are no valid CredentialProviders configured.\n" + "Nothing will be deleted.\n" - + "Consider using the --provider option to indicate the provider" + + "Consider using the -provider option to indicate the provider" + " to use."); return false; } if (alias == null) { out.println("There is no alias specified. Please provide the" + - "mandatory . See the usage description with --help."); + "mandatory . 
See the usage description with -help."); return false; } if (interactive) { @@ -299,11 +299,11 @@ public String getUsage() { } private class CreateCommand extends Command { - public static final String USAGE = "create [--provider] [--help]"; + public static final String USAGE = "create [-provider] [-help]"; public static final String DESC = "The create subcommand creates a new credential for the name specified\n" + "as the argument within the provider indicated through\n" + - "the --provider argument."; + "the -provider argument."; String alias = null; @@ -317,13 +317,13 @@ public boolean validate() { if (provider == null) { out.println("There are no valid CredentialProviders configured." + "\nCredential will not be created.\n" - + "Consider using the --provider option to indicate the provider" + + + "Consider using the -provider option to indicate the provider" + " to use."); rc = false; } if (alias == null) { out.println("There is no alias specified. Please provide the" + - "mandatory . See the usage description with --help."); + "mandatory . See the usage description with -help."); rc = false; } return rc; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java index c48b69f2149..05eb7b8c2a0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java @@ -45,7 +45,7 @@ public void setup() throws Exception { @Test public void testCredentialSuccessfulLifecycle() throws Exception { outContent.reset(); - String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider", + String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; int rc = 0; CredentialShell cs = new CredentialShell(); @@ -56,14 +56,14 @@ public void testCredentialSuccessfulLifecycle() throws Exception { "created.")); outContent.reset(); - String[] args2 = {"list", "--provider", + String[] args2 = {"list", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; rc = cs.run(args2); assertEquals(0, rc); assertTrue(outContent.toString().contains("credential1")); outContent.reset(); - String[] args4 = {"delete", "credential1", "--provider", + String[] args4 = {"delete", "credential1", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; rc = cs.run(args4); assertEquals(0, rc); @@ -71,7 +71,7 @@ public void testCredentialSuccessfulLifecycle() throws Exception { "deleted.")); outContent.reset(); - String[] args5 = {"list", "--provider", + String[] args5 = {"list", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; rc = cs.run(args5); assertEquals(0, rc); @@ -80,7 +80,7 @@ public void testCredentialSuccessfulLifecycle() throws Exception { @Test public void testInvalidProvider() throws Exception { - String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider", + String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider", "sdff://file/tmp/credstore.jceks"}; int rc = 0; @@ -94,7 +94,7 @@ public void testInvalidProvider() throws Exception { @Test public void testTransientProviderWarning() throws Exception { - String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider", + String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider", "user:///"}; int 
rc = 0; @@ -105,7 +105,7 @@ public void testTransientProviderWarning() throws Exception { assertTrue(outContent.toString().contains("WARNING: you are modifying a " + "transient provider.")); - String[] args2 = {"delete", "credential1", "--provider", "user:///"}; + String[] args2 = {"delete", "credential1", "-provider", "user:///"}; rc = cs.run(args2); assertEquals(outContent.toString(), 0, rc); assertTrue(outContent.toString().contains("credential1 has been successfully " + @@ -129,7 +129,7 @@ public void testTransientProviderOnlyConfig() throws Exception { @Test public void testPromptForCredentialWithEmptyPasswd() throws Exception { - String[] args1 = {"create", "credential1", "--provider", + String[] args1 = {"create", "credential1", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; ArrayList passwords = new ArrayList(); passwords.add(null); @@ -145,7 +145,7 @@ public void testPromptForCredentialWithEmptyPasswd() throws Exception { @Test public void testPromptForCredential() throws Exception { - String[] args1 = {"create", "credential1", "--provider", + String[] args1 = {"create", "credential1", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; ArrayList passwords = new ArrayList(); passwords.add("p@ssw0rd"); @@ -159,7 +159,7 @@ public void testPromptForCredential() throws Exception { assertTrue(outContent.toString().contains("credential1 has been successfully " + "created.")); - String[] args2 = {"delete", "credential1", "--provider", + String[] args2 = {"delete", "credential1", "-provider", "jceks://file" + tmpDir + "/credstore.jceks"}; rc = shell.run(args2); assertEquals(0, rc); From c4a3a29541e1c385af20494984b072b6fd67f2e5 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 1 Aug 2014 17:45:45 +0000 Subject: [PATCH 102/354] HADOOP-10793. KeyShell args should use single-dash style. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615180 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../apache/hadoop/crypto/key/KeyShell.java | 84 +++++++++---------- .../hadoop/crypto/key/TestKeyShell.java | 48 +++++------ 3 files changed, 68 insertions(+), 66 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 9397d578f2d..8d19ef49675 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -195,6 +195,8 @@ Trunk (Unreleased) HADOOP-10756. KMS audit log should consolidate successful similar requests. (asuresh via tucu) + HADOOP-10793. KeyShell args should use single-dash style. (wang) + BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. 
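HADOOP-10900 and HADOOP-10793 make the same change to CredentialShell and KeyShell: double-dash options give way to the single-dash style used by the other Hadoop command line tools. A minimal sketch of driving both shells with the new style, mirroring the tests in these patches; the alias, key name and provider paths are example values only:

  // Sketch only: single-dash options after HADOOP-10900 / HADOOP-10793.
  CredentialShell cs = new CredentialShell();
  cs.setConf(new Configuration());
  int rc = cs.run(new String[] {"create", "credential1", "-value", "p@ssw0rd",
      "-provider", "jceks://file/tmp/credstore.jceks"});  // previously --value / --provider

  KeyShell ks = new KeyShell();
  ks.setConf(new Configuration());
  rc = ks.run(new String[] {"create", "key1", "-cipher", "AES/CTR/NoPadding",
      "-size", "256", "-provider", "jceks://file/tmp/keystore.jceks"});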
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java index fb01e5f7c5b..6d50c9168d8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java @@ -38,9 +38,9 @@ */ public class KeyShell extends Configured implements Tool { final static private String USAGE_PREFIX = "Usage: hadoop key " + - "[generic options]\n"; + "[generic options]\n"; final static private String COMMANDS = - " [--help]\n" + + " [-help]\n" + " [" + CreateCommand.USAGE + "]\n" + " [" + RollCommand.USAGE + "]\n" + " [" + DeleteCommand.USAGE + "]\n" + @@ -90,11 +90,11 @@ public int run(String[] args) throws Exception { /** * Parse the command line arguments and initialize the data *
-   * % hadoop key create keyName [--size size] [--cipher algorithm]
-   *    [--provider providerPath]
-   * % hadoop key roll keyName [--provider providerPath]
+   * % hadoop key create keyName [-size size] [-cipher algorithm]
+   *    [-provider providerPath]
+   * % hadoop key roll keyName [-provider providerPath]
    * % hadoop key list [-provider providerPath]
-   * % hadoop key delete keyName [--provider providerPath] [-i]
+   * % hadoop key delete keyName [-provider providerPath] [-i]
    * 
* @param args Command line arguments. * @return 0 on success, 1 on failure. @@ -107,47 +107,47 @@ private int init(String[] args) throws IOException { for (int i = 0; i < args.length; i++) { // parse command line boolean moreTokens = (i < args.length - 1); if (args[i].equals("create")) { - String keyName = "--help"; + String keyName = "-help"; if (moreTokens) { keyName = args[++i]; } command = new CreateCommand(keyName, options); - if ("--help".equals(keyName)) { + if ("-help".equals(keyName)) { printKeyShellUsage(); return 1; } } else if (args[i].equals("delete")) { - String keyName = "--help"; + String keyName = "-help"; if (moreTokens) { keyName = args[++i]; } command = new DeleteCommand(keyName); - if ("--help".equals(keyName)) { + if ("-help".equals(keyName)) { printKeyShellUsage(); return 1; } } else if (args[i].equals("roll")) { - String keyName = "--help"; + String keyName = "-help"; if (moreTokens) { keyName = args[++i]; } command = new RollCommand(keyName); - if ("--help".equals(keyName)) { + if ("-help".equals(keyName)) { printKeyShellUsage(); return 1; } } else if ("list".equals(args[i])) { command = new ListCommand(); - } else if ("--size".equals(args[i]) && moreTokens) { + } else if ("-size".equals(args[i]) && moreTokens) { options.setBitLength(Integer.parseInt(args[++i])); - } else if ("--cipher".equals(args[i]) && moreTokens) { + } else if ("-cipher".equals(args[i]) && moreTokens) { options.setCipher(args[++i]); - } else if ("--description".equals(args[i]) && moreTokens) { + } else if ("-description".equals(args[i]) && moreTokens) { options.setDescription(args[++i]); - } else if ("--attr".equals(args[i]) && moreTokens) { + } else if ("-attr".equals(args[i]) && moreTokens) { final String attrval[] = args[++i].split("=", 2); final String attr = attrval[0].trim(); final String val = attrval[1].trim(); @@ -164,14 +164,14 @@ private int init(String[] args) throws IOException { return 1; } attributes.put(attr, val); - } else if ("--provider".equals(args[i]) && moreTokens) { + } else if ("-provider".equals(args[i]) && moreTokens) { userSuppliedProvider = true; getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]); - } else if ("--metadata".equals(args[i])) { + } else if ("-metadata".equals(args[i])) { getConf().setBoolean(LIST_METADATA, true); - } else if ("-i".equals(args[i]) || ("--interactive".equals(args[i]))) { + } else if ("-i".equals(args[i]) || ("-interactive".equals(args[i]))) { interactive = true; - } else if ("--help".equals(args[i])) { + } else if ("-help".equals(args[i])) { printKeyShellUsage(); return 1; } else { @@ -258,11 +258,11 @@ protected void warnIfTransientProvider() { private class ListCommand extends Command { public static final String USAGE = - "list [--provider ] [--metadata] [--help]"; + "list [-provider ] [-metadata] [-help]"; public static final String DESC = "The list subcommand displays the keynames contained within\n" + "a particular provider as configured in core-site.xml or\n" + - "specified with the --provider argument. --metadata displays\n" + + "specified with the -provider argument. -metadata displays\n" + "the metadata."; private boolean metadata = false; @@ -272,9 +272,9 @@ public boolean validate() { provider = getKeyProvider(); if (provider == null) { out.println("There are no non-transient KeyProviders configured.\n" - + "Use the --provider option to specify a provider. If you\n" + + "Use the -provider option to specify a provider. 
If you\n" + "want to list a transient provider then you must use the\n" - + "--provider argument."); + + "-provider argument."); rc = false; } metadata = getConf().getBoolean(LIST_METADATA, false); @@ -310,10 +310,10 @@ public String getUsage() { } private class RollCommand extends Command { - public static final String USAGE = "roll [--provider ] [--help]"; + public static final String USAGE = "roll [-provider ] [-help]"; public static final String DESC = "The roll subcommand creates a new version for the specified key\n" + - "within the provider indicated using the --provider argument\n"; + "within the provider indicated using the -provider argument\n"; String keyName = null; @@ -326,13 +326,13 @@ public boolean validate() { provider = getKeyProvider(); if (provider == null) { out.println("There are no valid KeyProviders configured. The key\n" + - "has not been rolled. Use the --provider option to specify\n" + + "has not been rolled. Use the -provider option to specify\n" + "a provider."); rc = false; } if (keyName == null) { out.println("Please provide a .\n" + - "See the usage description by using --help."); + "See the usage description by using -help."); rc = false; } return rc; @@ -367,11 +367,11 @@ public String getUsage() { } private class DeleteCommand extends Command { - public static final String USAGE = "delete [--provider ] [--help]"; + public static final String USAGE = "delete [-provider ] [-help]"; public static final String DESC = "The delete subcommand deletes all versions of the key\n" + "specified by the argument from within the\n" + - "provider specified --provider."; + "provider specified -provider."; String keyName = null; boolean cont = true; @@ -385,12 +385,12 @@ public boolean validate() { provider = getKeyProvider(); if (provider == null) { out.println("There are no valid KeyProviders configured. Nothing\n" - + "was deleted. Use the --provider option to specify a provider."); + + "was deleted. Use the -provider option to specify a provider."); return false; } if (keyName == null) { out.println("There is no keyName specified. Please specify a " + - ". See the usage description with --help."); + ". See the usage description with -help."); return false; } if (interactive) { @@ -436,19 +436,19 @@ public String getUsage() { private class CreateCommand extends Command { public static final String USAGE = - "create [--cipher ] [--size ]\n" + - " [--description ]\n" + - " [--attr ]\n" + - " [--provider ] [--help]"; + "create [-cipher ] [-size ]\n" + + " [-description ]\n" + + " [-attr ]\n" + + " [-provider ] [-help]"; public static final String DESC = "The create subcommand creates a new key for the name specified\n" + "by the argument within the provider specified by the\n" + - "--provider argument. You may specify a cipher with the --cipher\n" + + "-provider argument. You may specify a cipher with the -cipher\n" + "argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" + "The default keysize is 256. You may specify the requested key\n" + - "length using the --size argument. Arbitrary attribute=value\n" + - "style attributes may be specified using the --attr argument.\n" + - "--attr may be specified multiple times, once per attribute.\n"; + "length using the -size argument. 
Arbitrary attribute=value\n" + + "style attributes may be specified using the -attr argument.\n" + + "-attr may be specified multiple times, once per attribute.\n"; final String keyName; final Options options; @@ -463,13 +463,13 @@ public boolean validate() { provider = getKeyProvider(); if (provider == null) { out.println("There are no valid KeyProviders configured. No key\n" + - " was created. You can use the --provider option to specify\n" + + " was created. You can use the -provider option to specify\n" + " a provider to use."); rc = false; } if (keyName == null) { out.println("Please provide a . See the usage description" + - " with --help."); + " with -help."); rc = false; } return rc; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java index 154579b567d..5981a2a6a38 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java @@ -73,7 +73,7 @@ public void cleanUp() throws Exception { private void deleteKey(KeyShell ks, String keyName) throws Exception { int rc; outContent.reset(); - final String[] delArgs = {"delete", keyName, "--provider", jceksProvider}; + final String[] delArgs = {"delete", keyName, "-provider", jceksProvider}; rc = ks.run(delArgs); assertEquals(0, rc); assertTrue(outContent.toString().contains(keyName + " has been " + @@ -90,8 +90,8 @@ private void deleteKey(KeyShell ks, String keyName) throws Exception { private String listKeys(KeyShell ks, boolean wantMetadata) throws Exception { int rc; outContent.reset(); - final String[] listArgs = {"list", "--provider", jceksProvider }; - final String[] listArgsM = {"list", "--metadata", "--provider", jceksProvider }; + final String[] listArgs = {"list", "-provider", jceksProvider }; + final String[] listArgsM = {"list", "-metadata", "-provider", jceksProvider }; rc = ks.run(wantMetadata ? 
listArgsM : listArgs); assertEquals(0, rc); return outContent.toString(); @@ -106,7 +106,7 @@ public void testKeySuccessfulKeyLifecycle() throws Exception { ks.setConf(new Configuration()); outContent.reset(); - final String[] args1 = {"create", keyName, "--provider", jceksProvider}; + final String[] args1 = {"create", keyName, "-provider", jceksProvider}; rc = ks.run(args1); assertEquals(0, rc); assertTrue(outContent.toString().contains(keyName + " has been " + @@ -121,7 +121,7 @@ public void testKeySuccessfulKeyLifecycle() throws Exception { assertTrue(listOut.contains("created")); outContent.reset(); - final String[] args2 = {"roll", keyName, "--provider", jceksProvider}; + final String[] args2 = {"roll", keyName, "-provider", jceksProvider}; rc = ks.run(args2); assertEquals(0, rc); assertTrue(outContent.toString().contains("key1 has been successfully " + @@ -137,8 +137,8 @@ public void testKeySuccessfulKeyLifecycle() throws Exception { @Test public void testKeySuccessfulCreationWithDescription() throws Exception { outContent.reset(); - final String[] args1 = {"create", "key1", "--provider", jceksProvider, - "--description", "someDescription"}; + final String[] args1 = {"create", "key1", "-provider", jceksProvider, + "-description", "someDescription"}; int rc = 0; KeyShell ks = new KeyShell(); ks.setConf(new Configuration()); @@ -154,7 +154,7 @@ public void testKeySuccessfulCreationWithDescription() throws Exception { @Test public void testInvalidKeySize() throws Exception { - final String[] args1 = {"create", "key1", "--size", "56", "--provider", + final String[] args1 = {"create", "key1", "-size", "56", "-provider", jceksProvider}; int rc = 0; @@ -167,7 +167,7 @@ public void testInvalidKeySize() throws Exception { @Test public void testInvalidCipher() throws Exception { - final String[] args1 = {"create", "key1", "--cipher", "LJM", "--provider", + final String[] args1 = {"create", "key1", "-cipher", "LJM", "-provider", jceksProvider}; int rc = 0; @@ -180,7 +180,7 @@ public void testInvalidCipher() throws Exception { @Test public void testInvalidProvider() throws Exception { - final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider", + final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider", "sdff://file/tmp/keystore.jceks"}; int rc = 0; @@ -194,7 +194,7 @@ public void testInvalidProvider() throws Exception { @Test public void testTransientProviderWarning() throws Exception { - final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider", + final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider", "user:///"}; int rc = 0; @@ -224,8 +224,8 @@ public void testTransientProviderOnlyConfig() throws Exception { @Test public void testFullCipher() throws Exception { final String keyName = "key1"; - final String[] args1 = {"create", keyName, "--cipher", "AES/CBC/pkcs5Padding", - "--provider", jceksProvider}; + final String[] args1 = {"create", keyName, "-cipher", "AES/CBC/pkcs5Padding", + "-provider", jceksProvider}; int rc = 0; KeyShell ks = new KeyShell(); @@ -245,8 +245,8 @@ public void testAttributes() throws Exception { ks.setConf(new Configuration()); /* Simple creation test */ - final String[] args1 = {"create", "keyattr1", "--provider", jceksProvider, - "--attr", "foo=bar"}; + final String[] args1 = {"create", "keyattr1", "-provider", jceksProvider, + "-attr", "foo=bar"}; rc = ks.run(args1); assertEquals(0, rc); assertTrue(outContent.toString().contains("keyattr1 has been " + @@ -259,8 +259,8 @@ public void testAttributes() 
throws Exception { /* Negative tests: no attribute */ outContent.reset(); - final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider, - "--attr", "=bar"}; + final String[] args2 = {"create", "keyattr2", "-provider", jceksProvider, + "-attr", "=bar"}; rc = ks.run(args2); assertEquals(1, rc); @@ -288,10 +288,10 @@ public void testAttributes() throws Exception { /* Test several attrs together... */ outContent.reset(); - final String[] args3 = {"create", "keyattr3", "--provider", jceksProvider, - "--attr", "foo = bar", - "--attr", " glarch =baz ", - "--attr", "abc=def"}; + final String[] args3 = {"create", "keyattr3", "-provider", jceksProvider, + "-attr", "foo = bar", + "-attr", " glarch =baz ", + "-attr", "abc=def"}; rc = ks.run(args3); assertEquals(0, rc); @@ -304,9 +304,9 @@ public void testAttributes() throws Exception { /* Negative test - repeated attributes should fail */ outContent.reset(); - final String[] args4 = {"create", "keyattr4", "--provider", jceksProvider, - "--attr", "foo=bar", - "--attr", "foo=glarch"}; + final String[] args4 = {"create", "keyattr4", "-provider", jceksProvider, + "-attr", "foo=bar", + "-attr", "foo=glarch"}; rc = ks.run(args4); assertEquals(1, rc); From ce9aae4a52ce0817a17f52d3eac6acbdd30f75b4 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 1 Aug 2014 17:53:05 +0000 Subject: [PATCH 103/354] HADOOP-10920. site plugin couldn't parse hadoop-kms index.apt.vm. Contributed by Akira Ajisaka. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615181 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-kms/src/site/apt/index.apt.vm | 12 ++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 8d19ef49675..0dbfc6e1348 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -410,6 +410,9 @@ Trunk (Unreleased) HADOOP-10881. Clarify usage of encryption and encrypted encryption key in KeyProviderCryptoExtension. (wang) + HADOOP-10920. site plugin couldn't parse hadoop-kms index.apt.vm. + (Akira Ajisaka via wang) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) diff --git a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm index 557cafa2f8a..ebfe8e2c170 100644 --- a/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm +++ b/hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm @@ -106,14 +106,14 @@ Hadoop Key Management Server (KMS) - Documentation Sets ${project.version} ** KMS Aggregated Audit logs -Audit logs are aggregated for API accesses to the GET_KEY_VERSION, -GET_CURRENT_KEY, DECRYPT_EEK, GENERATE_EEK operations. + Audit logs are aggregated for API accesses to the GET_KEY_VERSION, + GET_CURRENT_KEY, DECRYPT_EEK, GENERATE_EEK operations. -Entries are grouped by the (user,key,operation) combined key for a configurable -aggregation interval after which the number of accesses to the specified -end-point by the user for a given key is flushed to the audit log. + Entries are grouped by the (user,key,operation) combined key for a + configurable aggregation interval after which the number of accesses to the + specified end-point by the user for a given key is flushed to the audit log. 
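
For readers unfamiliar with the aggregation scheme described in the KMS documentation above, the following is a minimal, self-contained sketch of the idea: accesses are counted per (user, key, operation) combined key and the counts are emitted once the configured aggregation interval has elapsed. The class and member names here are hypothetical illustrations, not the actual KMS audit implementation.

import java.util.HashMap;
import java.util.Map;

public class AggregatedAuditSketch {
  private final long aggregationIntervalMs;
  private final Map<String, Long> counts = new HashMap<String, Long>();
  private long lastFlush = System.currentTimeMillis();

  public AggregatedAuditSketch(long aggregationIntervalMs) {
    this.aggregationIntervalMs = aggregationIntervalMs;
  }

  /** Record one API access under the (user, key, operation) combined key. */
  public synchronized void access(String user, String key, String operation) {
    String combinedKey = user + "," + key + "," + operation;
    Long current = counts.get(combinedKey);
    counts.put(combinedKey, current == null ? 1L : current + 1L);
    maybeFlush();
  }

  /** Emit and reset the aggregated counts once the interval has elapsed. */
  private void maybeFlush() {
    long now = System.currentTimeMillis();
    if (now - lastFlush >= aggregationIntervalMs) {
      for (Map.Entry<String, Long> entry : counts.entrySet()) {
        // The real server writes to its audit log; stdout keeps the sketch simple.
        System.out.println("AUDIT (" + entry.getKey() + ") accesses=" + entry.getValue());
      }
      counts.clear();
      lastFlush = now;
    }
  }
}
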
-The Aggregation interval is configured via the property : + The Aggregation interval is configured via the property : +---+ From da2ce46fbe01d394d5b4b358d4d2e53f6a38d350 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 1 Aug 2014 17:58:14 +0000 Subject: [PATCH 104/354] HDFS-6802. Some tests in TestDFSClientFailover are missing @Test annotation. Contributed by Akira Ajisaka. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615183 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/hadoop/security/SecurityUtil.java | 3 ++- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../org/apache/hadoop/hdfs/TestDFSClientFailover.java | 11 +++++++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index b71fbda6301..b5bf26fa084 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -77,7 +77,8 @@ public class SecurityUtil { * For use only by tests and initialization */ @InterfaceAudience.Private - static void setTokenServiceUseIp(boolean flag) { + @VisibleForTesting + public static void setTokenServiceUseIp(boolean flag) { useIpForTokenService = flag; hostResolver = !useIpForTokenService ? new QualifiedHostResolver() diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 9ae0002260e..158f7c18ff2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -346,6 +346,9 @@ Release 2.6.0 - UNRELEASED HDFS-6794. Update BlockManager methods to use DatanodeStorageInfo where possible (Arpit Agarwal) + HDFS-6802. Some tests in TestDFSClientFailover are missing @Test + annotation. (Akira Ajisaka via wang) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java index d2a03d698d7..d33d7562cf6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java @@ -52,6 +52,7 @@ import org.apache.hadoop.io.retry.FailoverProxyProvider; import org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.net.StandardSocketFactory; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; @@ -89,6 +90,11 @@ public void tearDownCluster() throws IOException { cluster.shutdown(); } + @After + public void clearConfig() { + SecurityUtil.setTokenServiceUseIp(true); + } + /** * Make sure that client failover works when an active NN dies and the standby * takes over. @@ -323,6 +329,7 @@ public void close() throws IOException { /** * Test to verify legacy proxy providers are correctly wrapped. 
*/ + @Test public void testWrappedFailoverProxyProvider() throws Exception { // setup the config with the dummy provider class Configuration config = new HdfsConfiguration(conf); @@ -332,6 +339,9 @@ public void testWrappedFailoverProxyProvider() throws Exception { DummyLegacyFailoverProxyProvider.class.getName()); Path p = new Path("hdfs://" + logicalName + "/"); + // not to use IP address for token service + SecurityUtil.setTokenServiceUseIp(false); + // Logical URI should be used. assertTrue("Legacy proxy providers should use logical URI.", HAUtil.useLogicalUri(config, p.toUri())); @@ -340,6 +350,7 @@ public void testWrappedFailoverProxyProvider() throws Exception { /** * Test to verify IPFailoverProxyProvider is not requiring logical URI. */ + @Test public void testIPFailoverProxyProviderLogicalUri() throws Exception { // setup the config with the IP failover proxy provider class Configuration config = new HdfsConfiguration(conf); From 3d9e39e51f8532716965dbafb16d9f0dbf3b79c5 Mon Sep 17 00:00:00 2001 From: Charles Lamb Date: Fri, 1 Aug 2014 18:36:32 +0000 Subject: [PATCH 105/354] HDFS-6807. Fix TestReservedRawPaths. (clamb) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1615188 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 ++ .../org/apache/hadoop/hdfs/DFSTestUtil.java | 20 ++++++++++++++++++ .../hadoop/hdfs/TestEncryptionZones.java | 21 ++++--------------- .../hadoop/hdfs/TestReservedRawPaths.java | 12 ++++++----- 4 files changed, 33 insertions(+), 22 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 743873ea744..63edafd4bcc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -81,3 +81,5 @@ fs-encryption (Unreleased) HDFS-6785. Should not be able to create encryption zone using path to a non-directory file. (clamb) + + HDFS-6807. Fix TestReservedRawPaths. (clamb) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 0eb0965695b..6dc80d39114 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem.Statistics; @@ -75,6 +76,7 @@ import java.io.*; import java.net.*; import java.nio.ByteBuffer; +import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.util.*; import java.util.concurrent.TimeoutException; @@ -1348,4 +1350,22 @@ public static void verifyFilesNotEqual(FileSystem fs, Path p1, Path p2, in2.close(); } } + + /** + * Helper function to create a key in the Key Provider. 
+ * + * @param keyName The name of the key to create + * @param cluster The cluster to create it in + * @param conf Configuration to use + */ + public static void createKey(String keyName, MiniDFSCluster cluster, + Configuration conf) + throws NoSuchAlgorithmException, IOException { + KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); + final KeyProvider.Options options = KeyProvider.options(conf); + options.setDescription(keyName); + options.setBitLength(128); + provider.createKey(keyName, options); + provider.flush(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 78f8d8ef3a5..9a3456fb3bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -102,7 +102,7 @@ public void setup() throws Exception { fs.getClient().provider = cluster.getNameNode().getNamesystem() .getProvider(); // Create a test key - createKey(TEST_KEY); + DFSTestUtil.createKey(TEST_KEY, cluster, conf); } @After @@ -147,19 +147,6 @@ public void assertZonePresent(String keyName, String path) throws IOException { ); } - /** - * Helper function to create a key in the Key Provider. - */ - private void createKey(String keyName) - throws NoSuchAlgorithmException, IOException { - KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider(); - final KeyProvider.Options options = KeyProvider.options(conf); - options.setDescription(keyName); - options.setBitLength(128); - provider.createKey(keyName, options); - provider.flush(); - } - @Test(timeout = 60000) public void testBasicOperations() throws Exception { @@ -263,7 +250,7 @@ public void testBasicOperations() throws Exception { assertNumZones(1); /* Test success of creating an EZ when they key exists. 
*/ - createKey(myKeyName); + DFSTestUtil.createKey(myKeyName, cluster, conf); dfsAdmin.createEncryptionZone(zone2, myKeyName); assertNumZones(++numZones); assertZonePresent(myKeyName, zone2.toString()); @@ -601,7 +588,7 @@ public void doCleanup() throws Exception { // Test when the parent directory becomes a different EZ fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); final String otherKey = "otherKey"; - createKey(otherKey); + DFSTestUtil.createKey(otherKey, cluster, conf); dfsAdmin.createEncryptionZone(zone1, TEST_KEY); executor.submit(new InjectFaultTask() { @@ -621,7 +608,7 @@ public void doCleanup() throws Exception { // Test that the retry limit leads to an error fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); final String anotherKey = "anotherKey"; - createKey(anotherKey); + DFSTestUtil.createKey(anotherKey, cluster, conf); dfsAdmin.createEncryptionZone(zone1, anotherKey); String keyToUse = otherKey; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java index c49e88b5460..2a20954a391 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java @@ -57,12 +57,13 @@ public class TestReservedRawPaths { private MiniDFSCluster cluster; private HdfsAdmin dfsAdmin; private DistributedFileSystem fs; + private final String TEST_KEY = "testKey"; protected FileSystemTestWrapper fsWrapper; protected FileContextTestWrapper fcWrapper; @Before - public void setup() throws IOException { + public void setup() throws Exception { conf = new HdfsConfiguration(); fsHelper = new FileSystemTestHelper(); // Set up java key store @@ -82,6 +83,7 @@ public void setup() throws IOException { // else the updates do not get flushed properly fs.getClient().provider = cluster.getNameNode().getNamesystem() .getProvider(); + DFSTestUtil.createKey(TEST_KEY, cluster, conf); } @After @@ -110,7 +112,7 @@ public void testReadWriteRaw() throws Exception { // Create the first enc file final Path zone = new Path("/zone"); fs.mkdirs(zone); - dfsAdmin.createEncryptionZone(zone, null); + dfsAdmin.createEncryptionZone(zone, TEST_KEY); final Path encFile1 = new Path(zone, "myfile"); DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED); // Read them back in and compare byte-by-byte @@ -150,7 +152,7 @@ public void testGetFileStatus() throws Exception { final Path zone = new Path("zone"); final Path slashZone = new Path("/", zone); fs.mkdirs(slashZone); - dfsAdmin.createEncryptionZone(slashZone, null); + dfsAdmin.createEncryptionZone(slashZone, TEST_KEY); final Path base = new Path("base"); final Path reservedRaw = new Path("/.reserved/raw"); @@ -182,7 +184,7 @@ public void testReservedRawMkdir() throws Exception { final Path zone = new Path("zone"); final Path slashZone = new Path("/", zone); fs.mkdirs(slashZone); - dfsAdmin.createEncryptionZone(slashZone, null); + dfsAdmin.createEncryptionZone(slashZone, TEST_KEY); final Path rawRoot = new Path("/.reserved/raw"); final Path dir1 = new Path("dir1"); final Path rawDir1 = new Path(rawRoot, dir1); @@ -220,7 +222,7 @@ public void testAdminAccessOnly() throws Exception { final Path zone = new Path("zone"); final Path slashZone = new Path("/", zone); fs.mkdirs(slashZone); - dfsAdmin.createEncryptionZone(slashZone, null); + dfsAdmin.createEncryptionZone(slashZone, 
TEST_KEY); final Path base = new Path("base"); final Path reservedRaw = new Path("/.reserved/raw"); final int len = 8192; From 70c99278a9ffb8a22059c20357b435c7b576b3db Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 1 Aug 2014 18:47:01 +0000 Subject: [PATCH 106/354] HDFS-6780. Batch the encryption zones listing API. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1615189 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-fs-encryption.txt | 2 + .../org/apache/hadoop/hdfs/DFSClient.java | 10 ++- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 +- .../hadoop/hdfs/DistributedFileSystem.java | 3 +- .../apache/hadoop/hdfs/client/HdfsAdmin.java | 19 +++--- .../hadoop/hdfs/protocol/ClientProtocol.java | 15 ++--- .../hdfs/protocol/EncryptionZoneIterator.java | 51 +++++++++++++++ .../hdfs/protocol/EncryptionZoneWithId.java | 64 +++++++++++++++++++ .../EncryptionZoneWithIdIterator.java | 53 +++++++++++++++ ...amenodeProtocolServerSideTranslatorPB.java | 11 +++- .../ClientNamenodeProtocolTranslatorPB.java | 26 ++++++-- .../hadoop/hdfs/protocolPB/PBHelper.java | 57 +++++------------ .../namenode/EncryptionZoneManager.java | 51 +++++++++++---- .../hdfs/server/namenode/FSDirectory.java | 10 +-- .../hdfs/server/namenode/FSNamesystem.java | 7 +- .../server/namenode/NameNodeRpcServer.java | 7 +- .../apache/hadoop/hdfs/tools/CryptoAdmin.java | 6 +- .../src/main/proto/encryption.proto | 7 +- .../src/main/resources/hdfs-default.xml | 9 +++ .../hadoop/hdfs/TestEncryptionZones.java | 41 ++++++++---- 20 files changed, 342 insertions(+), 111 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt index 63edafd4bcc..f8289ac66cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt @@ -72,6 +72,8 @@ fs-encryption (Unreleased) HDFS-6692. Add more HDFS encryption tests. (wang) + HDFS-6780. Batch the encryption zones listing API. 
(wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index d90c7076e1b..04acb037665 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -150,6 +150,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -2857,13 +2858,10 @@ public void createEncryptionZone(String src, String keyName) } } - public List listEncryptionZones() throws IOException { + public RemoteIterator listEncryptionZones() + throws IOException { checkOpen(); - try { - return namenode.listEncryptionZones(); - } catch (RemoteException re) { - throw re.unwrapRemoteException(AccessControlException.class); - } + return new EncryptionZoneIterator(namenode); } public void setXAttr(String src, String name, byte[] value, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index c16c15d34b0..d884e5f7c88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -566,7 +566,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class"; public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection"; public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class"; - + public static final int DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100; + public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses"; + // Journal-node related configs. These are read on the JN side. 
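
The switch to a RemoteIterator in HDFS-6780 means callers now walk the encryption zone listing incrementally instead of receiving a single List, with batches fetched transparently inside the iterator. A minimal usage sketch, mirroring the loop used by CryptoAdmin later in this patch, is shown below; the NameNode URI and the assumption of HDFS superuser credentials are illustrative, not part of the patch.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

public class ListZonesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed NameNode address for illustration only.
    HdfsAdmin admin = new HdfsAdmin(new URI("hdfs://localhost:8020"), conf);
    RemoteIterator<EncryptionZone> it = admin.listEncryptionZones();
    while (it.hasNext()) {
      // Each next() may trigger a new batched RPC behind the scenes.
      EncryptionZone zone = it.next();
      System.out.println(zone.getPath() + " " + zone.getKeyName());
    }
  }
}
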
public static final String DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir"; public static final String DFS_JOURNALNODE_EDITS_DIR_DEFAULT = "/tmp/hadoop/dfs/journalnode/"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index eccd563b270..52721962531 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1805,7 +1805,8 @@ public void createEncryptionZone(Path path, String keyName) } /* HDFS only */ - public List listEncryptionZones() throws IOException { + public RemoteIterator listEncryptionZones() + throws IOException { return dfs.listEncryptionZones(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java index 0a22d9dd3f4..1a8dce3acbf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java @@ -248,16 +248,17 @@ public void createEncryptionZone(Path path, String keyName) } /** - * Return a list of all {@link EncryptionZone}s in the HDFS hierarchy which - * are visible to the caller. If the caller is an HDFS superuser, - * then the key name of each encryption zone will also be provided. - * - * @throws IOException if there was a general IO exception - * - * @return List the list of Encryption Zones that the caller has - * access to. + * Returns a RemoteIterator which can be used to list the encryption zones + * in HDFS. For large numbers of encryption zones, the iterator will fetch + * the list of zones in a number of small batches. + *

+ * Since the list is fetched in batches, it does not represent a + * consistent snapshot of the entire list of encryption zones. + *

+ * This method can only be called by HDFS superusers. */ - public List listEncryptionZones() throws IOException { + public RemoteIterator listEncryptionZones() + throws IOException { return dfs.listEncryptionZones(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index effc10d9fe5..a571ac6e389 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1275,16 +1275,15 @@ public void createEncryptionZone(String src, String keyName) throws IOException; /** - * Return a list of all {@EncryptionZone}s in the HDFS hierarchy which are - * visible to the caller. If the caller is the HDFS admin, then the returned - * EncryptionZone instances will have the key id field filled in. If the - * caller is not the HDFS admin, then the EncryptionZone instances will only - * have the path field filled in and only those zones that are visible to the - * user are returned. + * Used to implement cursor-based batched listing of {@EncryptionZone}s. + * + * @param prevId ID of the last item in the previous batch. If there is no + * previous batch, a negative value can be used. + * @return Batch of encryption zones. */ @Idempotent - public List listEncryptionZones() - throws IOException; + public BatchedEntries listEncryptionZones( + long prevId) throws IOException; /** * Set xattr of a file or directory. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java new file mode 100644 index 00000000000..ff308dabf77 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.RemoteIterator; + +/** + * EncryptionZoneIterator is a remote iterator that iterates over encryption + * zones. It supports retrying in case of namenode failover. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class EncryptionZoneIterator implements RemoteIterator { + + private final EncryptionZoneWithIdIterator iterator; + + public EncryptionZoneIterator(ClientProtocol namenode) { + iterator = new EncryptionZoneWithIdIterator(namenode); + } + + @Override + public boolean hasNext() throws IOException { + return iterator.hasNext(); + } + + @Override + public EncryptionZone next() throws IOException { + EncryptionZoneWithId ezwi = iterator.next(); + return ezwi.toEncryptionZone(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java new file mode 100644 index 00000000000..7ed4884bbd5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java @@ -0,0 +1,64 @@ +package org.apache.hadoop.hdfs.protocol; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Internal class similar to an {@link EncryptionZone} which also holds a + * unique id. Used to implement batched listing of encryption zones. + */ +@InterfaceAudience.Private +public class EncryptionZoneWithId extends EncryptionZone { + + final long id; + + public EncryptionZoneWithId(String path, String keyName, long id) { + super(path, keyName); + this.id = id; + } + + public long getId() { + return id; + } + + EncryptionZone toEncryptionZone() { + return new EncryptionZone(getPath(), getKeyName()); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(17, 29) + .append(super.hashCode()) + .append(id) + .toHashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + EncryptionZoneWithId that = (EncryptionZoneWithId) o; + + if (id != that.id) { + return false; + } + + return true; + } + + @Override + public String toString() { + return "EncryptionZoneWithId [" + + "id=" + id + + ", " + super.toString() + + ']'; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java new file mode 100644 index 00000000000..78c7b620535 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.BatchedRemoteIterator; + +/** + * Used on the client-side to iterate over the list of encryption zones + * stored on the namenode. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class EncryptionZoneWithIdIterator + extends BatchedRemoteIterator { + + private final ClientProtocol namenode; + + EncryptionZoneWithIdIterator(ClientProtocol namenode) { + super(Long.valueOf(0)); + this.namenode = namenode; + } + + @Override + public BatchedEntries makeRequest(Long prevId) + throws IOException { + return namenode.listEncryptionZones(prevId); + } + + @Override + public Long elementToPrevKey(EncryptionZoneWithId entry) { + return entry.getId(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 7f7fa185ec8..acb0294eee2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -1317,7 +1318,15 @@ public ListEncryptionZonesResponseProto listEncryptionZones( RpcController controller, ListEncryptionZonesRequestProto req) throws ServiceException { try { - return PBHelper.convertListEZResponse(server.listEncryptionZones()); + BatchedEntries entries = server + .listEncryptionZones(req.getId()); + ListEncryptionZonesResponseProto.Builder builder = + ListEncryptionZonesResponseProto.newBuilder(); + builder.setHasMore(entries.hasMore()); + for (int i=0; i listEncryptionZones() throws IOException { + public BatchedEntries listEncryptionZones(long id) + throws IOException { final ListEncryptionZonesRequestProto req = - ListEncryptionZonesRequestProto.newBuilder().build(); + ListEncryptionZonesRequestProto.newBuilder() + .setId(id) + .build(); try { - return PBHelper.convert(rpcProxy.listEncryptionZones(null, req)); + EncryptionZonesProtos.ListEncryptionZonesResponseProto response = + rpcProxy.listEncryptionZones(null, req); + List elements = + Lists.newArrayListWithCapacity(response.getZonesCount()); + for (EncryptionZoneWithIdProto p : response.getZonesList()) { + elements.add(PBHelper.convert(p)); + } + return new BatchedListEntries(elements, + response.getHasMore()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index e6de79cbffb..230f5455198 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.protocolPB; import static com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos + .EncryptionZoneWithIdProto; import java.io.EOFException; import java.io.IOException; @@ -59,7 +61,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdfs.protocol.FsAclPermission; @@ -111,8 +113,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto; -import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; @@ -2264,45 +2264,6 @@ public static List convertXAttrs(List xAttrSpec) { } return xAttrs; } - - public static List convert(ListEncryptionZonesResponseProto a) { - final List ezs = a.getPathsAndKeysList(); - return convertEZ(ezs); - } - - public static ListEncryptionZonesResponseProto convertListEZResponse( - List ezs) { - final ListEncryptionZonesResponseProto.Builder builder = - ListEncryptionZonesResponseProto.newBuilder(); - builder.addAllPathsAndKeys(convertEZProto(ezs)); - return builder.build(); - } - - public static List convertEZProto( - List ezs) { - final ArrayList ret = - Lists.newArrayListWithCapacity(ezs.size()); - for (EncryptionZone a : ezs) { - final EncryptionZoneProto.Builder builder = - EncryptionZoneProto.newBuilder(); - builder.setPath(a.getPath()); - builder.setKeyName(a.getKeyName()); - ret.add(builder.build()); - } - return ret; - } - - public static List convertEZ( - List ezs) { - final ArrayList ret = - Lists.newArrayListWithCapacity(ezs.size()); - for (EncryptionZoneProto a : ezs) { - final EncryptionZone ez = - new EncryptionZone(a.getPath(), a.getKeyName()); - ret.add(ez); - } - return ret; - } public static List convert(GetXAttrsResponseProto a) { List xAttrs = a.getXAttrsList(); @@ -2334,6 +2295,18 @@ public static ListXAttrsResponseProto convertListXAttrsResponse( return builder.build(); } + public static EncryptionZoneWithIdProto convert(EncryptionZoneWithId zone) { + return EncryptionZoneWithIdProto.newBuilder() + .setId(zone.getId()) + .setKeyName(zone.getKeyName()) + .setPath(zone.getPath()).build(); + } + + public static EncryptionZoneWithId convert(EncryptionZoneWithIdProto proto) { + return new EncryptionZoneWithId(proto.getPath(), proto.getKeyName(), + proto.getId()); + } + public static ShortCircuitShmSlotProto convert(SlotId slotId) { return ShortCircuitShmSlotProto.newBuilder(). setShmId(convert(slotId.getShmId())). 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index a083ea3a87c..143cc66c3ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -2,22 +2,25 @@ import java.io.IOException; import java.util.EnumSet; -import java.util.HashMap; import java.util.List; -import java.util.Map; +import java.util.NavigableMap; +import java.util.TreeMap; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.XAttrHelper; -import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants .CRYPTO_XATTR_ENCRYPTION_ZONE; @@ -57,17 +60,26 @@ long getINodeId() { } - private final Map encryptionZones; + private final TreeMap encryptionZones; private final FSDirectory dir; + private final int maxListEncryptionZonesResponses; /** * Construct a new EncryptionZoneManager. * * @param dir Enclosing FSDirectory */ - public EncryptionZoneManager(FSDirectory dir) { + public EncryptionZoneManager(FSDirectory dir, Configuration conf) { this.dir = dir; - encryptionZones = new HashMap(); + encryptionZones = new TreeMap(); + maxListEncryptionZonesResponses = conf.getInt( + DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, + DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT + ); + Preconditions.checkArgument(maxListEncryptionZonesResponses >= 0, + DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES + " " + + "must be a positive integer." + ); } /** @@ -236,17 +248,30 @@ XAttr createEncryptionZone(String src, String keyName) } /** - * Return the current list of encryption zones. + * Cursor-based listing of encryption zones. *

* Called while holding the FSDirectory lock. */ - List listEncryptionZones() throws IOException { + BatchedListEntries listEncryptionZones(long prevId) + throws IOException { assert dir.hasReadLock(); - final List ret = - Lists.newArrayListWithExpectedSize(encryptionZones.size()); - for (EncryptionZoneInt ezi : encryptionZones.values()) { - ret.add(new EncryptionZone(getFullPathName(ezi), ezi.getKeyName())); + NavigableMap tailMap = encryptionZones.tailMap + (prevId, false); + final int numResponses = Math.min(maxListEncryptionZonesResponses, + tailMap.size()); + final List zones = + Lists.newArrayListWithExpectedSize(numResponses); + + int count = 0; + for (EncryptionZoneInt ezi : tailMap.values()) { + zones.add(new EncryptionZoneWithId(getFullPathName(ezi), + ezi.getKeyName(), ezi.getINodeId())); + count++; + if (count >= numResponses) { + break; + } } - return ret; + final boolean hasMore = (numResponses < tailMap.size()); + return new BatchedListEntries(zones, hasMore); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index f3ef5c3226b..bb687d7940d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO; import static org.apache.hadoop.util.Time.now; @@ -58,7 +59,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId; import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; import org.apache.hadoop.hdfs.protocol.FsAclPermission; @@ -227,7 +228,7 @@ public int getWriteHoldCount() { nameCache = new NameCache(threshold); namesystem = ns; - ezManager = new EncryptionZoneManager(this); + ezManager = new EncryptionZoneManager(this, conf); } private FSNamesystem getFSNamesystem() { @@ -2646,10 +2647,11 @@ XAttr createEncryptionZone(String src, String keyName) } } - List listEncryptionZones() throws IOException { + BatchedListEntries listEncryptionZones(long prevId) + throws IOException { readLock(); try { - return ezManager.listEncryptionZones(); + return ezManager.listEncryptionZones(prevId); } finally { readUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index e4c7509ea02..da9dcfd1f9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -183,6 +183,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import 
org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; @@ -8559,7 +8560,8 @@ private void createEncryptionZoneInt(final String srcArg, String keyName, logAuditEvent(true, "createEncryptionZone", srcArg, null, resultingStat); } - List listEncryptionZones() throws IOException { + BatchedListEntries listEncryptionZones(long prevId) + throws IOException { boolean success = false; checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); @@ -8567,7 +8569,8 @@ List listEncryptionZones() throws IOException { try { checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); - final List ret = dir.listEncryptionZones(); + final BatchedListEntries ret = + dir.listEncryptionZones(prevId); success = true; return ret; } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index ac721dff7f1..9fb0c33b6b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -77,7 +77,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.FSLimitException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -1432,8 +1432,9 @@ public void createEncryptionZone(String src, String keyName) } @Override - public List listEncryptionZones() throws IOException { - return namesystem.listEncryptionZones(); + public BatchedEntries listEncryptionZones( + long prevId) throws IOException { + return namesystem.listEncryptionZones(prevId); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java index 28aaef2e455..fcad730075e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.tools.TableListing; @@ -201,8 +202,9 @@ public int run(Configuration conf, List args) throws IOException { final TableListing listing = new TableListing.Builder() .addField("").addField("", true) .wrapWidth(MAX_LINE_WIDTH).hideHeaders().build(); - final List ezs = dfs.listEncryptionZones(); - for (EncryptionZone ez : ezs) { + final RemoteIterator it = dfs.listEncryptionZones(); + while (it.hasNext()) { + EncryptionZone ez = it.next(); listing.addRow(ez.getPath(), 
ez.getKeyName()); } System.out.println(listing.toString()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto index 391b0aa5ff5..fadaef1b9d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto @@ -42,13 +42,16 @@ message CreateEncryptionZoneResponseProto { } message ListEncryptionZonesRequestProto { + required int64 id = 1; } -message EncryptionZoneProto { +message EncryptionZoneWithIdProto { required string path = 1; required string keyName = 2; + required int64 id = 3; } message ListEncryptionZonesResponseProto { - repeated EncryptionZoneProto pathsAndKeys = 1; + repeated EncryptionZoneWithIdProto zones = 1; + required bool hasMore = 2; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index fea88166c24..961c214c855 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2052,4 +2052,13 @@ + + dfs.namenode.list.encryption.zones.num.responses + false + When listing encryption zones, the maximum number of zones + that will be returned in a batch. Fetching the list incrementally in + batches improves namenode performance. + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 9a3456fb3bb..b48978c97ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FileSystemTestWrapper; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.protocol.EncryptionZone; @@ -90,6 +91,9 @@ public void setup() throws Exception { conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" ); + // Lower the batch size for testing + conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, + 2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); fs = cluster.getFileSystem(); @@ -114,9 +118,13 @@ public void teardown() { } public void assertNumZones(final int numZones) throws IOException { - final List zones = dfsAdmin.listEncryptionZones(); - assertEquals("Unexpected number of encryption zones!", numZones, - zones.size()); + RemoteIterator it = dfsAdmin.listEncryptionZones(); + int count = 0; + while (it.hasNext()) { + count++; + it.next(); + } + assertEquals("Unexpected number of encryption zones!", numZones, count); } /** @@ -126,9 +134,10 @@ public void assertNumZones(final int numZones) throws IOException { * @throws IOException if a matching zone could not be found */ public void assertZonePresent(String keyName, String path) throws IOException { - final List zones = dfsAdmin.listEncryptionZones(); + final RemoteIterator it = dfsAdmin.listEncryptionZones(); boolean match = false; - for (EncryptionZone zone 
: zones) { + while (it.hasNext()) { + EncryptionZone zone = it.next(); boolean matchKey = (keyName == null); boolean matchPath = (path == null); if (keyName != null && zone.getKeyName().equals(keyName)) { @@ -282,6 +291,16 @@ public Object run() throws Exception { dfsAdmin.createEncryptionZone(deepZone, TEST_KEY); assertNumZones(++numZones); assertZonePresent(null, deepZone.toString()); + + // Create and list some zones to test batching of listEZ + for (int i=1; i<6; i++) { + final Path zonePath = new Path("/listZone" + i); + fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false); + dfsAdmin.createEncryptionZone(zonePath, TEST_KEY); + numZones++; + assertNumZones(numZones); + assertZonePresent(null, zonePath.toString()); + } } /** @@ -369,9 +388,8 @@ public void testReadWrite() throws Exception { // Read them back in and compare byte-by-byte verifyFilesEqual(fs, baseFile, encFile1, len); // Roll the key of the encryption zone - List zones = dfsAdmin.listEncryptionZones(); - assertEquals("Expected 1 EZ", 1, zones.size()); - String keyName = zones.get(0).getKeyName(); + assertNumZones(1); + String keyName = dfsAdmin.listEncryptionZones().next().getKeyName(); cluster.getNamesystem().getProvider().rollNewVersion(keyName); // Read them back in and compare byte-by-byte verifyFilesEqual(fs, baseFile, encFile1, len); @@ -457,14 +475,12 @@ public void testCipherSuiteNegotiation() throws Exception { @Test(timeout = 120000) public void testCreateEZWithNoProvider() throws Exception { - + // Unset the key provider and make sure EZ ops don't work final Configuration clusterConf = cluster.getConfiguration(0); clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ""); cluster.restartNameNode(true); cluster.waitActive(); - /* Test failure of create EZ on a directory that doesn't exist. */ final Path zone1 = new Path("/zone1"); - /* Normal creation of an EZ */ fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true); try { dfsAdmin.createEncryptionZone(zone1, TEST_KEY); @@ -476,8 +492,7 @@ public void testCreateEZWithNoProvider() throws Exception { JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" ); // Try listing EZs as well - List zones = dfsAdmin.listEncryptionZones(); - assertEquals("Expected no zones", 0, zones.size()); + assertNumZones(0); } private class MyInjector extends EncryptionFaultInjector { From 07860b1c9ee3e1d77d39a52d41c3243fdcc10c45 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 1 Aug 2014 18:56:06 +0000 Subject: [PATCH 107/354] HDFS-6788. Improve synchronization in BPOfferService with read write lock. Contributed by Yongjun Zhang. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615190 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/server/datanode/BPOfferService.java | 299 +++++++++++------- 2 files changed, 189 insertions(+), 113 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 158f7c18ff2..2a10d5b5b53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -349,6 +349,9 @@ Release 2.6.0 - UNRELEASED HDFS-6802. Some tests in TestDFSClientFailover are missing @Test annotation. (Akira Ajisaka via wang) + HDFS-6788. Improve synchronization in BPOfferService with read write lock. + (Yongjun Zhang via wang) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. 
(wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 0a6549de8f9..39e842ccfd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -21,6 +21,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Sets; + import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -38,6 +39,8 @@ import java.util.List; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantReadWriteLock; /** * One instance per block-pool/namespace on the DN, which handles the @@ -91,6 +94,28 @@ class BPOfferService { */ private long lastActiveClaimTxId = -1; + private final ReentrantReadWriteLock mReadWriteLock = + new ReentrantReadWriteLock(); + private final Lock mReadLock = mReadWriteLock.readLock(); + private final Lock mWriteLock = mReadWriteLock.writeLock(); + + // utility methods to acquire and release read lock and write lock + void readLock() { + mReadLock.lock(); + } + + void readUnlock() { + mReadLock.unlock(); + } + + void writeLock() { + mWriteLock.lock(); + } + + void writeUnlock() { + mWriteLock.unlock(); + } + BPOfferService(List nnAddrs, DataNode dn) { Preconditions.checkArgument(!nnAddrs.isEmpty(), "Must pass at least one NN."); @@ -135,14 +160,19 @@ boolean isAlive() { } return false; } - - synchronized String getBlockPoolId() { - if (bpNSInfo != null) { - return bpNSInfo.getBlockPoolID(); - } else { - LOG.warn("Block pool ID needed, but service not yet registered with NN", - new Exception("trace")); - return null; + + String getBlockPoolId() { + readLock(); + try { + if (bpNSInfo != null) { + return bpNSInfo.getBlockPoolID(); + } else { + LOG.warn("Block pool ID needed, but service not yet registered with NN", + new Exception("trace")); + return null; + } + } finally { + readUnlock(); } } @@ -150,27 +180,37 @@ boolean hasBlockPoolId() { return getNamespaceInfo() != null; } - synchronized NamespaceInfo getNamespaceInfo() { - return bpNSInfo; + NamespaceInfo getNamespaceInfo() { + readLock(); + try { + return bpNSInfo; + } finally { + readUnlock(); + } } @Override - public synchronized String toString() { - if (bpNSInfo == null) { - // If we haven't yet connected to our NN, we don't yet know our - // own block pool ID. - // If _none_ of the block pools have connected yet, we don't even - // know the DatanodeID ID of this DN. - String datanodeUuid = dn.getDatanodeUuid(); + public String toString() { + readLock(); + try { + if (bpNSInfo == null) { + // If we haven't yet connected to our NN, we don't yet know our + // own block pool ID. + // If _none_ of the block pools have connected yet, we don't even + // know the DatanodeID ID of this DN. 
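The change above replaces BPOfferService's coarse synchronized methods with an explicit ReentrantReadWriteLock, so read-only accessors such as getBlockPoolId(), getNamespaceInfo(), and toString() can run concurrently while mutating paths still take an exclusive write lock. The sketch below is a minimal, self-contained illustration of that idiom using hypothetical names (it is not the HDFS class itself): acquire in a helper, release in finally.

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Hypothetical sketch of the locking idiom adopted in BPOfferService:
    // readers share the read lock, writers take the write lock, and both
    // release in a finally block so exceptions cannot leak a held lock.
    class LockedState {
      private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
      private final Lock readLock = rwLock.readLock();
      private final Lock writeLock = rwLock.writeLock();

      private String blockPoolId;            // stands in for bpNSInfo-derived state

      String getBlockPoolId() {              // many readers may hold this at once
        readLock.lock();
        try {
          return blockPoolId;
        } finally {
          readLock.unlock();
        }
      }

      void setBlockPoolId(String id) {       // writers are exclusive
        writeLock.lock();
        try {
          this.blockPoolId = id;
        } finally {
          writeLock.unlock();
        }
      }

      public static void main(String[] args) {
        LockedState s = new LockedState();
        s.setBlockPoolId("BP-example-1");
        System.out.println(s.getBlockPoolId());
      }
    }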
+ String datanodeUuid = dn.getDatanodeUuid(); - if (datanodeUuid == null || datanodeUuid.isEmpty()) { - datanodeUuid = "unassigned"; + if (datanodeUuid == null || datanodeUuid.isEmpty()) { + datanodeUuid = "unassigned"; + } + return "Block pool (Datanode Uuid " + datanodeUuid + ")"; + } else { + return "Block pool " + getBlockPoolId() + + " (Datanode Uuid " + dn.getDatanodeUuid() + + ")"; } - return "Block pool (Datanode Uuid " + datanodeUuid + ")"; - } else { - return "Block pool " + getBlockPoolId() + - " (Datanode Uuid " + dn.getDatanodeUuid() + - ")"; + } finally { + readUnlock(); } } @@ -266,32 +306,37 @@ DataNode getDataNode() { * verifies that this namespace matches (eg to prevent a misconfiguration * where a StandbyNode from a different cluster is specified) */ - synchronized void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException { - if (this.bpNSInfo == null) { - this.bpNSInfo = nsInfo; - boolean success = false; + void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException { + writeLock(); + try { + if (this.bpNSInfo == null) { + this.bpNSInfo = nsInfo; + boolean success = false; - // Now that we know the namespace ID, etc, we can pass this to the DN. - // The DN can now initialize its local storage if we are the - // first BP to handshake, etc. - try { - dn.initBlockPool(this); - success = true; - } finally { - if (!success) { - // The datanode failed to initialize the BP. We need to reset - // the namespace info so that other BPService actors still have - // a chance to set it, and re-initialize the datanode. - this.bpNSInfo = null; + // Now that we know the namespace ID, etc, we can pass this to the DN. + // The DN can now initialize its local storage if we are the + // first BP to handshake, etc. + try { + dn.initBlockPool(this); + success = true; + } finally { + if (!success) { + // The datanode failed to initialize the BP. We need to reset + // the namespace info so that other BPService actors still have + // a chance to set it, and re-initialize the datanode. + this.bpNSInfo = null; + } } + } else { + checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(), + "Blockpool ID"); + checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(), + "Namespace ID"); + checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(), + "Cluster ID"); } - } else { - checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(), - "Blockpool ID"); - checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(), - "Namespace ID"); - checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(), - "Cluster ID"); + } finally { + writeUnlock(); } } @@ -300,22 +345,27 @@ synchronized void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOExcep * NN, it calls this function to verify that the NN it connected to * is consistent with other NNs serving the block-pool. */ - synchronized void registrationSucceeded(BPServiceActor bpServiceActor, + void registrationSucceeded(BPServiceActor bpServiceActor, DatanodeRegistration reg) throws IOException { - if (bpRegistration != null) { - checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(), - reg.getStorageInfo().getNamespaceID(), "namespace ID"); - checkNSEquality(bpRegistration.getStorageInfo().getClusterID(), - reg.getStorageInfo().getClusterID(), "cluster ID"); - } else { - bpRegistration = reg; - } - - dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId()); - // Add the initial block token secret keys to the DN's secret manager. 
- if (dn.isBlockTokenEnabled) { - dn.blockPoolTokenSecretManager.addKeys(getBlockPoolId(), - reg.getExportedKeys()); + writeLock(); + try { + if (bpRegistration != null) { + checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(), + reg.getStorageInfo().getNamespaceID(), "namespace ID"); + checkNSEquality(bpRegistration.getStorageInfo().getClusterID(), + reg.getStorageInfo().getClusterID(), "cluster ID"); + } else { + bpRegistration = reg; + } + + dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId()); + // Add the initial block token secret keys to the DN's secret manager. + if (dn.isBlockTokenEnabled) { + dn.blockPoolTokenSecretManager.addKeys(getBlockPoolId(), + reg.getExportedKeys()); + } + } finally { + writeUnlock(); } } @@ -333,25 +383,35 @@ private static void checkNSEquality( } } - synchronized DatanodeRegistration createRegistration() { - Preconditions.checkState(bpNSInfo != null, - "getRegistration() can only be called after initial handshake"); - return dn.createBPRegistration(bpNSInfo); + DatanodeRegistration createRegistration() { + writeLock(); + try { + Preconditions.checkState(bpNSInfo != null, + "getRegistration() can only be called after initial handshake"); + return dn.createBPRegistration(bpNSInfo); + } finally { + writeUnlock(); + } } /** * Called when an actor shuts down. If this is the last actor * to shut down, shuts down the whole blockpool in the DN. */ - synchronized void shutdownActor(BPServiceActor actor) { - if (bpServiceToActive == actor) { - bpServiceToActive = null; - } + void shutdownActor(BPServiceActor actor) { + writeLock(); + try { + if (bpServiceToActive == actor) { + bpServiceToActive = null; + } - bpServices.remove(actor); + bpServices.remove(actor); - if (bpServices.isEmpty()) { - dn.shutdownBlockPool(this); + if (bpServices.isEmpty()) { + dn.shutdownBlockPool(this); + } + } finally { + writeUnlock(); } } @@ -392,11 +452,16 @@ void reportRemoteBadBlock(DatanodeInfo dnInfo, ExtendedBlock block) { * @return a proxy to the active NN, or null if the BPOS has not * acknowledged any NN as active yet. */ - synchronized DatanodeProtocolClientSideTranslatorPB getActiveNN() { - if (bpServiceToActive != null) { - return bpServiceToActive.bpNamenode; - } else { - return null; + DatanodeProtocolClientSideTranslatorPB getActiveNN() { + readLock(); + try { + if (bpServiceToActive != null) { + return bpServiceToActive.bpNamenode; + } else { + return null; + } + } finally { + readUnlock(); } } @@ -424,45 +489,50 @@ void signalRollingUpgrade(boolean inProgress) { * @param actor the actor which received the heartbeat * @param nnHaState the HA-related heartbeat contents */ - synchronized void updateActorStatesFromHeartbeat( + void updateActorStatesFromHeartbeat( BPServiceActor actor, NNHAStatusHeartbeat nnHaState) { - final long txid = nnHaState.getTxId(); - - final boolean nnClaimsActive = - nnHaState.getState() == HAServiceState.ACTIVE; - final boolean bposThinksActive = bpServiceToActive == actor; - final boolean isMoreRecentClaim = txid > lastActiveClaimTxId; - - if (nnClaimsActive && !bposThinksActive) { - LOG.info("Namenode " + actor + " trying to claim ACTIVE state with " + - "txid=" + txid); - if (!isMoreRecentClaim) { - // Split-brain scenario - an NN is trying to claim active - // state when a different NN has already claimed it with a higher - // txid. 
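The transaction-ID comparison in updateActorStatesFromHeartbeat (refactored above to run under the write lock) is what arbitrates competing ACTIVE claims: a claim is only honored if its txid is newer than the last accepted claim. The following is a compact, hypothetical distillation of that accept/reject ordering only; it ignores relinquishing and the HA heartbeat types and is not HDFS test code:

    // Hypothetical distillation of the lastActiveClaimTxId arbitration above.
    public class ActiveClaimArbiter {
      private String active = null;
      private long lastActiveClaimTxId = -1;

      /** Returns true if the ACTIVE claim is accepted. */
      boolean claimActive(String actor, long txid) {
        if (txid <= lastActiveClaimTxId && !actor.equals(active)) {
          return false;                 // stale claim: a more recent claim already won
        }
        active = actor;
        lastActiveClaimTxId = txid;
        return true;
      }

      public static void main(String[] args) {
        ActiveClaimArbiter a = new ActiveClaimArbiter();
        System.out.println(a.claimActive("nn1", 100));  // true  - first claim wins
        System.out.println(a.claimActive("nn2", 90));   // false - older txid is rejected
        System.out.println(a.claimActive("nn2", 120));  // true  - newer txid takes over
      }
    }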
- LOG.warn("NN " + actor + " tried to claim ACTIVE state at txid=" + - txid + " but there was already a more recent claim at txid=" + - lastActiveClaimTxId); - return; - } else { - if (bpServiceToActive == null) { - LOG.info("Acknowledging ACTIVE Namenode " + actor); + writeLock(); + try { + final long txid = nnHaState.getTxId(); + + final boolean nnClaimsActive = + nnHaState.getState() == HAServiceState.ACTIVE; + final boolean bposThinksActive = bpServiceToActive == actor; + final boolean isMoreRecentClaim = txid > lastActiveClaimTxId; + + if (nnClaimsActive && !bposThinksActive) { + LOG.info("Namenode " + actor + " trying to claim ACTIVE state with " + + "txid=" + txid); + if (!isMoreRecentClaim) { + // Split-brain scenario - an NN is trying to claim active + // state when a different NN has already claimed it with a higher + // txid. + LOG.warn("NN " + actor + " tried to claim ACTIVE state at txid=" + + txid + " but there was already a more recent claim at txid=" + + lastActiveClaimTxId); + return; } else { - LOG.info("Namenode " + actor + " taking over ACTIVE state from " + - bpServiceToActive + " at higher txid=" + txid); + if (bpServiceToActive == null) { + LOG.info("Acknowledging ACTIVE Namenode " + actor); + } else { + LOG.info("Namenode " + actor + " taking over ACTIVE state from " + + bpServiceToActive + " at higher txid=" + txid); + } + bpServiceToActive = actor; } - bpServiceToActive = actor; + } else if (!nnClaimsActive && bposThinksActive) { + LOG.info("Namenode " + actor + " relinquishing ACTIVE state with " + + "txid=" + nnHaState.getTxId()); + bpServiceToActive = null; } - } else if (!nnClaimsActive && bposThinksActive) { - LOG.info("Namenode " + actor + " relinquishing ACTIVE state with " + - "txid=" + nnHaState.getTxId()); - bpServiceToActive = null; - } - - if (bpServiceToActive == actor) { - assert txid >= lastActiveClaimTxId; - lastActiveClaimTxId = txid; + + if (bpServiceToActive == actor) { + assert txid >= lastActiveClaimTxId; + lastActiveClaimTxId = txid; + } + } finally { + writeUnlock(); } } @@ -533,12 +603,15 @@ boolean processCommandFromActor(DatanodeCommand cmd, actor.reRegister(); return true; } - synchronized (this) { + writeLock(); + try { if (actor == bpServiceToActive) { return processCommandFromActive(cmd, actor); } else { return processCommandFromStandby(cmd, actor); } + } finally { + writeUnlock(); } } From 1ba3f8971433cdbc3e43fd3605065d811dab5b16 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Fri, 1 Aug 2014 20:41:05 +0000 Subject: [PATCH 108/354] HDFS-6482. 
Use block ID-based block layout on datanodes (James Thomas via Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615223 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/io/nativeio/NativeIO.java | 12 + .../org/apache/hadoop/util/DiskChecker.java | 14 ++ .../org/apache/hadoop/io/nativeio/NativeIO.c | 37 +++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 6 +- .../apache/hadoop/hdfs/protocol/Block.java | 14 +- .../datanode/BlockPoolSliceStorage.java | 18 +- .../datanode/DataNodeLayoutVersion.java | 5 +- .../hdfs/server/datanode/DataStorage.java | 144 +++++++++-- .../hdfs/server/datanode/DatanodeUtil.java | 36 +++ .../hdfs/server/datanode/ReplicaInfo.java | 62 ++--- .../fsdataset/impl/BlockPoolSlice.java | 96 +++++--- .../fsdataset/impl/FsDatasetImpl.java | 7 - .../datanode/fsdataset/impl/FsVolumeImpl.java | 11 +- .../server/datanode/fsdataset/impl/LDir.java | 228 ------------------ .../src/main/resources/hdfs-default.xml | 8 + .../apache/hadoop/hdfs/MiniDFSCluster.java | 34 ++- .../apache/hadoop/hdfs/TestDFSFinalize.java | 9 +- .../apache/hadoop/hdfs/TestDFSRollback.java | 2 +- .../hdfs/TestDFSStorageStateRecovery.java | 16 +- .../apache/hadoop/hdfs/TestDFSUpgrade.java | 12 +- .../hadoop/hdfs/TestDFSUpgradeFromImage.java | 21 +- .../hadoop/hdfs/TestDatanodeBlockScanner.java | 15 +- .../hdfs/TestDatanodeLayoutUpgrade.java | 48 ++++ .../hadoop/hdfs/TestFileCorruption.java | 40 +-- .../apache/hadoop/hdfs/UpgradeUtilities.java | 44 ++-- .../datanode/TestDataNodeVolumeFailure.java | 24 +- .../server/datanode/TestDeleteBlockPool.java | 7 +- .../hadoop/hdfs/server/namenode/TestFsck.java | 17 +- .../namenode/TestListCorruptFileBlocks.java | 159 ++++++------ .../test/resources/hadoop-24-datanode-dir.tgz | Bin 0 -> 637608 bytes .../test/resources/hadoop-datanode-dir.txt | 23 ++ 32 files changed, 627 insertions(+), 545 deletions(-) delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-datanode-dir.txt diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index 976a93f91b1..fafa29543e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.HardLink; import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Shell; @@ -823,6 +824,14 @@ public static void renameTo(File src, File dst) } } + public static void link(File src, File dst) throws IOException { + if (!nativeLoaded) { + HardLink.createHardLink(src, dst); + } else { + link0(src.getAbsolutePath(), dst.getAbsolutePath()); + } + } + /** * A version of renameTo that throws a descriptive exception when it fails. 
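The NativeIO.link(File, File) helper added above gives the upgrade code a single entry point for creating hard links: it uses the JNI link0/CreateHardLink path when the native library is loaded and falls back to HardLink.createHardLink otherwise. A minimal usage sketch follows; the paths are invented, and in the real upgrade the source is a block file in the old flat layout while the destination sits in the new block ID-based subdirectory tree:

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.io.nativeio.NativeIO;

    public class LinkExample {
      public static void main(String[] args) throws IOException {
        // Illustrative paths only. Block ID 1073741825 (0x40000001) maps to
        // subdir0/subdir0 under the new layout.
        File src = new File("/data/dn/current/finalized/blk_1073741825");
        File dst = new File("/data/dn/current/finalized/subdir0/subdir0/blk_1073741825");
        NativeIO.link(src, dst);   // native hard link if available, Java fallback otherwise
      }
    }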
* @@ -833,4 +842,7 @@ public static void renameTo(File src, File dst) */ private static native void renameTo0(String src, String dst) throws NativeIOException; + + private static native void link0(String src, String dst) + throws NativeIOException; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java index 72a4d1b70e9..f2ee446b4ab 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java @@ -78,6 +78,20 @@ public static boolean mkdirsWithExistsCheck(File dir) { (mkdirsWithExistsCheck(new File(parent)) && (canonDir.mkdir() || canonDir.exists())); } + + /** + * Recurse down a directory tree, checking all child directories. + * @param dir + * @throws DiskErrorException + */ + public static void checkDirs(File dir) throws DiskErrorException { + checkDir(dir); + for (File child : dir.listFiles()) { + if (child.isDirectory()) { + checkDirs(child); + } + } + } /** * Create the directory if it doesn't exist and check that dir is readable, diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c index 95bb987602f..f3885d7499a 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c @@ -1054,6 +1054,43 @@ done: #endif } +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_nativeio_NativeIO_link0(JNIEnv *env, +jclass clazz, jstring jsrc, jstring jdst) +{ +#ifdef UNIX + const char *src = NULL, *dst = NULL; + + src = (*env)->GetStringUTFChars(env, jsrc, NULL); + if (!src) goto done; // exception was thrown + dst = (*env)->GetStringUTFChars(env, jdst, NULL); + if (!dst) goto done; // exception was thrown + if (link(src, dst)) { + throw_ioe(env, errno); + } + +done: + if (src) (*env)->ReleaseStringUTFChars(env, jsrc, src); + if (dst) (*env)->ReleaseStringUTFChars(env, jdst, dst); +#endif + +#ifdef WINDOWS + LPCTSTR src = NULL, dst = NULL; + + src = (LPCTSTR) (*env)->GetStringChars(env, jsrc, NULL); + if (!src) goto done; // exception was thrown + dst = (LPCTSTR) (*env)->GetStringChars(env, jdst, NULL); + if (!dst) goto done; // exception was thrown + if (!CreateHardLink(dst, src)) { + throw_ioe(env, GetLastError()); + } + +done: + if (src) (*env)->ReleaseStringChars(env, jsrc, src); + if (dst) (*env)->ReleaseStringChars(env, jdst, dst); +#endif +} + JNIEXPORT jlong JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_getMemlockLimit0( JNIEnv *env, jclass clazz) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2a10d5b5b53..b13797ae06d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -130,6 +130,9 @@ Trunk (Unreleased) HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable directory. (Jing Zhao via wheat9) + HDFS-6482. 
Use block ID-based block layout on datanodes (James Thomas via + Colin Patrick McCabe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index c16c15d34b0..4fb5ca49c5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -381,8 +381,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT; public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads"; public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096; - public static final String DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks"; - public static final int DFS_DATANODE_NUMBLOCKS_DEFAULT = 64; public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours"; public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0; public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed"; @@ -666,4 +664,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY = "dfs.datanode.slow.io.warning.threshold.ms"; public static final long DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 300; + + public static final String DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY = + "dfs.datanode.block.id.layout.upgrade.threads"; + public static final int DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS = 12; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java index 680d73b7f94..b35365aa7a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java @@ -50,6 +50,9 @@ public class Block implements Writable, Comparable { public static final Pattern metaFilePattern = Pattern .compile(BLOCK_FILE_PREFIX + "(-??\\d++)_(\\d++)\\" + METADATA_EXTENSION + "$"); + public static final Pattern metaOrBlockFilePattern = Pattern + .compile(BLOCK_FILE_PREFIX + "(-??\\d++)(_(\\d++)\\" + METADATA_EXTENSION + + ")?$"); public static boolean isBlockFilename(File f) { String name = f.getName(); @@ -65,6 +68,11 @@ public static boolean isMetaFilename(String name) { return metaFilePattern.matcher(name).matches(); } + public static File metaToBlockFile(File metaFile) { + return new File(metaFile.getParent(), metaFile.getName().substring( + 0, metaFile.getName().lastIndexOf('_'))); + } + /** * Get generation stamp from the name of the metafile name */ @@ -75,10 +83,10 @@ public static long getGenerationStamp(String metaFile) { } /** - * Get the blockId from the name of the metafile name + * Get the blockId from the name of the meta or block file */ - public static long getBlockId(String metaFile) { - Matcher m = metaFilePattern.matcher(metaFile); + public static long getBlockId(String metaOrBlockFile) { + Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile); return m.matches() ? 
Long.parseLong(m.group(1)) : 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index dec0d55004d..d065b5736e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -152,7 +152,7 @@ void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo, // During startup some of them can upgrade or roll back // while others could be up-to-date for the regular startup. for (int idx = 0; idx < getNumStorageDirs(); idx++) { - doTransition(getStorageDir(idx), nsInfo, startOpt); + doTransition(datanode, getStorageDir(idx), nsInfo, startOpt); assert getCTime() == nsInfo.getCTime() : "Data-node and name-node CTimes must be the same."; } @@ -242,7 +242,7 @@ protected void setFieldsFromProperties(Properties props, StorageDirectory sd) * @param startOpt startup option * @throws IOException */ - private void doTransition(StorageDirectory sd, + private void doTransition(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo, StartupOption startOpt) throws IOException { if (startOpt == StartupOption.ROLLBACK) { doRollback(sd, nsInfo); // rollback if applicable @@ -275,7 +275,7 @@ private void doTransition(StorageDirectory sd, } if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION || this.cTime < nsInfo.getCTime()) { - doUpgrade(sd, nsInfo); // upgrade + doUpgrade(datanode, sd, nsInfo); // upgrade return; } // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime @@ -304,7 +304,8 @@ private void doTransition(StorageDirectory sd, * @param nsInfo Namespace Info from the namenode * @throws IOException on error */ - void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException { + void doUpgrade(DataNode datanode, StorageDirectory bpSd, NamespaceInfo nsInfo) + throws IOException { // Upgrading is applicable only to release with federation or after if (!DataNodeLayoutVersion.supports( LayoutVersion.Feature.FEDERATION, layoutVersion)) { @@ -340,7 +341,7 @@ void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException { rename(bpCurDir, bpTmpDir); // 3. 
Create new /current with block files hardlinks and VERSION - linkAllBlocks(bpTmpDir, bpCurDir); + linkAllBlocks(datanode, bpTmpDir, bpCurDir); this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; assert this.namespaceID == nsInfo.getNamespaceID() : "Data-node and name-node layout versions must be the same."; @@ -517,14 +518,15 @@ public String toString() { * @param toDir the current data directory * @throws IOException if error occurs during hardlink */ - private void linkAllBlocks(File fromDir, File toDir) throws IOException { + private void linkAllBlocks(DataNode datanode, File fromDir, File toDir) + throws IOException { // do the link int diskLayoutVersion = this.getLayoutVersion(); // hardlink finalized blocks in tmpDir HardLink hardLink = new HardLink(); - DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED), + DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED), new File(toDir,DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink); - DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_RBW), + DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW), new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink); LOG.info( hardLink.linkStats.report() ); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java index 26c7457645c..23e7cfe7184 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java @@ -62,7 +62,10 @@ public static boolean supports(final LayoutFeature f, final int lv) { * */ public static enum Feature implements LayoutFeature { - FIRST_LAYOUT(-55, -53, "First datanode layout", false); + FIRST_LAYOUT(-55, -53, "First datanode layout", false), + BLOCKID_BASED_LAYOUT(-56, + "The block ID of a finalized block uniquely determines its position " + + "in the directory structure"); private final FeatureInfo info; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 5c5cecd58f0..5a55d094e11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -18,13 +18,19 @@ package org.apache.hadoop.hdfs.server.datanode; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.Futures; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.HardLink; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; @@ -35,13 +41,30 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DiskChecker; -import java.io.*; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; import java.nio.channels.FileLock; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; /** * Data storage information file. @@ -261,6 +284,7 @@ void recoverTransitionRead(DataNode datanode, String bpID, NamespaceInfo nsInfo, STORAGE_DIR_CURRENT)); bpDataDirs.add(bpRoot); } + // mkdir for the list of BlockPoolStorage makeBlockPoolDataDir(bpDataDirs, null); BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage( @@ -488,7 +512,7 @@ private void doTransition( DataNode datanode, // do upgrade if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) { - doUpgrade(sd, nsInfo); // upgrade + doUpgrade(datanode, sd, nsInfo); // upgrade return; } @@ -523,7 +547,8 @@ private void doTransition( DataNode datanode, * @param sd storage directory * @throws IOException on error */ - void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { + void doUpgrade(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo) + throws IOException { // If the existing on-disk layout version supportes federation, simply // update its layout version. if (DataNodeLayoutVersion.supports( @@ -568,7 +593,8 @@ void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException { BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(), nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID()); bpStorage.format(curDir, nsInfo); - linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT)); + linkAllBlocks(datanode, tmpDir, bbwDir, new File(curBpDir, + STORAGE_DIR_CURRENT)); // 4. 
Write version file under /current layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION; @@ -746,22 +772,22 @@ void finalizeUpgrade(String bpID) throws IOException { * * @throws IOException If error occurs during hardlink */ - private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir) - throws IOException { + private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir, + File toDir) throws IOException { HardLink hardLink = new HardLink(); // do the link int diskLayoutVersion = this.getLayoutVersion(); if (DataNodeLayoutVersion.supports( LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) { // hardlink finalized blocks in tmpDir/finalized - linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED), + linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED), new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink); // hardlink rbw blocks in tmpDir/rbw - linkBlocks(new File(fromDir, STORAGE_DIR_RBW), + linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW), new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink); } else { // pre-RBW version // hardlink finalized blocks in tmpDir - linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED), + linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink); if (fromBbwDir.exists()) { /* @@ -770,15 +796,67 @@ private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir) * NOT underneath the 'current' directory in those releases. See * HDFS-3731 for details. */ - linkBlocks(fromBbwDir, + linkBlocks(datanode, fromBbwDir, new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink); } } LOG.info( hardLink.linkStats.report() ); } + + private static class LinkArgs { + public File src; + public File dst; + + public LinkArgs(File src, File dst) { + this.src = src; + this.dst = dst; + } + } + + static void linkBlocks(DataNode datanode, File from, File to, int oldLV, + HardLink hl) throws IOException { + boolean upgradeToIdBasedLayout = false; + // If we are upgrading from a version older than the one where we introduced + // block ID-based layout AND we're working with the finalized directory, + // we'll need to upgrade from the old flat layout to the block ID-based one + if (oldLV > DataNodeLayoutVersion.Feature.BLOCKID_BASED_LAYOUT.getInfo(). 
+ getLayoutVersion() && to.getName().equals(STORAGE_DIR_FINALIZED)) { + upgradeToIdBasedLayout = true; + } + + final List idBasedLayoutSingleLinks = Lists.newArrayList(); + linkBlocksHelper(from, to, oldLV, hl, upgradeToIdBasedLayout, to, + idBasedLayoutSingleLinks); + int numLinkWorkers = datanode.getConf().getInt( + DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY, + DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS); + ExecutorService linkWorkers = Executors.newFixedThreadPool(numLinkWorkers); + final int step = idBasedLayoutSingleLinks.size() / numLinkWorkers + 1; + List> futures = Lists.newArrayList(); + for (int i = 0; i < idBasedLayoutSingleLinks.size(); i += step) { + final int iCopy = i; + futures.add(linkWorkers.submit(new Callable() { + @Override + public Void call() throws IOException { + int upperBound = Math.min(iCopy + step, + idBasedLayoutSingleLinks.size()); + for (int j = iCopy; j < upperBound; j++) { + LinkArgs cur = idBasedLayoutSingleLinks.get(j); + NativeIO.link(cur.src, cur.dst); + } + return null; + } + })); + } + linkWorkers.shutdown(); + for (Future f : futures) { + Futures.get(f, IOException.class); + } + } - static void linkBlocks(File from, File to, int oldLV, HardLink hl) - throws IOException { + static void linkBlocksHelper(File from, File to, int oldLV, HardLink hl, + boolean upgradeToIdBasedLayout, File blockRoot, + List idBasedLayoutSingleLinks) throws IOException { if (!from.exists()) { return; } @@ -805,9 +883,6 @@ static void linkBlocks(File from, File to, int oldLV, HardLink hl) // from is a directory hl.linkStats.countDirs++; - if (!to.mkdirs()) - throw new IOException("Cannot create directory " + to); - String[] blockNames = from.list(new java.io.FilenameFilter() { @Override public boolean accept(File dir, String name) { @@ -815,12 +890,36 @@ public boolean accept(File dir, String name) { } }); + // If we are upgrading to block ID-based layout, we don't want to recreate + // any subdirs from the source that contain blocks, since we have a new + // directory structure + if (!upgradeToIdBasedLayout || !to.getName().startsWith( + BLOCK_SUBDIR_PREFIX)) { + if (!to.mkdirs()) + throw new IOException("Cannot create directory " + to); + } + // Block files just need hard links with the same file names // but a different directory if (blockNames.length > 0) { - HardLink.createHardLinkMult(from, blockNames, to); - hl.linkStats.countMultLinks++; - hl.linkStats.countFilesMultLinks += blockNames.length; + if (upgradeToIdBasedLayout) { + for (String blockName : blockNames) { + long blockId = Block.getBlockId(blockName); + File blockLocation = DatanodeUtil.idToBlockDir(blockRoot, blockId); + if (!blockLocation.exists()) { + if (!blockLocation.mkdirs()) { + throw new IOException("Failed to mkdirs " + blockLocation); + } + } + idBasedLayoutSingleLinks.add(new LinkArgs(new File(from, blockName), + new File(blockLocation, blockName))); + hl.linkStats.countSingleLinks++; + } + } else { + HardLink.createHardLinkMult(from, blockNames, to); + hl.linkStats.countMultLinks++; + hl.linkStats.countFilesMultLinks += blockNames.length; + } } else { hl.linkStats.countEmptyDirs++; } @@ -834,8 +933,9 @@ public boolean accept(File dir, String name) { } }); for(int i = 0; i < otherNames.length; i++) - linkBlocks(new File(from, otherNames[i]), - new File(to, otherNames[i]), oldLV, hl); + linkBlocksHelper(new File(from, otherNames[i]), + new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout, + blockRoot, idBasedLayoutSingleLinks); } /** diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java index 0a0d57bd6e3..bd1ba2f0908 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java @@ -30,6 +30,8 @@ public class DatanodeUtil { public static final String DISK_ERROR = "Possible disk error: "; + private static final String SEP = System.getProperty("file.separator"); + /** Get the cause of an I/O exception if caused by a possible disk error * @param ioe an I/O exception * @return cause if the I/O exception is caused by a possible disk error; @@ -78,4 +80,38 @@ public static String getMetaName(String blockName, long generationStamp) { public static File getUnlinkTmpFile(File f) { return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX); } + + /** + * Checks whether there are any files anywhere in the directory tree rooted + * at dir (directories don't count as files). dir must exist + * @return true if there are no files + * @throws IOException if unable to list subdirectories + */ + public static boolean dirNoFilesRecursive(File dir) throws IOException { + File[] contents = dir.listFiles(); + if (contents == null) { + throw new IOException("Cannot list contents of " + dir); + } + for (File f : contents) { + if (!f.isDirectory() || (f.isDirectory() && !dirNoFilesRecursive(f))) { + return false; + } + } + return true; + } + + /** + * Get the directory where a finalized block with this ID should be stored. + * Do not attempt to create the directory. + * @param root the root directory where finalized blocks are stored + * @param blockId + * @return + */ + public static File idToBlockDir(File root, long blockId) { + int d1 = (int)((blockId >> 16) & 0xff); + int d2 = (int)((blockId >> 8) & 0xff); + String path = DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP + + DataStorage.BLOCK_SUBDIR_PREFIX + d2; + return new File(root, path); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java index 738e16df78f..0dcdf0573e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java @@ -54,10 +54,10 @@ abstract public class ReplicaInfo extends Block implements Replica { private File baseDir; /** - * Ints representing the sub directory path from base dir to the directory - * containing this replica. + * Whether or not this replica's parent directory includes subdirs, in which + * case we can generate them based on the replica's block ID */ - private int[] subDirs; + private boolean hasSubdirs; private static final Map internedBaseDirs = new HashMap(); @@ -151,18 +151,8 @@ public String getStorageUuid() { * @return the parent directory path where this replica is located */ File getDir() { - if (subDirs == null) { - return null; - } - - StringBuilder sb = new StringBuilder(); - for (int i : subDirs) { - sb.append(DataStorage.BLOCK_SUBDIR_PREFIX); - sb.append(i); - sb.append("/"); - } - File ret = new File(baseDir, sb.toString()); - return ret; + return hasSubdirs ? 
DatanodeUtil.idToBlockDir(baseDir, + getBlockId()) : baseDir; } /** @@ -175,54 +165,46 @@ public void setDir(File dir) { private void setDirInternal(File dir) { if (dir == null) { - subDirs = null; baseDir = null; return; } - ReplicaDirInfo replicaDirInfo = parseSubDirs(dir); - this.subDirs = replicaDirInfo.subDirs; + ReplicaDirInfo dirInfo = parseBaseDir(dir); + this.hasSubdirs = dirInfo.hasSubidrs; synchronized (internedBaseDirs) { - if (!internedBaseDirs.containsKey(replicaDirInfo.baseDirPath)) { + if (!internedBaseDirs.containsKey(dirInfo.baseDirPath)) { // Create a new String path of this file and make a brand new File object // to guarantee we drop the reference to the underlying char[] storage. - File baseDir = new File(replicaDirInfo.baseDirPath); - internedBaseDirs.put(replicaDirInfo.baseDirPath, baseDir); + File baseDir = new File(dirInfo.baseDirPath); + internedBaseDirs.put(dirInfo.baseDirPath, baseDir); } - this.baseDir = internedBaseDirs.get(replicaDirInfo.baseDirPath); + this.baseDir = internedBaseDirs.get(dirInfo.baseDirPath); } } - + @VisibleForTesting public static class ReplicaDirInfo { - @VisibleForTesting public String baseDirPath; - - @VisibleForTesting - public int[] subDirs; + public boolean hasSubidrs; + + public ReplicaDirInfo (String baseDirPath, boolean hasSubidrs) { + this.baseDirPath = baseDirPath; + this.hasSubidrs = hasSubidrs; + } } @VisibleForTesting - public static ReplicaDirInfo parseSubDirs(File dir) { - ReplicaDirInfo ret = new ReplicaDirInfo(); + public static ReplicaDirInfo parseBaseDir(File dir) { File currentDir = dir; - List subDirList = new ArrayList(); + boolean hasSubdirs = false; while (currentDir.getName().startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)) { - // Prepend the integer into the list. - subDirList.add(0, Integer.parseInt(currentDir.getName().replaceFirst( - DataStorage.BLOCK_SUBDIR_PREFIX, ""))); + hasSubdirs = true; currentDir = currentDir.getParentFile(); } - ret.subDirs = new int[subDirList.size()]; - for (int i = 0; i < subDirList.size(); i++) { - ret.subDirs[i] = subDirList.get(i); - } - ret.baseDirPath = currentDir.getAbsolutePath(); - - return ret; + return new ReplicaDirInfo(currentDir.getAbsolutePath(), hasSubdirs); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 6093339bdcb..af467b93f09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -59,7 +59,8 @@ class BlockPoolSlice { private final String bpid; private final FsVolumeImpl volume; // volume to which this BlockPool belongs to private final File currentDir; // StorageDirectory/current/bpid/current - private final LDir finalizedDir; // directory store Finalized replica + // directory where finalized replicas are stored + private final File finalizedDir; private final File rbwDir; // directory store RBW replica private final File tmpDir; // directory store Temporary replica private static final String DU_CACHE_FILE = "dfsUsed"; @@ -82,8 +83,13 @@ class BlockPoolSlice { this.bpid = bpid; this.volume = volume; this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); - final File finalizedDir = new File( + this.finalizedDir = new File( currentDir, 
DataStorage.STORAGE_DIR_FINALIZED); + if (!this.finalizedDir.exists()) { + if (!this.finalizedDir.mkdirs()) { + throw new IOException("Failed to mkdirs " + this.finalizedDir); + } + } // Files that were being written when the datanode was last shutdown // are now moved back to the data directory. It is possible that @@ -95,10 +101,6 @@ class BlockPoolSlice { FileUtil.fullyDelete(tmpDir); } this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW); - final int maxBlocksPerDir = conf.getInt( - DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY, - DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT); - this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir); if (!rbwDir.mkdirs()) { // create rbw directory if not exist if (!rbwDir.isDirectory()) { throw new IOException("Mkdirs failed to create " + rbwDir.toString()); @@ -131,7 +133,7 @@ File getDirectory() { } File getFinalizedDir() { - return finalizedDir.dir; + return finalizedDir; } File getRbwDir() { @@ -239,25 +241,56 @@ File createRbwFile(Block b) throws IOException { } File addBlock(Block b, File f) throws IOException { - File blockFile = finalizedDir.addBlock(b, f); + File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId()); + if (!blockDir.exists()) { + if (!blockDir.mkdirs()) { + throw new IOException("Failed to mkdirs " + blockDir); + } + } + File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir); File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp()); dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length()); return blockFile; } void checkDirs() throws DiskErrorException { - finalizedDir.checkDirTree(); + DiskChecker.checkDirs(finalizedDir); DiskChecker.checkDir(tmpDir); DiskChecker.checkDir(rbwDir); } void getVolumeMap(ReplicaMap volumeMap) throws IOException { // add finalized replicas - finalizedDir.getVolumeMap(bpid, volumeMap, volume); + addToReplicasMap(volumeMap, finalizedDir, true); // add rbw replicas addToReplicasMap(volumeMap, rbwDir, false); } + /** + * Recover an unlinked tmp file on datanode restart. If the original block + * does not exist, then the tmp file is renamed to be the + * original file name and the original name is returned; otherwise the tmp + * file is deleted and null is returned. + */ + File recoverTempUnlinkedBlock(File unlinkedTmp) throws IOException { + File blockFile = FsDatasetUtil.getOrigFile(unlinkedTmp); + if (blockFile.exists()) { + // If the original block file still exists, then no recovery is needed. 
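addBlock above now derives the destination directory purely from the block ID via DatanodeUtil.idToBlockDir, so a finalized replica's location is a fixed function of its ID rather than something tracked by the removed LDir tree. The standalone sketch below copies the same shifts and masks as the patch; the class name, sample IDs, and root path are illustrative only:

    import java.io.File;

    // Same math as DatanodeUtil.idToBlockDir in this patch: the second and third
    // bytes of the block ID select the two subdirectory levels, so each level has
    // at most 256 children and lookup needs no on-disk or in-memory bookkeeping.
    public class BlockDirExample {
      static File idToBlockDir(File root, long blockId) {
        int d1 = (int) ((blockId >> 16) & 0xff);
        int d2 = (int) ((blockId >> 8) & 0xff);
        return new File(root, "subdir" + d1 + File.separator + "subdir" + d2);
      }

      public static void main(String[] args) {
        File root = new File("/data/dn/current/finalized");       // illustrative root
        System.out.println(idToBlockDir(root, 1073741825L));      // 0x40000001 -> subdir0/subdir0
        System.out.println(idToBlockDir(root, 0x40012345L));      // -> subdir1/subdir35
      }
    }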
+ if (!unlinkedTmp.delete()) { + throw new IOException("Unable to cleanup unlinked tmp file " + + unlinkedTmp); + } + return null; + } else { + if (!unlinkedTmp.renameTo(blockFile)) { + throw new IOException("Unable to rename unlinked tmp file " + + unlinkedTmp); + } + return blockFile; + } + } + + /** * Add replicas under the given directory to the volume map * @param volumeMap the replicas map @@ -267,23 +300,34 @@ void getVolumeMap(ReplicaMap volumeMap) throws IOException { */ void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized ) throws IOException { - File blockFiles[] = FileUtil.listFiles(dir); - for (File blockFile : blockFiles) { - if (!Block.isBlockFilename(blockFile)) + File files[] = FileUtil.listFiles(dir); + for (File file : files) { + if (file.isDirectory()) { + addToReplicasMap(volumeMap, file, isFinalized); + } + + if (isFinalized && FsDatasetUtil.isUnlinkTmpFile(file)) { + file = recoverTempUnlinkedBlock(file); + if (file == null) { // the original block still exists, so we cover it + // in another iteration and can continue here + continue; + } + } + if (!Block.isBlockFilename(file)) continue; long genStamp = FsDatasetUtil.getGenerationStampFromFile( - blockFiles, blockFile); - long blockId = Block.filename2id(blockFile.getName()); + files, file); + long blockId = Block.filename2id(file.getName()); ReplicaInfo newReplica = null; if (isFinalized) { newReplica = new FinalizedReplica(blockId, - blockFile.length(), genStamp, volume, blockFile.getParentFile()); + file.length(), genStamp, volume, file.getParentFile()); } else { boolean loadRwr = true; - File restartMeta = new File(blockFile.getParent() + - File.pathSeparator + "." + blockFile.getName() + ".restart"); + File restartMeta = new File(file.getParent() + + File.pathSeparator + "." + file.getName() + ".restart"); Scanner sc = null; try { sc = new Scanner(restartMeta); @@ -291,8 +335,8 @@ void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized if (sc.hasNextLong() && (sc.nextLong() > Time.now())) { // It didn't expire. Load the replica as a RBW. newReplica = new ReplicaBeingWritten(blockId, - validateIntegrityAndSetLength(blockFile, genStamp), - genStamp, volume, blockFile.getParentFile(), null); + validateIntegrityAndSetLength(file, genStamp), + genStamp, volume, file.getParentFile(), null); loadRwr = false; } sc.close(); @@ -301,7 +345,7 @@ void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized restartMeta.getPath()); } } catch (FileNotFoundException fnfe) { - // nothing to do here + // nothing to do hereFile dir = } finally { if (sc != null) { sc.close(); @@ -310,15 +354,15 @@ void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized // Restart meta doesn't exist or expired. 
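Because finalized replicas can now live two subdirectory levels below the finalized root, addToReplicasMap walks the tree recursively instead of assuming a flat directory, and MiniDFSCluster gains a similar recursive getAllBlockMetadataFiles helper later in this patch. A simplified, hypothetical sketch of that traversal shape follows; the real code also pairs meta files, recovers unlinked tmp files, and builds ReplicaInfo objects:

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    // Simplified recursive scan: descend into subdir levels and collect
    // anything that looks like a block file by name prefix.
    public class BlockScanExample {
      static void collectBlockFiles(File dir, List<File> out) {
        File[] files = dir.listFiles();
        if (files == null) {
          return;                        // unreadable path or not a directory
        }
        for (File f : files) {
          if (f.isDirectory()) {
            collectBlockFiles(f, out);   // recurse into subdir levels
          } else if (f.getName().startsWith("blk_")) {
            out.add(f);
          }
        }
      }

      public static void main(String[] args) {
        List<File> blocks = new ArrayList<>();
        collectBlockFiles(new File("/data/dn/current"), blocks);   // illustrative path
        System.out.println("found " + blocks.size() + " block files");
      }
    }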
if (loadRwr) { newReplica = new ReplicaWaitingToBeRecovered(blockId, - validateIntegrityAndSetLength(blockFile, genStamp), - genStamp, volume, blockFile.getParentFile()); + validateIntegrityAndSetLength(file, genStamp), + genStamp, volume, file.getParentFile()); } } ReplicaInfo oldReplica = volumeMap.add(bpid, newReplica); if (oldReplica != null) { FsDatasetImpl.LOG.warn("Two block files with the same block id exist " + - "on disk: " + oldReplica.getBlockFile() + " and " + blockFile ); + "on disk: " + oldReplica.getBlockFile() + " and " + file ); } } } @@ -405,10 +449,6 @@ private long validateIntegrityAndSetLength(File blockFile, long genStamp) { } } - void clearPath(File f) { - finalizedDir.clearPath(f); - } - @Override public String toString() { return currentDir.getAbsolutePath(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index e8a06aec8ac..b133f60534a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -1224,13 +1224,6 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException { + ". Parent not found for file " + f); continue; } - ReplicaState replicaState = info.getState(); - if (replicaState == ReplicaState.FINALIZED || - (replicaState == ReplicaState.RUR && - ((ReplicaUnderRecovery)info).getOriginalReplica().getState() == - ReplicaState.FINALIZED)) { - v.clearPath(bpid, parent); - } volumeMap.remove(bpid, invalidBlks[i]); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 795fab1f3a5..adfc896f7f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.datanode.DataStorage; +import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.util.DiskChecker.DiskErrorException; @@ -235,10 +236,6 @@ void addToReplicasMap(String bpid, ReplicaMap volumeMap, // dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length()); bp.addToReplicasMap(volumeMap, dir, isFinalized); } - - void clearPath(String bpid, File f) throws IOException { - getBlockPoolSlice(bpid).clearPath(f); - } @Override public String toString() { @@ -274,7 +271,8 @@ boolean isBPDirEmpty(String bpid) throws IOException { File finalizedDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_FINALIZED); File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW); - if (finalizedDir.exists() && FileUtil.list(finalizedDir).length != 0) { + if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive( + finalizedDir)) { return false; } if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) { @@ -301,7 +299,8 
@@ void deleteBPDirectories(String bpid, boolean force) throws IOException { if (!rbwDir.delete()) { throw new IOException("Failed to delete " + rbwDir); } - if (!finalizedDir.delete()) { + if (!DatanodeUtil.dirNoFilesRecursive(finalizedDir) || + !FileUtil.fullyDelete(finalizedDir)) { throw new IOException("Failed to delete " + finalizedDir); } FileUtil.fullyDelete(tmpDir); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java deleted file mode 100644 index 991b58b3ae2..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java +++ /dev/null @@ -1,228 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.datanode.DataStorage; -import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.util.DiskChecker.DiskErrorException; - -/** - * A node type that can be built into a tree reflecting the - * hierarchy of replicas on the local disk. - */ -class LDir { - final File dir; - final int maxBlocksPerDir; - - private int numBlocks = 0; - private LDir[] children = null; - private int lastChildIdx = 0; - - LDir(File dir, int maxBlocksPerDir) throws IOException { - this.dir = dir; - this.maxBlocksPerDir = maxBlocksPerDir; - - if (!dir.exists()) { - if (!dir.mkdirs()) { - throw new IOException("Failed to mkdirs " + dir); - } - } else { - File[] files = FileUtil.listFiles(dir); - List dirList = new ArrayList(); - for (int idx = 0; idx < files.length; idx++) { - if (files[idx].isDirectory()) { - dirList.add(new LDir(files[idx], maxBlocksPerDir)); - } else if (Block.isBlockFilename(files[idx])) { - numBlocks++; - } - } - if (dirList.size() > 0) { - children = dirList.toArray(new LDir[dirList.size()]); - } - } - } - - File addBlock(Block b, File src) throws IOException { - //First try without creating subdirectories - File file = addBlock(b, src, false, false); - return (file != null) ? 
file : addBlock(b, src, true, true); - } - - private File addBlock(Block b, File src, boolean createOk, boolean resetIdx - ) throws IOException { - if (numBlocks < maxBlocksPerDir) { - final File dest = FsDatasetImpl.moveBlockFiles(b, src, dir); - numBlocks += 1; - return dest; - } - - if (lastChildIdx < 0 && resetIdx) { - //reset so that all children will be checked - lastChildIdx = DFSUtil.getRandom().nextInt(children.length); - } - - if (lastChildIdx >= 0 && children != null) { - //Check if any child-tree has room for a block. - for (int i=0; i < children.length; i++) { - int idx = (lastChildIdx + i)%children.length; - File file = children[idx].addBlock(b, src, false, resetIdx); - if (file != null) { - lastChildIdx = idx; - return file; - } - } - lastChildIdx = -1; - } - - if (!createOk) { - return null; - } - - if (children == null || children.length == 0) { - children = new LDir[maxBlocksPerDir]; - for (int idx = 0; idx < maxBlocksPerDir; idx++) { - final File sub = new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx); - children[idx] = new LDir(sub, maxBlocksPerDir); - } - } - - //now pick a child randomly for creating a new set of subdirs. - lastChildIdx = DFSUtil.getRandom().nextInt(children.length); - return children[ lastChildIdx ].addBlock(b, src, true, false); - } - - void getVolumeMap(String bpid, ReplicaMap volumeMap, FsVolumeImpl volume - ) throws IOException { - if (children != null) { - for (int i = 0; i < children.length; i++) { - children[i].getVolumeMap(bpid, volumeMap, volume); - } - } - - recoverTempUnlinkedBlock(); - volume.addToReplicasMap(bpid, volumeMap, dir, true); - } - - /** - * Recover unlinked tmp files on datanode restart. If the original block - * does not exist, then the tmp file is renamed to be the - * original file name; otherwise the tmp file is deleted. - */ - private void recoverTempUnlinkedBlock() throws IOException { - File files[] = FileUtil.listFiles(dir); - for (File file : files) { - if (!FsDatasetUtil.isUnlinkTmpFile(file)) { - continue; - } - File blockFile = FsDatasetUtil.getOrigFile(file); - if (blockFile.exists()) { - // If the original block file still exists, then no recovery is needed. - if (!file.delete()) { - throw new IOException("Unable to cleanup unlinked tmp file " + file); - } - } else { - if (!file.renameTo(blockFile)) { - throw new IOException("Unable to cleanup detached file " + file); - } - } - } - } - - /** - * check if a data diretory is healthy - * @throws DiskErrorException - */ - void checkDirTree() throws DiskErrorException { - DiskChecker.checkDir(dir); - - if (children != null) { - for (int i = 0; i < children.length; i++) { - children[i].checkDirTree(); - } - } - } - - void clearPath(File f) { - String root = dir.getAbsolutePath(); - String dir = f.getAbsolutePath(); - if (dir.startsWith(root)) { - String[] dirNames = dir.substring(root.length()). - split(File.separator + DataStorage.BLOCK_SUBDIR_PREFIX); - if (clearPath(f, dirNames, 1)) - return; - } - clearPath(f, null, -1); - } - - /** - * dirNames is an array of string integers derived from - * usual directory structure data/subdirN/subdirXY/subdirM ... - * If dirName array is non-null, we only check the child at - * the children[dirNames[idx]]. This avoids iterating over - * children in common case. If directory structure changes - * in later versions, we need to revisit this. 
- */ - private boolean clearPath(File f, String[] dirNames, int idx) { - if ((dirNames == null || idx == dirNames.length) && - dir.compareTo(f) == 0) { - numBlocks--; - return true; - } - - if (dirNames != null) { - //guess the child index from the directory name - if (idx > (dirNames.length - 1) || children == null) { - return false; - } - int childIdx; - try { - childIdx = Integer.parseInt(dirNames[idx]); - } catch (NumberFormatException ignored) { - // layout changed? we could print a warning. - return false; - } - return (childIdx >= 0 && childIdx < children.length) ? - children[childIdx].clearPath(f, dirNames, idx+1) : false; - } - - //guesses failed. back to blind iteration. - if (children != null) { - for(int i=0; i < children.length; i++) { - if (children[i].clearPath(f, null, -1)){ - return true; - } - } - } - return false; - } - - @Override - public String toString() { - return "FSDir{dir=" + dir + ", children=" - + (children == null ? null : Arrays.asList(children)) + "}"; - } -} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index fea88166c24..c1eb49f4493 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2052,4 +2052,12 @@ + + dfs.datanode.block.id.layout.upgrade.threads + 12 + The number of threads to use when creating hard links from + current to previous blocks during upgrade of a DataNode to block ID-based + block layout (see HDFS-6482 for details on the layout). + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index c316684138b..fe298d33118 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -2353,8 +2353,8 @@ public static File getFinalizedDir(File storageDir, String bpid) { * @return data file corresponding to the block */ public static File getBlockFile(File storageDir, ExtendedBlock blk) { - return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), - blk.getBlockName()); + return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir, + blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName()); } /** @@ -2364,10 +2364,32 @@ public static File getBlockFile(File storageDir, ExtendedBlock blk) { * @return metadata file corresponding to the block */ public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) { - return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), - blk.getBlockName() + "_" + blk.getGenerationStamp() + - Block.METADATA_EXTENSION); - + return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir, + blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName() + "_" + + blk.getGenerationStamp() + Block.METADATA_EXTENSION); + } + + /** + * Return all block metadata files in given directory (recursive search) + */ + public static List getAllBlockMetadataFiles(File storageDir) { + List results = new ArrayList(); + File[] files = storageDir.listFiles(); + if (files == null) { + return null; + } + for (File f : files) { + if (f.getName().startsWith("blk_") && f.getName().endsWith( + Block.METADATA_EXTENSION)) { + results.add(f); + } else if (f.isDirectory()) { + List subdirResults 
= getAllBlockMetadataFiles(f); + if (subdirResults != null) { + results.addAll(subdirResults); + } + } + } + return results; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java index 6a994494c6d..01bfb0d2fef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java @@ -79,8 +79,8 @@ static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs, File dnCurDirs[] = new File[dataNodeDirs.length]; for (int i = 0; i < dataNodeDirs.length; i++) { dnCurDirs[i] = new File(dataNodeDirs[i],"current"); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i]), - UpgradeUtilities.checksumMasterDataNodeContents()); + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i], + false), UpgradeUtilities.checksumMasterDataNodeContents()); } for (int i = 0; i < nameNodeDirs.length; i++) { assertFalse(new File(nameNodeDirs[i],"previous").isDirectory()); @@ -96,8 +96,9 @@ static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs, assertFalse(new File(bpRoot,"previous").isDirectory()); File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurFinalizeDir), - UpgradeUtilities.checksumMasterBlockPoolFinalizedContents()); + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, + bpCurFinalizeDir, true), + UpgradeUtilities.checksumMasterBlockPoolFinalizedContents()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 7a541e6622c..68687edea1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -81,7 +81,7 @@ void checkResult(NodeType nodeType, String[] baseDirs) throws Exception { break; case DATA_NODE: assertEquals( - UpgradeUtilities.checksumContents(nodeType, curDir), + UpgradeUtilities.checksumContents(nodeType, curDir, false), UpgradeUtilities.checksumMasterDataNodeContents()); break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java index e4d22cdd76a..176e9cc26cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java @@ -239,7 +239,7 @@ void checkResultNameNode(String[] baseDirs, assertTrue(new File(baseDirs[i],"previous").isDirectory()); assertEquals( UpgradeUtilities.checksumContents( - NAME_NODE, new File(baseDirs[i],"previous")), + NAME_NODE, new File(baseDirs[i],"previous"), false), UpgradeUtilities.checksumMasterNameNodeContents()); } } @@ -259,7 +259,8 @@ void checkResultDataNode(String[] baseDirs, if (currentShouldExist) { for (int i = 0; i < baseDirs.length; i++) { assertEquals( - UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"current")), + UpgradeUtilities.checksumContents(DATA_NODE, + new File(baseDirs[i],"current"), 
false), UpgradeUtilities.checksumMasterDataNodeContents()); } } @@ -267,7 +268,8 @@ void checkResultDataNode(String[] baseDirs, for (int i = 0; i < baseDirs.length; i++) { assertTrue(new File(baseDirs[i],"previous").isDirectory()); assertEquals( - UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"previous")), + UpgradeUtilities.checksumContents(DATA_NODE, + new File(baseDirs[i],"previous"), false), UpgradeUtilities.checksumMasterDataNodeContents()); } } @@ -290,8 +292,8 @@ void checkResultBlockPool(String[] baseDirs, boolean currentShouldExist, if (currentShouldExist) { for (int i = 0; i < baseDirs.length; i++) { File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir), - UpgradeUtilities.checksumMasterBlockPoolContents()); + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir, + false), UpgradeUtilities.checksumMasterBlockPoolContents()); } } if (previousShouldExist) { @@ -299,8 +301,8 @@ void checkResultBlockPool(String[] baseDirs, boolean currentShouldExist, File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS); assertTrue(bpPrevDir.isDirectory()); assertEquals( - UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir), - UpgradeUtilities.checksumMasterBlockPoolContents()); + UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir, + false), UpgradeUtilities.checksumMasterBlockPoolContents()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java index ee9e91dc733..104b043f1d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java @@ -100,7 +100,7 @@ void checkNameNode(String[] baseDirs, long imageTxId) throws IOException { File previous = new File(baseDir, "previous"); assertExists(previous); - assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous), + assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous, false), UpgradeUtilities.checksumMasterNameNodeContents()); } } @@ -114,23 +114,25 @@ void checkNameNode(String[] baseDirs, long imageTxId) throws IOException { void checkDataNode(String[] baseDirs, String bpid) throws IOException { for (int i = 0; i < baseDirs.length; i++) { File current = new File(baseDirs[i], "current/" + bpid + "/current"); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current), + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current, false), UpgradeUtilities.checksumMasterDataNodeContents()); // block files are placed under /current//current/finalized File currentFinalized = MiniDFSCluster.getFinalizedDir(new File(baseDirs[i]), bpid); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, currentFinalized), + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, + currentFinalized, true), UpgradeUtilities.checksumMasterBlockPoolFinalizedContents()); File previous = new File(baseDirs[i], "current/" + bpid + "/previous"); assertTrue(previous.isDirectory()); - assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous), + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous, false), UpgradeUtilities.checksumMasterDataNodeContents()); File previousFinalized = new File(baseDirs[i], "current/" + bpid + "/previous"+"/finalized"); - 
assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previousFinalized), + assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, + previousFinalized, true), UpgradeUtilities.checksumMasterBlockPoolFinalizedContents()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index f5dbdceaa17..88ad0cc2dac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -24,6 +24,7 @@ import java.io.BufferedReader; import java.io.File; +import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.FileReader; import java.io.IOException; @@ -80,7 +81,7 @@ private static class ReferenceFileInfo { long checksum; } - private static final Configuration upgradeConf; + static final Configuration upgradeConf; static { upgradeConf = new HdfsConfiguration(); @@ -95,7 +96,7 @@ private static class ReferenceFileInfo { boolean printChecksum = false; - private void unpackStorage(String tarFileName) + void unpackStorage(String tarFileName, String referenceName) throws IOException { String tarFile = System.getProperty("test.cache.data", "build/test/cache") + "/" + tarFileName; @@ -110,7 +111,7 @@ private void unpackStorage(String tarFileName) BufferedReader reader = new BufferedReader(new FileReader( System.getProperty("test.cache.data", "build/test/cache") - + "/" + HADOOP_DFS_DIR_TXT)); + + "/" + referenceName)); String line; while ( (line = reader.readLine()) != null ) { @@ -285,7 +286,7 @@ public void testFailOnPreUpgradeImage() throws IOException { */ @Test public void testUpgradeFromRel22Image() throws IOException { - unpackStorage(HADOOP22_IMAGE); + unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT); upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf). 
numDataNodes(4)); } @@ -296,7 +297,7 @@ public void testUpgradeFromRel22Image() throws IOException { */ @Test public void testUpgradeFromCorruptRel22Image() throws IOException { - unpackStorage(HADOOP22_IMAGE); + unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT); // Overwrite the md5 stored in the VERSION files File baseDir = new File(MiniDFSCluster.getBaseDirectory()); @@ -333,7 +334,7 @@ public void testUpgradeFromCorruptRel22Image() throws IOException { */ @Test public void testUpgradeFromRel1ReservedImage() throws Exception { - unpackStorage(HADOOP1_RESERVED_IMAGE); + unpackStorage(HADOOP1_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster = null; // Try it once without setting the upgrade flag to ensure it fails final Configuration conf = new Configuration(); @@ -403,7 +404,7 @@ public void testUpgradeFromRel1ReservedImage() throws Exception { */ @Test public void testUpgradeFromRel023ReservedImage() throws Exception { - unpackStorage(HADOOP023_RESERVED_IMAGE); + unpackStorage(HADOOP023_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster = null; // Try it once without setting the upgrade flag to ensure it fails final Configuration conf = new Configuration(); @@ -468,7 +469,7 @@ public void testUpgradeFromRel023ReservedImage() throws Exception { */ @Test public void testUpgradeFromRel2ReservedImage() throws Exception { - unpackStorage(HADOOP2_RESERVED_IMAGE); + unpackStorage(HADOOP2_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster = null; // Try it once without setting the upgrade flag to ensure it fails final Configuration conf = new Configuration(); @@ -572,7 +573,7 @@ static void recoverAllLeases(DFSClient dfs, } while (dirList.hasMore()); } - private void upgradeAndVerify(MiniDFSCluster.Builder bld) + void upgradeAndVerify(MiniDFSCluster.Builder bld) throws IOException { MiniDFSCluster cluster = null; try { @@ -601,7 +602,7 @@ private void upgradeAndVerify(MiniDFSCluster.Builder bld) */ @Test public void testUpgradeFromRel1BBWImage() throws IOException { - unpackStorage(HADOOP1_BBW_IMAGE); + unpackStorage(HADOOP1_BBW_IMAGE, HADOOP_DFS_DIR_TXT); Configuration conf = new Configuration(upgradeConf); conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, System.getProperty("test.build.data") + File.separator + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java index a2899eec9c7..1b4b3172394 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java @@ -445,19 +445,14 @@ private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex, @Test public void testReplicaInfoParsing() throws Exception { - testReplicaInfoParsingSingle(BASE_PATH, new int[0]); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir1", new int[]{1}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir43", new int[]{43}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3", new int[]{1, 2, 3}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir43", new int[]{1, 2, 43}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir23/subdir3", new int[]{1, 23, 3}); - testReplicaInfoParsingSingle(BASE_PATH + "/subdir13/subdir2/subdir3", new int[]{13, 2, 3}); + testReplicaInfoParsingSingle(BASE_PATH); + testReplicaInfoParsingSingle(BASE_PATH + 
"/subdir1"); + testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3"); } - private static void testReplicaInfoParsingSingle(String subDirPath, int[] expectedSubDirs) { + private static void testReplicaInfoParsingSingle(String subDirPath) { File testFile = new File(subDirPath); - assertArrayEquals(expectedSubDirs, ReplicaInfo.parseSubDirs(testFile).subDirs); - assertEquals(BASE_PATH, ReplicaInfo.parseSubDirs(testFile).baseDirPath); + assertEquals(BASE_PATH, ReplicaInfo.parseBaseDir(testFile).baseDirPath); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java new file mode 100644 index 00000000000..0966301cb4e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; + +public class TestDatanodeLayoutUpgrade { + private static final String HADOOP_DATANODE_DIR_TXT = + "hadoop-datanode-dir.txt"; + private static final String HADOOP24_DATANODE = "hadoop-24-datanode-dir.tgz"; + + @Test + // Upgrade from LDir-based layout to block ID-based layout -- change described + // in HDFS-6482 + public void testUpgradeToIdBasedLayout() throws IOException { + TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage(); + upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT); + Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf); + conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, + System.getProperty("test.build.data") + File.separator + + "dfs" + File.separator + "data"); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, + System.getProperty("test.build.data") + File.separator + + "dfs" + File.separator + "name"); + upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1) + .manageDataDfsDirs(false).manageNameDfsDirs(false)); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java index 81077c5fd8b..a7c6a69ac9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java @@ -27,6 +27,7 @@ import java.io.File; import java.io.FileOutputStream; import java.util.ArrayList; +import java.util.List; import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; @@ -35,6 +36,7 @@ import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.common.GenerationStamp; @@ -137,13 +139,15 @@ public void testArrayOutOfBoundsException() throws Exception { final String bpid = cluster.getNamesystem().getBlockPoolId(); File storageDir = cluster.getInstanceStorageDir(0, 0); File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); + assertTrue("Data directory does not exist", dataDir.exists()); ExtendedBlock blk = getBlock(bpid, dataDir); if (blk == null) { storageDir = cluster.getInstanceStorageDir(0, 1); dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); blk = getBlock(bpid, dataDir); } - assertFalse(blk==null); + assertFalse("Data directory does not contain any blocks or there was an " + + "IO error", blk==null); // start a third datanode cluster.startDataNodes(conf, 1, true, null, null); @@ -174,33 +178,15 @@ public void testArrayOutOfBoundsException() throws Exception { } - private ExtendedBlock getBlock(String bpid, File dataDir) { - assertTrue("data directory does not exist", dataDir.exists()); - File[] blocks = dataDir.listFiles(); - assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0)); - - int idx = 0; - String blockFileName = null; - for (; idx < blocks.length; idx++) { - blockFileName = blocks[idx].getName(); - if (blockFileName.startsWith("blk_") && !blockFileName.endsWith(".meta")) { - break; - } - } - if (blockFileName == null) { + public static ExtendedBlock getBlock(String bpid, File dataDir) { + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir); + if (metadataFiles == null || metadataFiles.isEmpty()) { return null; } - long blockId = Long.parseLong(blockFileName.substring("blk_".length())); - long blockTimeStamp = GenerationStamp.GRANDFATHER_GENERATION_STAMP; - for (idx=0; idx < blocks.length; idx++) { - String fileName = blocks[idx].getName(); - if (fileName.startsWith(blockFileName) && fileName.endsWith(".meta")) { - int startIndex = blockFileName.length()+1; - int endIndex = fileName.length() - ".meta".length(); - blockTimeStamp = Long.parseLong(fileName.substring(startIndex, endIndex)); - break; - } - } - return new ExtendedBlock(bpid, blockId, blocks[idx].length(), blockTimeStamp); + File metadataFile = metadataFiles.get(0); + File blockFile = Block.metaToBlockFile(metadataFile); + return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()), + blockFile.length(), Block.getGenerationStamp(metadataFile.getName())); } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java index 4f26e087cc8..bbaf3ed0e3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java @@ -158,21 +158,23 @@ public static void initialize() throws Exception { FileUtil.fullyDelete(new File(datanodeStorage,"in_use.lock")); } namenodeStorageChecksum = checksumContents(NAME_NODE, - new File(namenodeStorage, "current")); + new File(namenodeStorage, "current"), false); File dnCurDir = new File(datanodeStorage, "current"); - 
datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir); + datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false); File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current"); - blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir); + blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false); File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current/"+DataStorage.STORAGE_DIR_FINALIZED); - blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE, bpCurFinalizeDir); + blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE, + bpCurFinalizeDir, true); File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current/"+DataStorage.STORAGE_DIR_RBW); - blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir); + blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir, + false); } // Private helper method that writes a file to the given file system. @@ -266,36 +268,47 @@ public static long checksumMasterBlockPoolRbwContents() { /** * Compute the checksum of all the files in the specified directory. - * The contents of subdirectories are not included. This method provides - * an easy way to ensure equality between the contents of two directories. + * This method provides an easy way to ensure equality between the contents + * of two directories. * * @param nodeType if DATA_NODE then any file named "VERSION" is ignored. * This is because this file file is changed every time * the Datanode is started. - * @param dir must be a directory. Subdirectories are ignored. + * @param dir must be a directory + * @param recursive whether or not to consider subdirectories * * @throws IllegalArgumentException if specified directory is not a directory * @throws IOException if an IOException occurs while reading the files * @return the computed checksum value */ - public static long checksumContents(NodeType nodeType, File dir) throws IOException { + public static long checksumContents(NodeType nodeType, File dir, + boolean recursive) throws IOException { + CRC32 checksum = new CRC32(); + checksumContentsHelper(nodeType, dir, checksum, recursive); + return checksum.getValue(); + } + + public static void checksumContentsHelper(NodeType nodeType, File dir, + CRC32 checksum, boolean recursive) throws IOException { if (!dir.isDirectory()) { throw new IllegalArgumentException( - "Given argument is not a directory:" + dir); + "Given argument is not a directory:" + dir); } File[] list = dir.listFiles(); Arrays.sort(list); - CRC32 checksum = new CRC32(); for (int i = 0; i < list.length; i++) { if (!list[i].isFile()) { + if (recursive) { + checksumContentsHelper(nodeType, list[i], checksum, recursive); + } continue; } // skip VERSION and dfsUsed file for DataNodes - if (nodeType == DATA_NODE && - (list[i].getName().equals("VERSION") || - list[i].getName().equals("dfsUsed"))) { - continue; + if (nodeType == DATA_NODE && + (list[i].getName().equals("VERSION") || + list[i].getName().equals("dfsUsed"))) { + continue; } FileInputStream fis = null; @@ -312,7 +325,6 @@ public static long checksumContents(NodeType nodeType, File dir) throws IOExcept } } } - return checksum.getValue(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index 38403eb8258..b1172a0806f 
100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.net.Socket; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -384,7 +385,7 @@ private int countRealBlocks(Map map) { continue; } - String [] res = metaFilesInDir(dir); + List res = MiniDFSCluster.getAllBlockMetadataFiles(dir); if(res == null) { System.out.println("res is null for dir = " + dir + " i=" + i + " and j=" + j); continue; @@ -392,7 +393,8 @@ private int countRealBlocks(Map map) { //System.out.println("for dn" + i + "." + j + ": " + dir + "=" + res.length+ " files"); //int ii = 0; - for(String s: res) { + for(File f: res) { + String s = f.getName(); // cut off "blk_-" at the beginning and ".meta" at the end assertNotNull("Block file name should not be null", s); String bid = s.substring(s.indexOf("_")+1, s.lastIndexOf("_")); @@ -408,25 +410,9 @@ private int countRealBlocks(Map map) { //System.out.println("dir1="+dir.getPath() + "blocks=" + res.length); //System.out.println("dir2="+dir2.getPath() + "blocks=" + res2.length); - total += res.length; + total += res.size(); } } return total; } - - /* - * count how many files *.meta are in the dir - */ - private String [] metaFilesInDir(File dir) { - String [] res = dir.list( - new FilenameFilter() { - @Override - public boolean accept(File dir, String name) { - return name.startsWith("blk_") && - name.endsWith(Block.METADATA_EXTENSION); - } - } - ); - return res; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java index d16a4bb9e06..755d49922c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java @@ -103,9 +103,10 @@ public void testDeleteBlockPool() throws Exception { fs1.delete(new Path("/alpha"), true); // Wait till all blocks are deleted from the dn2 for bpid1. 
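The hunk below waits on DatanodeUtil.dirNoFilesRecursive(finalDir) instead of checking list().length, because under the block ID-based layout the finalized directory holds nested subdirectories rather than a flat list of block files. A minimal standalone sketch of such a recursive "no regular files anywhere below this directory" check follows; the class name and the choice to treat an unreadable directory as empty (rather than raising an error) are assumptions for illustration, not the actual DatanodeUtil code.

import java.io.File;

class DirNoFilesSketch {
  // Returns true if dir and every subdirectory below it contain no regular files.
  static boolean dirNoFilesRecursive(File dir) {
    File[] contents = dir.listFiles();
    if (contents == null) {
      return true; // unreadable path or not a directory: nothing to count as a file (assumption)
    }
    for (File f : contents) {
      if (f.isFile()) {
        return false; // found a block, meta, or other regular file
      }
      if (f.isDirectory() && !dirNoFilesRecursive(f)) {
        return false; // a nested subdirectory still holds files
      }
    }
    return true;
  }

  public static void main(String[] args) {
    File dir = new File(args.length > 0 ? args[0] : ".");
    System.out.println(dirNoFilesRecursive(dir));
  }
}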
- while ((MiniDFSCluster.getFinalizedDir(dn2StorageDir1, - bpid1).list().length != 0) || (MiniDFSCluster.getFinalizedDir( - dn2StorageDir2, bpid1).list().length != 0)) { + File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1); + File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid2); + while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) || + (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) { try { Thread.sleep(3000); } catch (Exception ignored) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 08f98d4bd48..1a4af42b00c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -41,6 +41,7 @@ import java.nio.channels.FileChannel; import java.security.PrivilegedExceptionAction; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; @@ -63,6 +64,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -750,15 +752,14 @@ public void testFsckListCorruptFilesBlocks() throws Exception { for (int j=0; j<=1; j++) { File storageDir = cluster.getInstanceStorageDir(i, j); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); - File[] blocks = data_dir.listFiles(); - if (blocks == null) + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles( + data_dir); + if (metadataFiles == null) continue; - - for (int idx = 0; idx < blocks.length; idx++) { - if (!blocks[idx].getName().startsWith("blk_")) { - continue; - } - assertTrue("Cannot remove file.", blocks[idx].delete()); + for (File metadataFile : metadataFiles) { + File blockFile = Block.metaToBlockFile(metadataFile); + assertTrue("Cannot remove file.", blockFile.delete()); + assertTrue("Cannot remove file.", metadataFile.delete()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java index 18f83ef8691..7e36acb48c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java @@ -25,6 +25,7 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Collection; +import java.util.List; import java.util.Random; import org.apache.commons.logging.Log; @@ -39,7 +40,11 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.TestFileCorruption; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; import 
org.apache.hadoop.util.StringUtils; import org.junit.Test; @@ -87,36 +92,29 @@ public void testListCorruptFilesCorruptedBlock() throws Exception { File storageDir = cluster.getInstanceStorageDir(0, 1); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); assertTrue("data directory does not exist", data_dir.exists()); - File[] blocks = data_dir.listFiles(); - assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0)); - for (int idx = 0; idx < blocks.length; idx++) { - if (blocks[idx].getName().startsWith("blk_") && - blocks[idx].getName().endsWith(".meta")) { - // - // shorten .meta file - // - RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw"); - FileChannel channel = file.getChannel(); - long position = channel.size() - 2; - int length = 2; - byte[] buffer = new byte[length]; - random.nextBytes(buffer); - channel.write(ByteBuffer.wrap(buffer), position); - file.close(); - LOG.info("Deliberately corrupting file " + blocks[idx].getName() + - " at offset " + position + " length " + length); + List metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir); + assertTrue("Data directory does not contain any blocks or there was an " + + "IO error", metaFiles != null && !metaFiles.isEmpty()); + File metaFile = metaFiles.get(0); + RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); + FileChannel channel = file.getChannel(); + long position = channel.size() - 2; + int length = 2; + byte[] buffer = new byte[length]; + random.nextBytes(buffer); + channel.write(ByteBuffer.wrap(buffer), position); + file.close(); + LOG.info("Deliberately corrupting file " + metaFile.getName() + + " at offset " + position + " length " + length); - // read all files to trigger detection of corrupted replica - try { - util.checkFiles(fs, "/srcdat10"); - } catch (BlockMissingException e) { - System.out.println("Received BlockMissingException as expected."); - } catch (IOException e) { - assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " + - " but received IOException " + e, false); - } - break; - } + // read all files to trigger detection of corrupted replica + try { + util.checkFiles(fs, "/srcdat10"); + } catch (BlockMissingException e) { + System.out.println("Received BlockMissingException as expected."); + } catch (IOException e) { + assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " + + " but received IOException " + e, false); } // fetch bad file list from namenode. There should be one file. 
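Several hunks in this patch, including the one above, replace flat data_dir.listFiles() scans with MiniDFSCluster.getAllBlockMetadataFiles, since finalized replicas now sit inside nested block ID subdirectories. A self-contained sketch of that kind of recursive metadata scan is shown here; it hardcodes the ".meta" suffix in place of Block.METADATA_EXTENSION and returns an empty list rather than null for an unreadable directory, so it approximates rather than reproduces the helper added to MiniDFSCluster.

import java.io.File;
import java.util.ArrayList;
import java.util.List;

class BlockMetaScanSketch {
  // Collects every "blk_*.meta" file under root, descending into subdirectory trees.
  static List<File> allBlockMetadataFiles(File root) {
    List<File> results = new ArrayList<File>();
    File[] files = root.listFiles();
    if (files == null) {
      return results; // unreadable directory: report nothing (assumption; the patch's helper returns null)
    }
    for (File f : files) {
      if (f.isFile() && f.getName().startsWith("blk_")
          && f.getName().endsWith(".meta")) {
        results.add(f);
      } else if (f.isDirectory()) {
        results.addAll(allBlockMetadataFiles(f)); // recurse into block ID subdirectories
      }
    }
    return results;
  }

  public static void main(String[] args) {
    for (File meta : allBlockMetadataFiles(new File(args.length > 0 ? args[0] : "."))) {
      System.out.println(meta);
    }
  }
}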
@@ -174,38 +172,30 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception { File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, cluster.getNamesystem().getBlockPoolId()); assertTrue("data directory does not exist", data_dir.exists()); - File[] blocks = data_dir.listFiles(); - assertTrue("Blocks do not exist in data-dir", (blocks != null) && - (blocks.length > 0)); - for (int idx = 0; idx < blocks.length; idx++) { - if (blocks[idx].getName().startsWith("blk_") && - blocks[idx].getName().endsWith(".meta")) { - // - // shorten .meta file - // - RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw"); - FileChannel channel = file.getChannel(); - long position = channel.size() - 2; - int length = 2; - byte[] buffer = new byte[length]; - random.nextBytes(buffer); - channel.write(ByteBuffer.wrap(buffer), position); - file.close(); - LOG.info("Deliberately corrupting file " + blocks[idx].getName() + - " at offset " + position + " length " + length); + List metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir); + assertTrue("Data directory does not contain any blocks or there was an " + + "IO error", metaFiles != null && !metaFiles.isEmpty()); + File metaFile = metaFiles.get(0); + RandomAccessFile file = new RandomAccessFile(metaFile, "rw"); + FileChannel channel = file.getChannel(); + long position = channel.size() - 2; + int length = 2; + byte[] buffer = new byte[length]; + random.nextBytes(buffer); + channel.write(ByteBuffer.wrap(buffer), position); + file.close(); + LOG.info("Deliberately corrupting file " + metaFile.getName() + + " at offset " + position + " length " + length); - // read all files to trigger detection of corrupted replica - try { - util.checkFiles(fs, "/srcdat10"); - } catch (BlockMissingException e) { - System.out.println("Received BlockMissingException as expected."); - } catch (IOException e) { - assertTrue("Corrupted replicas not handled properly. " + - "Expecting BlockMissingException " + - " but received IOException " + e, false); - } - break; - } + // read all files to trigger detection of corrupted replica + try { + util.checkFiles(fs, "/srcdat10"); + } catch (BlockMissingException e) { + System.out.println("Received BlockMissingException as expected."); + } catch (IOException e) { + assertTrue("Corrupted replicas not handled properly. " + + "Expecting BlockMissingException " + + " but received IOException " + e, false); } // fetch bad file list from namenode. There should be one file. 
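The cleanup loops in the following hunks delete both the block file and its metadata file, pairing them via Block.metaToBlockFile, and the rewritten TestFileCorruption.getBlock earlier in this patch recovers the block ID and generation stamp from the file names. A rough standalone sketch of that name-based pairing and parsing is given below; it relies only on the blk_<id> and blk_<id>_<genstamp>.meta naming convention visible in the code this patch touches, and the helper names are illustrative rather than the real Block API.

import java.io.File;

class BlockNameParseSketch {
  // Derives the block file ("blk_<id>") that corresponds to a metadata file
  // named "blk_<id>_<genstamp>.meta" in the same directory.
  static File blockFileFor(File metaFile) {
    String name = metaFile.getName();
    int lastUnderscore = name.lastIndexOf('_'); // separates the id from the generation stamp
    return new File(metaFile.getParentFile(), name.substring(0, lastUnderscore));
  }

  // Parses the numeric block id out of a "blk_<id>" file name.
  static long blockIdOf(String blockFileName) {
    return Long.parseLong(blockFileName.substring("blk_".length()));
  }

  // Parses the generation stamp out of a "blk_<id>_<genstamp>.meta" file name.
  static long generationStampOf(String metaFileName) {
    int lastUnderscore = metaFileName.lastIndexOf('_');
    int suffixStart = metaFileName.length() - ".meta".length();
    return Long.parseLong(metaFileName.substring(lastUnderscore + 1, suffixStart));
  }

  public static void main(String[] args) {
    File meta = new File("blk_1073741825_1001.meta");
    System.out.println(blockIdOf(blockFileFor(meta).getName())); // 1073741825
    System.out.println(generationStampOf(meta.getName()));       // 1001
  }
}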
@@ -295,17 +285,18 @@ public void testlistCorruptFileBlocks() throws Exception { for (int j = 0; j <= 1; j++) { File storageDir = cluster.getInstanceStorageDir(i, j); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); - File[] blocks = data_dir.listFiles(); - if (blocks == null) + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles( + data_dir); + if (metadataFiles == null) continue; // assertTrue("Blocks do not exist in data-dir", (blocks != null) && // (blocks.length > 0)); - for (int idx = 0; idx < blocks.length; idx++) { - if (!blocks[idx].getName().startsWith("blk_")) { - continue; - } - LOG.info("Deliberately removing file " + blocks[idx].getName()); - assertTrue("Cannot remove file.", blocks[idx].delete()); + for (File metadataFile : metadataFiles) { + File blockFile = Block.metaToBlockFile(metadataFile); + LOG.info("Deliberately removing file " + blockFile.getName()); + assertTrue("Cannot remove file.", blockFile.delete()); + LOG.info("Deliberately removing file " + metadataFile.getName()); + assertTrue("Cannot remove file.", metadataFile.delete()); // break; } } @@ -405,17 +396,18 @@ public void testlistCorruptFileBlocksDFS() throws Exception { for (int i = 0; i < 2; i++) { File storageDir = cluster.getInstanceStorageDir(0, i); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); - File[] blocks = data_dir.listFiles(); - if (blocks == null) + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles( + data_dir); + if (metadataFiles == null) continue; // assertTrue("Blocks do not exist in data-dir", (blocks != null) && // (blocks.length > 0)); - for (int idx = 0; idx < blocks.length; idx++) { - if (!blocks[idx].getName().startsWith("blk_")) { - continue; - } - LOG.info("Deliberately removing file " + blocks[idx].getName()); - assertTrue("Cannot remove file.", blocks[idx].delete()); + for (File metadataFile : metadataFiles) { + File blockFile = Block.metaToBlockFile(metadataFile); + LOG.info("Deliberately removing file " + blockFile.getName()); + assertTrue("Cannot remove file.", blockFile.delete()); + LOG.info("Deliberately removing file " + metadataFile.getName()); + assertTrue("Cannot remove file.", metadataFile.delete()); // break; } } @@ -482,15 +474,14 @@ public void testMaxCorruptFiles() throws Exception { File storageDir = cluster.getInstanceStorageDir(i, j); File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); LOG.info("Removing files from " + data_dir); - File[] blocks = data_dir.listFiles(); - if (blocks == null) + List metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles( + data_dir); + if (metadataFiles == null) continue; - - for (int idx = 0; idx < blocks.length; idx++) { - if (!blocks[idx].getName().startsWith("blk_")) { - continue; - } - assertTrue("Cannot remove file.", blocks[idx].delete()); + for (File metadataFile : metadataFiles) { + File blockFile = Block.metaToBlockFile(metadataFile); + assertTrue("Cannot remove file.", blockFile.delete()); + assertTrue("Cannot remove file.", metadataFile.delete()); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-24-datanode-dir.tgz new file mode 100644 index 0000000000000000000000000000000000000000..9f666fed090ec1ba463c568e69fbbe23658c1c89 GIT binary patch literal 637608 zcmYgXc|4Tw*Z#Ih5)~nPLbi~7CrkF-jC~0qBYQ(8OGHw3jeXx|29sq78IdJ~Fk{P7 z3K{z{vd{a7exLVm=X36Ju5+F1ocn$T` zws~l?5ltFW49ba}53*eyN{>I-37%>ClR4qrhk56(m-}0I>Q}KI3M?hhUjvZ|F3ix( 
zyKL+>%M@XWxY|Wm%)^wRduSH&?qzvJu_W5^HZ6RPD2UvRX_$USYJMUh&Puv8WVei# zr#Sk!Zq^OA99lOk1#g&jl2%896^_WDc>H}jOh|jcn#Opuc7X^h>88_H)K5~~V_{ZO zdVc;T$K{WStxElmq(3sn5Ts(8H>P8oE%QKc9&RYDa<0{p)_^D#Mw&t+q{OA7!rY0Pq1Q+~^ulG4qw#nZ_<~Y>Ge4Js;~pnh zzY^Mz_7}RV-{b-V;*BnOV_o6R>OxPWRBK}sD~l{M>$ftcNZva#{92y6`cVTq8$AM) z-AI0c9=(JC#8Gk5J;5s!$>eSablUFEsdbx2EgcQ{@Gu|yBT5spXW@K_MoM0!TC%#5Ud}wR$<@<+?j{0(;ztTqRFt7) z??~d8#3CfttCGyQU2afyRq`3TFt`a@MHS`c6~yz~$Ty{HKK^{Aj6gYHnujeu{q4p|8-@bqgriF_FG!zJFz0;P;w4vs~VLSweQ zTzaFFzRTCH<#dvov< z@*7Y-gbWo=*TjkuwvNs(0oAjFp`(3v^$e9SIdSg{Ub}M-$qz8ouO0RBee;`m$=b>DSLVpCOXvNc*%*ladQCX}(^>g51qQAU*BE|Sd1lf<-(N)PxkM^h$yPpjbf&wYh=go}hp z-%oBbk~L&lC5zv&;lLk@Ok0&51uzeriE#oN0S2Lxn}=#K^?kln6M>y2uK@D@NU#M-h!vp{d z*P~~&@AxBWTq#jWMdlG`(alcB;rB70kcntX4uXMrBEn90EZ{6k*>F(Yw|ner8S>s? zfQI>fAya%HSR2;~7_k1iz48fpL7SyRCK!4ZWoel6H1P4%B~(iwhR-OZI6;@{jhtsB z{wlBcW$S{}aJ->mPG6AM!7Wp{4KR7WVjdv*4dtd^ePorRhK43|+RdrX5iO2Xo1@IB z=25X~LE4;Y4QsCNa#LPCPjt?Roi?(UpuyQ?%D-T6ka&%jy+&jNh_lhRrD`7YGI8$c zb$v`F@*F1k=Rui{dCU(~@2fpZpdc;GbNZW>w1bcI`};}55^BYthBMWqmS*M-8Ab_XeDlryARyeDUVY1_+%*V~=IUPF$G z%6;v=A#|k!LSqMYHMFyql~NOh9(eEfnd`kwq;Hh0p=yO(Ds>j9(8shv`0b$F{7siU zTX02Zj6BTsUeMGjM9x__NU(x7MS>`=Ijcl9sy=PiD{~Ho zh?MrvQkwV9p*+-zKGFLZTQ*fbRy1wAr`5y8>{#c35hzXWcjCWNHn#@$&$s-lSxnvZ zoA>?p>IH5#K)`4icPKPoTNdqDOS=n3U6z8ODFr_NVaS)6yol|CiMPN|2!&N4U?as3s$laqeYP! z4r=2b;hoA_38)CYoiBZ1*EFu#!LQ-d)kUG7Ra|9FpQ%QTRMyL7IAU&qx>Q`)TYe+2 z*ty>a$x0I)2zTQWJRLCSwJoo#N9lfo@RimIb&zYB3;smes=7}&*`AC-5zoYTmyzAJ9)?N%KaPIiBT*JM)s$b*KwH*Ac zubzoP$F&=`&C6fXu&;^*AuBI!n$m9vRk2Tegb3OZZzEZO(l=ucnd~vt+T##E1NE*i4V z(f#uHNYKQNXP4@^auE*QzmP3MufN?{BkLz`i74N5#H8OYCDyn2 zp4Nlv@J&xgD1T|6lVhY!J3$=FbPqQNfS$pyIu?pwFuX;YF?1I-b$Lz@Rov_IF2lB% zor@yqa;7m=u7`ae54ru8_jWtg<0!mEZi9!c>fg#LsM7n@~$Bsec>8hp9Yy~`5uMJO?gVLjVof~GcLnY zLVEf)uCRV2+`=MnA>{xUwr#j8?;37siwARa0In%Y*#btU=%S^QH^34`R)dV#5l?sJo}wKdD9S5Hn&o6KRKhexRm*E-GlJb7N>(aLytJ%Y zR`CmEnGXY8P?Kueb^_1u(+mRN;5sJgxu!UZgo zHb|JM3)I{nc>Zurop<5a=>G7TGLN&E-g^OCVr9W?W^yzb2_VhBC}KxpaxA8CRgL&y7nz}|C`E>1ee@e zx#p122MR6>b}={kvv6?_A>X^qTbMdMc%v*iVi>+$4Wx$b>&QrvEy~!f;WhJX3lek{ z_Vv;A=9A~R2VRtHn@8iDa{XTIIhO%m*zIC&L7o8uwwe9snFCbm?eEy`l#6@C=W_Q} zva*cA@WQ(gaifK^FJlp}uX%vY6)tj%#Q2SjJ|JE#4YnULOLHU&V3C|iVPGP>;?|#j zK)n0*_fIO$2798L;bE3dE7=H*#t!7N(cvR5lXIq2jnVi5LmIi=HLjMWSrNl?lER;_ zMpwjcf)$Jw9Q5v_hm!6EUPbZba;;?1@sI@#X&Ij5k=(u(&1JBTH_yNDRkLP%4ey`w z4#uQCv*>#eZLV+*d9bxD5N5P#fo0Ob-D8UCEB?Y9rVvQ2eR(mq>%7?7yfWCDn8R^n zDCm>f&nRt%;>)_M-^L(prX(_M3NkHZP9^LNs3^EytmX0N`&h_LWSF`r(c+5%LnVwt zTh?yU$to7_sZ7tUd8UDS^>5gzXhRBTyotlpnxck~SqM3JlxVtlAU~ z_+j2ehE)I2aElIAw+o>$!i&=okB0iJTlkxbZnZlEm&lh*k%7NGZQZ3XZ|XEl0y1GS zNkul5OvHg<8Oj9jKNgFXd;j6SKjVuEvshT&g)|W2xHvQ2$X=Awg47J%=&Si{5G-rQsxGJtptPIm!|&x*UKd3=TNR{MQ>0GzhmF# z?r@b2a*s#ru;VWdFJwO-U(fq_qoups1e>mpGh~Wd!FHM7dL;8?Ow^>@u8J1b{CvC% zyL+`IwIUnsd=9yHU zllzYN*Jx*2iA-R$^rnirS8AsyH#F=>u7-n|4LRLjF28*MZZ&JXDgl%hny@I-!d9$v zPG;Ng8d0_sL?EzR3wC`t$Xb*AiM zz-AR^|9*TfRFn5@%Ikk3*1eQ)l6)F1sb=L>e}b{j?iuvqZR{5!z2br z-GBAf0`9);MB4y`x%)Y!eY{JsR2gg&Z>v@$75qyCx>2%t!5&UMzv&{=+cGUPQ=5A` zGi|z!83iE5W<&*0h?djoDMnd0@;zCWX|j9=gxpY$IW7wu2DZbd8{p0ZaX5^nh`rN} z42qEQQS2C@aR@P_P04zz*ADs5PVgpwQ&&+uVmT*hiURe77qm51spoAT5n}uxL0r(^ zMB0=t5EEY-1)Ot^y+Y&@KP%f-U;k^_Nc>I1q;vh*v8~A0X!>gsQR?~oy6i0Lo%~G? 
zg;p4TDbk~u3{6;QhQG8UlhNC^`J3`eZcMt!a)QFaFvc;BhJ8zMQ`LKVF@cu56sD@X z5D_DjM_jTQk6(~IOSQFx|6;@;iAFxWKY!%%6_7X)bvUK z;g<_KyjM1eF7oky_2Rpw$`y!n`qN+zQ^#EhqmdY}SCmM4?^BcwRy25aUx+(2k0F;> ze9Pg@Xi61`$t+C+qL^=Wk936l+0nVP3wpl5_vJcYk;W13n9}Y-?DCxlG;7kQz3(xZ zx+YbCdIQDJqt-kt1RJ=vzO;$KX0Br`m2r=Ossv3kTyE!A=tWgX6Rl(M;)abCsib-u z)IuO8v2?4rhX2WO&Q|(m)YjKOR`qw6`Ao%>d()iue+ZU2ujBdi{fZKPGYud^ZkR5t zl(KOResjc7mQI3(`ubZTX4vt#QW+pns}7rI(O^qs--n&1=S&+z$Ab=SzuhVGn@8aR zw7%o-N-Q>H<<4$BPqgeQGzJudr&M9a|L|%k-WqGEidz9(c(C@KZwkMrO^{t(KIAlg z7COE;Q!5Z`-un}UQ7Z~bkS&HS$=Jr?19=xb>-715b6>jG8xx%xABtDOT55n`)%GF9 zwi8X(k@#D@jNiRQU8zlZD%V$t8UZrbr8VL^&^^}z+u4uos)_JmX9fi-Ozf-SJ zeF-HI2)%2xKp}fRCjCX^rM(}Pyo|_+`bE<3hSeRG{@N?wD>kvwxQcBf?U?cEa-;HL z-q*EplgM)I^6@;=m!Cx)B3{Acox;`1uC{51si`=e=is7YH2wT8VCqj`+)ss&k@?Nc z5Ms`zZ=h1sq|EA~Y`qsgRVKhdl^;pQ}hpBJu@jH*VnC z6jxK6d3FZNqbQTHIOf~=1Wm#YJM{&bTtcB4Yf(2vwf@?$we-g-PT3)v`H)D3Z58M> z1g#`d`aLU{p^3nSUuQ8!{Gb&M2`4yMR(&46DaHymGv;||t+4P4nPCe}=J%9;^LFpx z#f=cqq-#`9Q7O%bACmH#S)gJP+c~g7Q-dZllVvF%SE55sbstJ0XdypR^J$;Okq?oS z<@Tj+iKb@@`3A(}tywhOt+ajx+2b0uxH2!brr5c5i@3UrvG&VrP(cd;=Tuc_vwjU0 zHoe0n;2LFB{0aP_U&J&)wrF~skZj|Cv4z6YO;7r*9}0>gznIcpQ*qjW(Ms>Q>@q(=Hb+ z>kk42xkkST2y^`R{z}AdMU5V*3I+KQzx`gQG z?va8#=dHGoC7=>t&7u|ED{T~!-`0tL2Sg$8yDT? zx$;FB$}DIxT=$T9PUps%!oG2dgC{zmb3}qM!Y6EBv?!RtTbd1j)3PeiWPveN*#3MH zWZLs{oq`e70IH`@_Km7a?Zn&9>!ltg+_q+BvJbG~l`n8(BS=Lb4QPq2jSx84aLpKL z!>ThER{18mOgdPRg9dMCJpaXF{(uHQ3B&6kGru&S^&_Px-=T86B51)#zw%S=u zbYR`^UI_UE28{sCdjTcNTullm*jiR~S8A2Sma50g`gf*zCz~t;!JtK;-MmM1@@Yp+ zK2*qk*DiwaS#0`q!WrZFa%S^(pR*x`=!!&s2B9)j!gLIMg&OgypoQ;hqk3wdyIP`b zvkmgaZ0(PCwl;$J0Dy8yDr`sLglm>Fht)kX8Kvq84G)@0LOs3RMokUs$bv+nL^`E@Myzz{7_9>$sd;?ja=AEkOpaHj+Dz(U^b!!XZH(w+t9UQ zdKXz)^YHL%y&xIVyHJ)anwjw>!J>)42^P|{K;vX+n>$#}{$vqcU;drO^nwszoV8yh zr>H7ni>^pP(gK7#X!T1rkPc0LoGLgtTmTEj-8b2L>lq8lw!%|uU#=ENWrC=?!&+R41Xf% zeZ+!SF*XCGn*F?pybqHwM_S)uI14pn|5cdVE$CQ##T?2kKp#V6Y!;mM=g%Wh&LaLQ zqT+kJp}z#;>~JdMgu0VP#z0IZ=kxVM9602NZI~5r27@+&cE8$_2WDL2w@|eA5Ud*F zXqNF)x;qu$0Q*&6BUp;XqvQ&^BSPiighQ@RbYPFVN`Ws}UqR4xQp4P&aHOd6A-gha zT*dL>Ax__WK=r7MG!)4oYCRMgSHz^&+(x>99^&)|IuD>CNC*rqGOvuq~? z@fe^>Q9Wi??!$Zjd`hm-Yq{iTlnu&ZrhBP(9}{Lf$q81}y<|eD(N0?b#`V5XA@JvO zID=8S8ey-jw|s3`8#IivH=Yga_bO`8g-%8Brv$7XEn@|8*sp%RRV^YPk;^{p3sc)k=_XdIMew$dsLgyG!Th{$9OOp&d^ z9eg=Zq>+(MYh`#3JU^K417ufn;O-5vT<AI$Y!H=I=U9lW0CdY^N)nl z6y{lc*jp0s9C|+70Db$7=S5ub(k!JYmmaUWQdh*?DJjRceINd^H&wYm1Jpirb-)(Q z28Y}PRPOsP*V{;Ig13Cp?|WyFteRBWBC^zSbgwp}o%>^|xeoPPmJzKz=7*BR_ zz|;MAuV!DN)E7|{%hw8Z^4yEUeYLeMlFi?%wp9=WV?Rn4fP6_8<=N>o{g|t{%2)1! 
zNP2{oplsp2f^iLWb^^Z?<0~VA!O(^8yYIe(aHG|SRu6z!;&2MsqGI!G?!86)4aCY^ zd^}hJ>v#>Bna${;(}bnRhZeqsdI`=(5^gpz34pO>Q6`Y>)NI#MpYEA4{%an|O$$=S zpL9aRnLWR>J!oR$0eh5snIdM}R%k#nF{=~N75a2WtSW@5mX?H&d|41W8xE0<4EId( zOr52j!_{tP(K^SdL8TGblE$-YKMciuMa>s>GnmZ(*cvPl@cVW>bA^tyHnN^q8hZKlp9+bgk+Ng*gwQ%L01 ztBUw4ds%*s_+6aMR~pMneD3MM@S&F>WC4F3vB5n-_P!CaFW~HYQ6H{#=i(EaaKd@x zE5UQMY`(#5YE3X@B)N2r_cSUy=yU}4JuC#8l`zVZH-IRU@jJ4JaWeNKOzuT;}Mz=hRu!mBC=96Ggly`M( zONG9o_>5;S@TR~Ct2yqwJwM!f1dKQ@vE>o&5i0M))6J(AScn8C1lw$UJoBL7bd}B; zcnY77lNu zzv!D&+j?MM8IDWB*FJl2^v)bN$TQ*D)TXirWyNJU@psIfC--;8YcF*d;L?J>?> zp%iX{*&umjxF%9}4`USq$k{ptL2uTmR56;I#;Mvy^=DTxxS zsG{F9T5}n2!(`ll9(wQ}5LTUS6CZwvg$Da+j(G`lXUCjq+DP%~Zismqr-bvv5|?Dp zq&Qo>Bz~#&`XXGx2<5t;;hNFUWwNvvA|d8GiA>Le^i_L2`z|!WbdVxIjd=WtA7lRu zi<=%qkvRdWjH*ER~Ni8*pGT|fOa(r`iP-U;4eD~u-eYlRl$Cm zBbbB@a^l_ZSYu&3-4|U9IYvZY~J!c4XvxV`j zI%rJW-PeEgCTN%V9cwPr@1ycA2UmzKT5;$ok}skn&OY=YS#GruMk;2Jy_f>q({oW3O^nspOHtQZJ&-{XOok(9^aiN^H$D*I^P-Ym zo62u_>Js?K0=CoHBiHUo@_Z=M8)a z#x7&X;VzLcuWikQ34?eDa*8 zz8x}8!<1rOBs^pvDCTQ&6X$a6s%>+DYpWuhKm}x^vdTZ&2wqEUeN`z$Wz?hvvD99c zoX$-zHp|h1)2?00yIr$}nH8R=7vy8A3w}PEwIYeAOAkzZE|GW322HOG&-g{hhp}6{ z)g$B`nS5Oetw4kN)ox?#DM#`~Q6d>(7)|7Xl5rW%&V=9?>5`zOqg|{EaTw!XNl3Sq zk~`}bbU)cvFqxl+Ihj{N9Qv!X)OjdzK973Az&_E|^!kN!1ni1j> z$mMd=b#!cT0o647Qc?28tK+MX@WTu>tZk4G_( z(+Bpyg^o6B$q%B03t=g$Dl4;jrZ0?Uy@Sx!J|k9GJp;|&FKC1Q#7^- zhnR2Mv)TU{pDbbA_N3|8Z1OJ-;+^Ast<@FT3*t62FFpSZbcjK%^Z92SVe zB@4M*=63&Vo*G+S3Omebb2DBY3CM}WbI!!r1_^+V90SQc6WM}*wMCw0@o?u2rY>(U z7MHmH&PYT(RX96?!HEoQPD1X?vsNUV-EM@yqa)Pr@4k(dCPRFGh{Uf0vrUf3@JUk1*sM{}I+uD{ zlW+r-#&KYAS=|zcOyI)OEHUQ0LJ zhEAP{0FoO&oF@HgRNe7Tv`>p$X0J1?Z zrc+cgY=V3t1v{6qTH}Ny%h7lH5FQY5(~e382otyi%gSwYF5Po2sNGSIcDG=`oIfs&v#nfORRzv0xYNyneOuQRa%nX`KT~U&#Ka0c}Pn1sB`(N7@uNnX#571q{ z`u}`|0~gu{MV^a!+U~V6{Pn$&Qp(2_5N0aDbW+^^7g`SRK2;pFQGX7JgN>ImRh6@E zBdajnM5o@IMt8M;)L;MzL`81R6vK~klvK=|tlz%`as_J6PjC>(G<^84PTWA9=J{;i z=?|(4zK(&^F|f8vX&qOde0iV1Z$vH_iar?a8Zc>d^2LX8wQ>ZYFMBV$zxZZ)nmZ)3OgpFjBBe&ebkGHk2(V*btC zn>R<>Uk+Zo&a9^#(A8O&_G4nc^Jo?}BD*@#)5$Y0YtG?*5I; zXRZfF0ssMy40KeKX4*JL>zp6{;+s`cRJo9 zcpc=X-tpWxw6}Wth~A_$vkbai;B*;T+E;?kD!=|Aw9SP3*sVna`0x zQgXOdY&(@V3IF-xeSYi1$NKW^E`Tqw8Y6;ID&db^*Dkt@M0qfR_Hb`IG@}(rxh|9B zrUZPjTDKeP^BBBhaF6kC=uSaVXGqX7N7S*aQia!}RXfss9uhOnqAopqihgxWCsru# zyvST%p@|R)(cswE!A~I8w#PAvd1SAHxr+Dx##EggP8SyY;@FX-W?6$o@T<*s&y%^vJ&ktU4@=x@hEpukzlUM*=Qv z=Kez^|4R*F*2!{gvbIbmh{ z%PI@0{D4uf41~W=Tr`*j&m?tQ)I5wI=jyLMF3t6;3fa$|Dq35K%{2Se%KC3+eFU8% zdX1bU;h9kOUhdIDd20CH_FO&xUtb|k6Hhb2PJf97uXfcMZRD3ee;ihH-XQtV_K$P~ zkfLwdp~Sw|X^MRn+%Is^j&|*bW}J?o(l6<@dLxd=9dzcV^TE2%zYO0{e{XhHZ06dc z=y`OF|JiH`{d;oxz z?STACzl{P*nw6t{wo8Xi`UyJSE|241Wqr79VeA`M-=MVRzVR3LVKVwPyXwp%?yW!k zj*O+PDV=qWBUM1?n_SbS5lz+|CpD}CYM7CpqZ)!1MLy>jaSDTX@xG9(yXUfMS;nU@i{B1b zvFUnDzD;3^KUehm#ZRnuYq-iH=4B2zin~QTk;cF^@NG%-4%+&`#COF~))N}f9$3=& zZL-ToWPMPNRW!j@o~y{Ka-!FSw9NVY{+PJ>^Q3PmUA{MWD1omvCdaUl7nf2=g-kt1 z09DS@`FZ<)R@R09`zGU-+AWyWV?P^XA}#6K^lbrNG3ZBIGBoI%^eMw&qB5Ztnxz^d zl3(WPOFnK>gkH{-p3?-oXOxSlfw202?Q z9Y>{8BKal!R_#gXLPgwpDQ^9od2%zs^@a9RSvfOlbiVhDdq54TSNh9NBC&*Q#xTF2eTdK(nz-U9lFC?yd_Qa^lD*Z*UkOEkBg8`Os;Z~iY5!TAJ96te%UJ7(thr;nsmx~(xpG|N zS&??Bh$*ha<2eZza0Sp@P8HA*0gvzeMwD?*zxsI07VUamOJ1GxUuB%_wgk4f`Cy8m z*1>Ug!TceqaMSMq?~uC)uisgHe`Ui$s`z7w;9hgIG{vZ9BLhZPFOa05t00IlYBW}l zFjYMX8wKgQNvwAZ`H6G%H5GD&lj8VB^W6KiAQ%UK8t)5fCkVtIH{VjqEyLGhgY;_U zO3{zvObU)Q;c#Y|d$i|Nh@rI92P3NPzIm@*YQV+46Za=gM8F4T6$5V1zA2wdthJ3! 
ztN!mKl)nSAHm;^=r=2b{&>1KTk7C)iG)j{b2;3fNz26@>@}MY&=-mG@-RL*-Rtg~S z$cM(_O_5VUAEwe+vy^(|F^j&j zXajKyyV_W9+lgaNv~1iV)~N`rom2%=!?m=x@kdGoTvs6X;g7j`X3=pY$R=szTw0bq z)s;E^9kfzW%{_y`-}LUC6X*%*<)J82H&jgWDOXMP%wM^))&P16EQF=s>ltwG8U1wF zJ%i&tZoGh!KKxg?tiOZ7#|D@lxA9Xf4LZL~ZA;ST*{9DU25XZ;7oApN46Cn(Pm7n> z64Dg3EL!o>{7~tMt#mV9`?#eRJ$FT@R*z1M>a&5tAa+>&AIYZNWA{ngwvM88XdS?( z7d695KZUbS7Z|-)i#00u1rp6LVY^4{4raz7GVu`)*{$Jp!7Jeu!VF zWA|U-n3X<;C~QNqS46;e^UtVxC9ckMS_19sgdlP#x18*Ebzc*`^hNzF)phY^6SnL^ z`k(A%0gUP_18TYa3Qy5awxsFPGGzy`nGi8(|D!W8>t>U{)Gu$e**OEaJlT-keSn>F zZ8lr_#~1O4>^rc{)&I6dz784^aNd(dA5%+9%g z6o9$tH*F3iH;FBv^d7D{AzbW0;+i{ut^5|nVga}r{L?r8_G191?JjQ@uYWecXZIJl z2*#IeA3;GAzm=Bq72Y6dT3NM^#OrRHs=V7K%??^msE`ze*rS!tohY~bBqCvHEZ3n} zdEQ0&G-zrdXd9AB_LaNfkmL(fy*@HqcQ}BkJrK#MuTBZT3S22(J`p^SFD8`3zSi%0V9{zGYCEH3N5OOxW`F$8- zkLU295-gMW*JfjT(@OMo>+^8|Slbte*t}Lu7zxz;VeBUL_bklj-rm#N)9SmKFzukz zW^UQFfDR3mYKDqqo}MLO`<@9)c-&hy{hygXK5;ENb>CNhxrjxGDuW&)OnJQZO~x(@ z!e5`J=6yw5QB%*r?M4g&*0rYX%T(I|j0zcSZ!c>3?#T_nf_d#@NG~+V@wc$XD2Qo& zwU9w|^~p^0#!%xCtdRN?>`s8&#D#8@s(%!lPuFKwsT4pc&ucB)K6W0cI&G3N(k)<{ z3zAu4)?MaHiEc{o;m1+28V(HQnCPeS!CQ-7IFCXLk$CGk^Cp2A6+m6UJySGn@ zj70U7-T`>~CT*tJMVuCHuk+&3;=)2ZurjGQ3=~=w8L+n;5BdJ7h5qQ4)9a6WDj7+F zFc|Zd)w{}Cbho!Cz{~Cs`|F7?#&dN*gm>&zC=@Me8pLnMwxv;O^9;O|+r9LC3JyrX zemn5<5J=ofPYyF**+GAMz1=p%*-c^OGs|eCp2J!QmeS882EwChHHz8FlF# zB8&AyW@J}TR_k^F9}72bJd$S~;)B?&><$=uV-S zeUrV_BI;G~_oj^hHk1qgYj&MzzSl0B&^wB~3pT!z+iAck2xuEVvtMuudcb>NhPy=v zUEhE31SH=pSm!T$V)StA^MT2~%CKMm7~-DcB&g3YxV|7Ryw{8@EdiP;0QUv+bM0Zq zX^owO*X-(?lo%rsqQ;qXb#{_h_jSv6(DE>iUY4h?`%ju7ZV>pt0@?T77j@wx-?nDn zs*qO-PXNVnY_PVu_HS<*j=Z&IXAsTzhPLX-mUvO{_RO`VoexT^d$xqpqp0SWrr%FS zBJum#9rSELaA_z)G&aqaKk-mQoWBe>a^>fnA=O5zlTLQTa6Ng{JhmaM3%}O>w${;# zYdhk&(t(S;%PMjLr|te9&=E(%W)Vj|4<8E)YCZ~NEwMPZYVCtAsx)ezSiv;U9gGBw z)rGIn?EgxPfX5c|JRk&XWxa1+&whL=LGs6%z)ktEjn#v7Ra@G^+SUI&QBAY)Z z{XAW;*!@~U4n-WSMRef_B{h%qdOXFBHAo)uECv5md+G$N#n9_(b|`(_m{t(uIqf%B zuvY8;9lJ`ZJK8=AG{;FxXdUlG{(OSRuLio7q9|>O%o}ciQt=7vy?@;_cyeGs6;S9- zMJuH(`8z*vEx_LkO6(nPPnj%GH$Eo1Q=`{_kzDx=SsD|k@LNsehntMc$10R<`QS5W z-XY7$c@KdhgH1ZpHq$+{6ta*Nt6Kc~gk&czwE=mg)OoT%+Btj-Ih6DHp{ENsdRDWv zwl}JDJg)Q?Y|tQ`e++%0{=K=+ot+V8nY8X(6S$Z&?}Ov{)zaqih8~F!OXAH^p3ffcI1St`pq*fy+0cth+s@ z4aDejAFTkdlBtp7Jrd}j`GRl$^zU^@hKvO58+Olws2e;Z4PfqB*D zC#QsYe)(Xet$#*!@s;fMBPfxxi`Vb?xW3W32-Rl7iBUD?VZ5b`|B|^b-sqlN$NhfF zWv@4+ZeO`z$tyg|CslR|I)02ocqz!AAwH1rV_s!vi5x0BZm(4sYP0mxi5NR6+#x?& z{FNWG=YWijwrJeN4F0w`?$o0a9=i_Oj?*$9FYe^r?vGw0WYwJ}*QtS@*&Y*yA!R$R z9qYAtqFBd|QMYZ)8TP`VHJ|r>tzz%#jRM?5ppFDXS8`9`d=dJ+Irz|Sb1xP7Q&O_V z)35Yqaj_0T=PiyY=F-U}NFNV?NeoB4A31n%zP+QX)2_7|`=>;0V&7$aG7`z{c=BRZ zCG_q&$x>W(IwBS6$SBz^`Km*olb!@;7}f0*`e(xycAkWpj0^G_pL=0MmOA_pYf=2WeMS#9SrhiwC0#?H$7 zx1Sijsp0y7BH_-N3A3{VO=V+K)g6&XzFnb@UG^XR%PcdLH10Q0+%E!-JJ-A$@?_HG z2_2rE+^dh`*||^#SZN;Dpw92tXSl@43VAP@TE6>ll~3>LTbg^<*R)KY8W(Droaf8q zqxV!Cd>3=@PgsuANwpcBzSR&2aNoWO~SVja7RdyYi(hQY{>U( z)54t|%r^Mi**80ik!>MGjP_RZ1k#m+Hh-k^LcYi;#>a6^!v#j`DQXQ zG+)~1{=D4G|Fb!9@-yYpBA%L~GL=cFq%7DMsyYo;IWXI+d@$)aDy-~50yC1}&f|%; zkZix8bykfBJ(KClTK3WdvA~HIvkby5!4y9G=Qv4L}zKiR}ijKq7Aw~WQ;4gLhR`ne4^tSH&B#Q`sUgPFoYYI*+mPmlYS_%*>; zwZsuQS+TXBZfOuWeH(ykCCY=5Xe|1lE3S+riiEx;)f@cd3Hh8`D8alQi*bup@(lV_mj1e>5vNA*Xqg9{p(I*DmHw z%;cdvxT0Ll+f(rbD9gt>FvRkOtqwk9DE&`C$_noN z3g9tmP*}hUbh-7^(>B?K9W=8_iGK&^n_nZwbNxZ)q&8rbIOk5i_3__vpa0uIkN5}q zpEGe6Amk8>@9G=hTDMymNnIA+U*SH1NwAW;2`7^w_U8f8y???_xSuMM=|13TP%uV7 z=%1%;C#oHER$XT?U}P~79Pm+e8lwfkw7v7dWg^4i1g52XWNUV2cb3v#A!=hhJ4XjR zamTlog*xs@XY32aiBa+xaEU~c5>{zz%iZPb=3tBGj^!cQ;Dh(TAKm$rs$f&kk|bPq z%)C<}-_wQm*nN`mm%Ur!}xlaML*J+h?Kry|L?HZ)!aV_ 
zAwS9|Qz2IT+eDAS>y1{&NNrj~?(+Q$9P4$S1YQ13CmkyK)j3}LSr5G6n40BTih0{- zbfG8R85su1wtK7a`<|O?gdh@r&ZWa15+YM!6<@@${W0l8S}=gLq^BO-UJ;@E5UXfN z`s}{+73hQoe>QNDQgOpc=Lwqu`~Uhjb>=h1r}v+&?X$JKcT4~RS9-Y`a5eI@FiZad zD-rNB#~=H%qa(?Y`6b+>h~q}zYmvjOruF?5pFwuuxba05r)h92wtHW|Kl@XG2=qP6 z&u7$^0o^+7JBBWx(HEqzyVPB<^YBG7!oc6-sn}tGb{5wJW z-SeNv+~d5_!a$!>_&ch!ir`O9yI@5lTjOR8iA8Knd>%-ERt1M2N7V7gCU`mgG;Es3 zTIL$oPsaXGd^0=Oxn0rE_BZO{<_h-vtFKQYa0B@f1p#T0l9pUTI+hTYkWNVvmu^^c+5OFjUB16R zd7feB%sKCQ&zUo4J}b-BvwD;;djegnJOCx~ZmBJGrLs=@Y@XJYRx!^zI8IwS-J0>M zrwB;6-QdeWg+0%()(|nc7$}GqhHyJB@`LwJP3$WIN=T?d2u!y;<6LP~p18G_QAAU4 z&n1-JEiw6#+&!WmT>)Jd=oU%u9+AZGbSK7%HO2gkW8ndcSwJof}hEaUrUaLb>h zo39w>!V_f@Bv4;FAxN%T3ySAcoC2vi9^{^q%spq6%EBtyuZ(!Fc~yh`yV^gUz|au~ zjhQPp(f;@zsB&*edN*l#8W!Y5emv4Q7qA!h&EnE@!KjV%o?$5}J^caNwTyf2Lb zUNWZtWwR;y)Jw?I&f;B^2I9Z+U0|dD7ps{`_Wu82wG|A?;5WyZ3AAg4fE4-CJ&P}E zOn+|hTkdW76S3(A_>BK2Hks5fq%tSfN6OW+72!8gN!4gC6;>Q43j^}r_q}8Sv?Ca* zFW{D=rb~~cjlFDK7JGL0yg4@j0B~2%m1v(5xIDl=EuYNLo<+-)U55Xf0-j8r@{jwM zpCNmFr${RIxQrd4>yt{mH-x)fKXLd(O*G3cX13A#@!^+KAg#RS+kh#?FkTDF+3is~ z{5D^*zs3SQ@?lL-SmVzE!rm6YfW6hn9Ka^dP*rNc#J&lRC_!9_^DKk2PJhlJFu+3~ zlAeRN2BT70EFy227dJ%*d0X?MypY2bEiv{s4+4D)?xJIXOc@C7BVnQI&>-d`I^pdC z|C$q;5QpN9%RtPZW{o2uq%@aYf*hnuh629B4U$uv2xq>*S7TKTY?R9IJ2XRb!bSl( zfq&e&+!kujpBk401t4kRQtBFKEfY#-*(_ck0^C+@t3>{T=)(kpOEQ##s59q6uF|OC z)zL-0O+E8S0w5_=`w7omfp6`ERL8afT!d$nW8g@7WA&i|rg5J+uPg~G6;by?CEnB3 z0{^_he-BX!=nX-b%ZD#5^P6Fe+;RUyqPN>?Y8uIA~bw=*wgdG})DCUKyY5$)n5}q{rJ_R4Z zIh&=o#3fLM{h$C@H_5)lCkHW)A}E|;2W)Q8vEs_}Tr`p~3|~1f3;~WO(aX5gg{bg1 zOw9WTsV3C^S_0PxjG652yf2g&vtvfnhU$%Y2y9wzOpP5;4XK`?+Wg5?5uocF)7x^P zNG>ee&!847?|}6+CydJh;5~HuLt}t8NsavGDUfz=O~;HTLoSwFX7LRkv9b#*+k~z9 z%e}*Y&eY-4NYfR)U4UwtZ<se$!3m`f4;_QZeg#T#Y{cHC{ z9O^y*hY@Ls{`G46`u$7UOhq4qM(~Hb%ZQDfc%b+3D`}rsPG~a(Z*6}A>@n=2GNwU;CZ+Hp z&mAq4d;=Gbb8^OyR1u2ykO6J=uNUIP=7pOzXxIy#rj2&&6Fq+b0-pTZ=0;)y!W5)a ziSj$npD@;;l#)8FFPA0F|E>5Pc?W2IFVtvk;40)bV25?={lAd)G8wDeaUAK{E>lL^ zZokw8Mj{4}=1B??{E1*#`x#-hv@<`i32G6j{Oy7e@$i#8h2*IK#c{~#pm8whhmeqi z-KEa=CP^|qvDZZDlED4~^?$!rFeOM;Ci0pVJ2E8~dgvp@U#@*~RQ%W7siV?;#Lm|> zdkXzi*#iL{#{QYVHm&)aMJ)5zsGJH=6`g{$MS@%5%!H!r5n>GdOB*hp+_`{W)87q0- zvd3LnrF@#QY)dA4J-&6Bt@N6-Fz^aJUG73=N43;`^M+>bCw8ru{`d}$dMlpX76dX z_AFi(i|qMrXAL~3s#Ap9_aw0gi z)$14PavX=LhVwy~wB!eYHc%$&-hME@D^Hy298};UhnokiQ^xfDTm9fC=MhLVK}9rn zH@f=`5uDqj5gSS)#Z1Z^W;7M4oKtIitlcrQ#*RNkkm$T~r|L9V_qRRCj-^u$h(`9w+!VSyphTBEs z{Q-P?88JbJYf2~VV)L92zl0ADX~X*|Yhn&0#HeYG zp0ALfdV;^e28>y~L!?pTCwab&cO2h0Qpgh?bCN$bJULZ^z!p3TT-`setT>KC-^`^8 zZfnxVXbf~P8X-R9t$mrGgU3*9#papBG_PEF$dX>mFft&i7Sh4`GbcHR)8!sN0GAud zKHA1hTwHH09y{(z*pW7j1jwm=Hl?SpwP-(q z?&!q+m42xzK~zJJR!j=8ph(K4J;Ollc6JO$MSWO|BrtPNok69oA#oTz6eOq3a!M|# zo96`Zm zgI2Jf(^VzRq)2(466=I5I_d%sl+tF4Z5j-5RA^4;6o1(? z_|%ar#c@twW0Q~yU8Kf<^ZvIQRd59|?7$#}_OF0*o(DqttHT-7{%RxGuoH?71=l8- z?`e+ZxTI%Y(VMLm0tn?Kuamk^_K+x4BM{1FLBH}`P-cscNWGeS)%qAXr)pQ`23vtKd?}jq^iIoz9q@OJsFy&F>#x&{WH1%g>6<2fT}c+MpUa_)ga-8 z2rX6!Truj%k9zd;Z*@Dvp^K5b++AG5OWRSrS5S@atHmK&R&e;AAXTqUi zTmXc>vcdKB{p8OlSm2Z!1LY{|(wJBMN8Q$YeAeapkQtU`F z0~GGRc`Z_}*Tc#gvyx*>LmG><;d!?S>db@GSzloX3SE|eqGH;u9`|O?mOAEp{F;I% zej=on(Kf(Mmm;hE9q#ff>VNVP%4kzw=><$&rFEd>uOLWNr=sWvEH;JLnO@yCPbn`T z693)K87RtzH$>v&fC9nB(q7Gb;QUZ+vqYbm)=>`6a5+F8rmCR8{*pc|`F_E55yY|? 
zk9dVialX!aw#YW;1!Y+Y$DuQDd3;LYQ4k_mujw!HbKQFVj~-pCeDYt=g+hOe^N%i_ zf0XOY67?kfpNn_=cpi&?-J$lpV)*$VmK#=Q6-dr3zW2|4ufsOgX=0ILFO$y-D_=hH zko;-6US=@|%liJKPPD-9(xiOC6)!SZ zQC5v}M1kxWy-;j^ti%vAdlH{J>vr^N{@2Q-Yk1w-N4AGzl4XYtaUY%1|H?Y*#;45L zEiX=~H%a`2_T6{y%r;=l6NS=bY9Y8QWON@BLe#@R-#d1UM8eZV`_pNUAy_bWiZi6X zx{o{4Qp5SUY4FeYg1$hsp6H3y0vi=ZA%-zAvlmLY${;ozH{c+h4?J4N$1 z`jhjnGHzT_UK1wV)_1^898r`CVu(eX+H}j_a6q;(tAP*JXWfQ@$91ZU1b6jDh0y=nA`dG z#7O>jC|Puch39jc|~)mg=KGS zn&Y_*5fsqDkgq{J#i`b}=&H@`lxZ5=mbATh7<6P8fRl32iUzVzzjW{t85SYD5kN12 zZy5Q-Ob3=3$ij~F;3HPoq!n)gf^8SDBT4hZ>3_Z0(fo?Z7|#)X(+?ID zie<_Eok2oSy1~O(zqKDQYS2mymwG`tQp4z~P6tuME?td7u0H`)g*gCK(>0Fgj|x9Z z^H&r^i|9XwbY*ihZ2CqZp%;br#?EWJ_^aCVWtVv?*ol|$;=+*um{@OZ=3xTp4imYqh4sq(D{F~) zMt|O|yp@KXXuaPo8!&`XNKF9f>YBjy9~^V?l|^I>)9L3^t}PI?RN*ZD)$Z5~qop*I zlDP--5;R=U1$S44_{P$QeJ=$FSit~aw=Sr*DZV(hP*D946~*;+7gA+o4|hVUB%vi$ zh;8sJzHD$H>?KO04i4Y=ljAZE)kW-dR!uHzloK4r@tomxVGQp;;yLsCuS60SQA2=1ihmKKl*cv+h}RFDsYeZI>S5=Vu7)gryC4f zncvEn#`yaEzw+#XP>JrQKJ~o`8H9E_+!s)feD= zy+!&ia?dZs$MfVc#q?AQe4``uZ|BP-|3Yf*XI%?2vB)yuubo|t*5Ng_khh+9kUw&G z5JL}&>@*RynWfd`zo%dN~>oLknQW~K?-CUjw{ zq1DMCb2uB|LhbCC9+TXU$yuslSA+H5=H%TeJ8DY4G4^H;YM3AS;s7MVnb$-^#V)0E zl{u~tRAV43SPNgA{@!c#j?ho+29^_le%ymJ&i%-i#V3Dd?+*%!2J`QQU;)Zngwz3Z z1~e+TPFr3&Mz@U`(MqJt8zoBH1M3;Ja~e@3{d)ktFtO>)0NvTDku1)!qjzFI*0{3W zfDajP3yHD!Fq~Cp-j0jz&TuG$>iekPw{S>!kz@`Zy#TrZ zyG>PqP?DLK3zjuTKR}+Bk|SjE>okM9|B!SaK^dv=oa{|F-xoo?*^jG>kjmE*+?S8} zi49~F)rM=lU`4OhOh8lVd{*GAF|KvceW4?=V(d?A{2oy*1dwYIkBfpjqk=QqBejwg zP9FB)L$>A9iTwI*jt(5^KFWZHFyYsNqWE3hf>j~L_x0wQF<6i!bZaqzf-Vf?M+*ho zMd?lSF^XBW*XpiHd;uOI`Q^FETCYba!6WLwzs8SYtN6iV1=-a;jZ(*Ru3q3FqQ|eq zG_Sx@hYtZ=UDVbSygT3S&d$Hk`^^+zZUWFC$u_(4)0fy2v-KYYnSa;p7CdxhOZs+< zOlhA&r(2I#&L7fSPGbYq)Wxli`_Q|+p<89vI@>&3)ZdmEIe0rnz;2w_Urj5h3GQ)H z)%+zu6}9HYoW$a?i6)f!1eP<_(8I+EG%WcDa(KfGUk{~d2Q5LHwjGYtf%{;+(cKl+ zzHDMlKXfF;+01x|Jve$V{@cn`(;_C_!#6?2 z3)#uXz*>5XAxvOBeIVTzDrOxFTz3)JYa%!o*=KZvFI;%!LgE8cXEP=1K;AuLR2tZy zOlLVdy^4q#Y!1(5xV9npZX69mI=_r!nVRUe2H@|^39b{p6hwb_835jeKv91h=rndHhOYx4LA#BbAk>k`gpo$jWCT56vI-4!_@ZXbGeeDq+SzFrqQX%4Z2 zEQ;r{(0Oex+}!AnV|$v#K^}#_hGK;H-K7Te#C97(cc#lbkVnqT8iA*Vw~XezcEs zf0ssSX!kfXI6dvv)Mt=LqNY6@?;D@C6L#I%fb~#uza3D>FaPFS&t5Lltq(Qg`f)#0 zPLp^+IPSgJKaUqu9I^*~TJFKCIN^AHuL)|SD}G);yWeU|5S?`)VnbPb@0Es9*Iopi_FjxHl8KtZ1+!tNx{d(#E!3dNqY!mlG>20M6q} z6%5@O2%5s&m)kjJNp&=y?WKv!T~$~fJJt}~fhr-E$9P21Sz^YqJjV-e>^RmhO$L_v zTfzh7Ba3;U&fwCN58c569t9^1bdhWfT#~bCNIEm|u*bgCNqEL!2|WSv=x%hp`D(A9 z*uOck4%8827b92IT0O)RyUsk)FIP ze9UlnvO$@K*u6<|3lz5#5|85;2gc=s`R$u`EIC4UMo1=Mh7}K2+h6} zHlRNoZ*e#Oc9C95W$EBVVX7B+27$%rkysL66Odix0LQ$A1E*s?&CdP$!s%a;Iwy<4 zD{D!|1_`us;A9Bi6?XE1hxOdHg9tc8 zFGz8(RQ%Bk%F(0Vd8%;0C<>nffXnL-MKp-36BQZw>K79CX}$ewTy7@+G^1{0E!4Px z=Iscz4sqHcyl}esj=#Y-DY^1Sww?|*6ZlPo@wqezB?x7YiQn{6gqg7ZJeS?@e(y%d zpO@#%y+6oZ!Q1+WzB;;Eo%Mq`VBR|oCcZI4`;pg;AQ;T05DOiu? 
zig}$r-Y}PXauHuPzvug$Bx;p6mL8t!q%yGwUJP!vCaRV?oXsS{a`ehR;+zUaiMlf#{D|yIl$w!RXZI#qKm}0 zPWJsQ#!SV}f_qu><<6m)6v5ZtKLKK$ia1zw79DCLu!a3Fz`^pcWN74Wfn99MD0aMD z5P~LBFKWzKlmM3HcR6CY@cps>T>1a$Tr0V^}!XIY;4^h*>Otc+MrMo7|e^R zdzCky%UZgp_qx;`1clmyd8ftGViZ{b=)KZgst6I?>tE8E&9SJMi-F?4$I;~eI(QsU z*xGKr^dg^!@B&`PF}86_hkI$Qn*{b_Vjei0!<+&KdNVZ%)XnSTE3 z0b9^*I&sf;ZytlUC&!`;LyQs+dBpTh?af!BiUn9O%R5vmGZ`d=scO|hhV)rJj}@$E zn3&!%^gsd>p|w zcK6>_xD^I??s0 zK_RNz*#Y?$3<{SKY189!RtEi7>%LzHCwj#Q&u1?_tN@;_c{mFY6{dy!!$40p_U0x& znxdB9&6kCkh)f{>XF5q8`7D?{W)k6nC)L~;ZXA6Y;rYts&9|-7c+jHw3%RzxE2&kX zcYL{Cl0Us5I0q)blxkCR^UJC^Az8U~?;>t_TamG~%0SCErceX7t#eX}l1_Aj8Rq{f zBOoOZ3JA-}SXS`D@X$JIDzG@c3G7ZzmV)ud81YrUG{oa69Yt3;zZFc?7q-NPpge2< zdZiB?13F)L5j0K~kORsiG06LkQ%fm37<`}0yo0!wEq#Lr&)RmPX#)SQ%f}?M?C{1< zEM_lOy6y?+M#QlzZjP~Mq@`9E8Qo#g<GrdJg__W2aHz?nNq`&FUDzZlUCG zA(4!4SCF28JkH$jDc#-@hN*n+2Udu<9+GVK47Z3d2ucc8=YC5firJ`7kxhTf0IpWf zEF-09=|Pcd;j0w-o~CqYp2|9$!pjGmOioSzMoV(amvLV*9sH047fY(5DI9xEDO$0x zOIe=gZ4MZ-ID(;G(Ic-u9(j#{Nm+pwFPFJ+mwFg^Y0jIKxk&26sl`!W436s)o_z&P zN*KmO)($$V;`Wl(c^ndX+u6Uy&T0hzx~wd9xVWKr09_Ve%*QXa<1g(a7ApeN?$S%P z$6^OuSFTMPz6E>Nnd0OIhbV$nkrod(V4RbjiSKiilrI+U@!+VHRmn>5OAPjjqpvQ2 zt64}eCbILOMHhvoV*o|3Ryx&q(;|U;ZMZzQwC&b&qIJ9ryRdV^zL-DITSi+Cy94Mg zqdmEzLJLpZKSv0o)?a!|zVE?R_lfTPqONAT!amV|$Z5h%>N#eotdD;OQ#tP4tN~~C z6R|LT8$c|%y}ZR5@R$*NYWPE7w+Y7)VhU!-vGXWU;&i<$Aax}1s5n|{{^{MG6rR)Q z9oGS-%5{zogZErNH6m#Bl5%!O2`N!6t*oh6Z2$TY1f-f;#9w8YSw3(e8WC#tV(o1G zEg%171GXoQ-=IpaAq9#^`XCOozD`tx)B)dHJS~#{mzUl3>&re$hb)O|jAp>Nd9oCn z7{@B28NrK27t9n_EwglD?y%%dboJh(j$})=lasYYlIzTzdx$BLEe;2NbL(_#$xYXm+g4tF92BX5)s4B2ka0O#5#u2q#KsxVoiU8Mblg%O=i&CF;p@K zLQQ;Ge7?doY=@5qa5jtpSCgMBn)WUWtp4Y@h>obFF8t|>DLk(o)o?l!e_S+Z5$0!a zZPu70Z~I8AR>YANGdq}d@Ke;*8_@nfkTea~`gO}>>OecL5-@lu~`D&@mptv0a zbzec7%L9Wvj~?`N$90%)vilC91U3qEOYuwQ(@WwaXFy{Wr!HPIpcg#dwy|+*cB20* z*P{Lg$7FQ%IY6mVH?qUILClk=sF#3iCh45gB(k>lojG(ibk-Fil8J-rTWYy`tZzo`Kqw&~+wt})>V`%RSMyakYj}Pzq3h85Lm`3ysr3NhgFJM{ z)321!4nKC>Uxv82tI2}>P3nEZu~yd;BsZ_T2P*x)UEjdtI+%lUHbTT!H=e3Nqj!iLP`pPwktlthp!cB~ zPL@SxJ{|DD zpT3$^xWfbcm45~Zu$qjGC_0PWdc96L*B7Vp+xPJp9^&QP36y6yoQ$v_W`=q7T}0}j z$ExWoyykZ06As-!tJ12F5lKHv9a6Y{vN~}b&8o~hwGQ68f9=79!k%TQg`pF_TRjJw zcm|sLpU%YL0lu>3;@r+qiO)US zVnNI3Ink?ZEvMo@o*Q&kmp!vJm?Qn#oP|J5%4$rXlFj&2=6w@f597A^N;D zSTAtS=M1EU(lk;JYCDuX1<-=|m{k0|3#S|G1WjMOPe-Mg)IGjaKIY*azcun)@Cd;% z&3i7Z3~E;cOr$BR06wzykpl?jv2sn*``BcT7BGV*+Wu2i5G5Ny2_2u@Kx4e+K(0Ao zwA>lh%z$&sGh?4E@S0R?Zfk?yP-_YxQ5N3c&>PU+G|8+jclum!A?BBYz`t@y?P&SA z&k=>9`V>**rdh+jhO}m+d8o-hwe;R53rMc{;&+`y2$37izev5oiwo_#0Ru|P`sao+ z9dyYyIg|jkFVnw# z3p9$deTLz0&(kwr5>o?Co<$C$?8i7 z>56Jyz`FaSyAjytKFuZU2(3_mQKa^EpJncLQ>DR7oOpt|;+-UkaFSENG|}uMNGOj? 
zMoQ*p8#MOnK5krvW{RE=|C7{5)MqY~Tdeg|ex!60WD3vp)I3Z?DI0a+@;HWa9eJk> zn@-sM!k2$QRAP8Cd_X zz>W}w=SvTixz)do=>1Qh3XncG{Pkm>gJ0Lnm|5-S_IA4rX8P%ZB0rcCoj8dTMP%R& z=&Fi{a*GD|-Hi}_x@ezs>cHdCQ$BW&WljNlCG8h-7oW479oK>C{kr*W)l%3P_;N0` zT{kHUO4op8^k&9lu4a-i4NUG`-tOb_wDv+d-Gss}m4l?41{cxe8~z2B>9X~5GvQ#; zPW!rXya(F8fKdJp>eVtIKpF<(X|5U6_Vuoh(tFQXy6NH5CarJY*CC<84X?b!6vS7b za7XSxAgKtUym+wIY_lJcUBQxJ%o___OVfbrV^h2=f!Qyp-Uhjv(u($V^@Fb@LP_iM{1DqE1r_sc@$x^}a!CWJhXvGPsPJOu)I0^{HLrl)wxjq-EB8wgn%b5y9S z{|IuS+T@CW_J{wq!yp-)L7=@?^nhSc`(zGr9o(&h{Py=I-Rw>9nnRcLRnLWx68&BH zUW0FebJ`JITnG5Xmi6?icNJt9&FzW#BQm04a&Ln#Y%L5(kw53CNb484qDIwS3(~z@ zR^eFtRj(NEh6#AXo7cb_z8<=~`nP6oSIK(v=ul{Si3I>bYq~|q+g_V!zS4^z)pik2 zJDXNz9zOOhsjUA*C@C74Oyl0&#gkkhHwL(a)-qQ2Zk8h*Q&~aGtr0TF#${KY#?O1% z5ORV0VPHLl8fx&;$Idq0^<4MfMnMB0s-;2h2((wW6M;VI)B$};_DYkfS4{>d8w%7m zn=46m=eSI#2>q;}{Y_Xd6f9?v<(uN4L*p%fR9#`zOl}L0P+xv*o>d3+W&+%CG*E(b z^xn~^Ezr~b!I_V^^VzZ>xzs%Fx(RDF;yKmk45;+Ue4*(6XP}>BU~^kOFBA%Fx%Zb< zL^PnzX^4R^nRizFsM{Z)loXHJoDzIF`Yu7cBkOszvP^_IM;sMMVMJbbKURv42~s|i zu@d?l8WYlwG>~LGrIrBjh9jKa6nww)e<%UQ3CH3OY zy^4s4DcI{~&`xgqKKN1@&awWJx}M7YL?Eil;0E7SJ*VCCO3~nwa@)pomWvFKPtc)Y zRhE54Bf$|~7QV$FVz(qk`;D@qBf4xT%koB96#D{!+eG3fQ_RkA`40Znlijp3j zVgxWKw{->_)j9lM5BodhQV5h%QRBH&{M>xOlRt10$*qd=V<>H*bcpV3bM!#iY0Kz; zfCj~71z#~9DIo~eI1iwSx$O1qFToPOiJH!jYT)}GMfhAj>bwJ{#9ziI^omJ+x%UMW z!_qm^U0;~#V*Eu=tF!^oTv|8tSPr1RXo&Chq&W`Q%D%T-Ltoj<;q#H}c1_%QKI*I4 zQ=3>xE#VPzB0cFhIq)@tE`xiN>gEUB#xo;1pnM0dxGoOA1U~Obh_zM$f8kxG5|P5j zxm@y?gTv=fqq468bau%!YyZ-aCGM~cs3B|ayhGr4)V$&$<4<)O%`->V+y9iUB29&j zV*?0#yng{?ytD(%uet`?`XB#Z5gtN8C*9o1o5mJl@{IACQa07xjcato1(U#ncPZMZ!B-5aL=O62bF-O3);XdvSAUf`TzSIie+7Zq zP;;T@cSl(GNd3)(A3j&9{%*$QE!|5S)bNfJBq%GR9bBn(A5N2usX^Bnb*KyID-nQA+FMbh|J}nyL zs)nRmqM=gfi7p|?3p{6Sbi*``!}l$0cks(CrI4Ayc^KY9`v#G#m;MEj$a>aWlHK%A zPyes!l98ii762CqO1F__DtCg)-@XPXz|{x~mgl(>xv+Q<*GcH)s_D`taBzc^l&}~R zX*&2JIkn{g&Gtr(DmYoYUE&qXP8wjCik7OZRB8f>HC>{?31jlH=d74Kv&>?uCFO64 zlkk5eC$RqIBXLlJIygI+>|0B+TK%y}?q7ICzROGtW(rfCyy!)YX$nx$%J6foGfh^9 z419^MXZkyp6eTK0Y8;C=FK$6pB?bHCp7%qZ1_wkcs}z>XoLQ7Zh(cY@yfeWf>%Cq> zAz&<*PN$@kazc22qTb&f4SPXQ=4%s>M8NxWrs3|lU|Wcf+9XscK(Ir>1Dv>p)7a1Q zGy{B@cYWnuwg8#BrJ%)72yT@RWD!HW_+PIQGt3B6)v>Y6IL}q+vIWu1Az!TQh9FJk zf&d#}V95s;Ple*RB>)2LVKe=-6J?|h_^f%Vn;k?f)(!b1+q0)6r9f-Z=$qu`oWC9_ zn#@ZhBU2@`OObFnQM_n%kl%`kDC^fipXGK=zGt%dlO9p}ba_%A1(E?9XU?>cm`r~r z5ZRk-Kx8FU0-N5U#>(&S_P3p1VG;&xaGzuz-BK?^gv)2}@h$H9eHY0T)40)8`A#3) zEy7&rc?#@YjUoXGv``u-Hr~Gh=5h_V8y2k4f-?rjp{;d>xlGg$)Xvc+{04sr-}G>o zM%`<9puPkyXpWl$j^{g=S~YgJKVYFvlp<+}KRSG2nnbU^W++R~8+ zQ-ws8Lf$^}I7vanzN3Km17cPyzn`X4XM@0vgGeB2-P@cWI=zDx)Wc0D zQYkFH0!7RM@FW4?Dq0W@o@YAcAl4}@#@=Kd8`PaWSx?h?>DRq!(l;j&B=-jUWy>Et z$Us((t4<_>b$>nwjLN*EezlZ$gi{%b|Ic2mz!p2T>Q;e~40!b<^kJ?_E3ycI__C!`a*E|##4nb*00TV*SlJWVH0!)__~%7B zGx>V_&_#S+*txY1e6@o+lG9l;v8AF8;bR{>^-}SdFQ%YoXE{&gON2K*j|`_h%^|J! 
zbP6V>W8cmyN@btItqV_|`VEZ>t11xs_LHxD4>|VBHzdtH4~rP@k;slI4BB4Yt)lk# zs$R(5KpqOG(91kcKOOtzRp`&%0`Fg9w&E&a z7ptx*m3#xXZ6~|H*v>KsYt}f!ik@k=d@wos-B8VqaY==wGn-9NFcaq_C|TO)FAdxX zwO4lsw!QJE(Kuo^QF~|3{VYDu*Sfbr#58?Q2sa2z6?goJwPio&m>-{gphmA zvA)3%xve;^+=X|E6O4vo_zkuTo6r*8rfjuRI@*d8D%jy}1KXXpGKy8rUdSbEquvyV zw=^fK&Yu$Kz|&9exzb9q!v%s!-nw@Qtbp|#aU0X@(j_{W*goWf$A|N*H&%be%=>(` z_r41Zz@wRrWFF`tuA2m@In1}>TXO6Z;a@BdW$mAOE<<1`%<~HE*l{}D?)7vmf~+Gs zZJy3*KoqD-e@)3clH!!f4+D??34Hp62n* zh-)%s`v%kY3a($Q_t+)IMus$r=rhPw!;M&Qcjf~yS9jL)9}nBrOS0Xzb^bt zz;X>rjyg|Eat*@_e66&EdEM5HZGgcKKeQ(~TXTf$J)I8gn2SZAk6;lq?>D~>e2or% z3>F2usJq(*7uXgtBzo=@GvK*%HG2Z|e92Ot7{B1*rK6{pv55LTZh^vfY(X+%Rj(GX zs;l_yY@W-IJLwDO<0^dt20P$Er3@VQ$9mdb1jug&S8gE-rCdDe426Xc@ewiA|_Ds}bC z)vzPT+P&q9ri5r=m zSIYaE*Gl!X0yX?O^+=2zi}2Y9>Qr3qBjWS;%YK{5(9U5%yo{KGt#gOEI^*DR;tia( z%%56CiiOTeD{PLD^Ei+i@nf)^>qEzh-{^D6;Z=urQoP}(8g2t@HpI9l zuB*)*7OP|f@8XMhy9@wY6db$LzkQuJ9TP26&fRYbxUXLHRt8lGxP~g1o_2lHOiy_S2rePRFRixfw<{&FEp2dtH{Qvx>w>4zzyA0k zq+X%(z{siR$Z$aP*&$A~BN8fD==sBiQ+tWr4YIvdR->eC>l322&QEsW^)MwkPyTx$ z%M{z6_g-d;d{hTJjf>D9u{oP^s@{)(!~__I!<3M)!qZP@bJz@T$tAJD zduJBX%6*(()UEcKs{`uAw{GO9BqC0SjW#^Pjr3bGO+L^LF|7z*9~=60*N_TW0axG7 zfN2>e8VkjI4kQ&LxX9@~%~`gkn#_b@0m!C44<>U_=F*Bp!r6otl{?2DD*oPL9jXn+l1|kEGc!*^5|43XDb{3ZHn*c3jz5TGbUZLcO*-GxZgObM?#@ME}; zl2w->QI68w{$IXsBam0A$97F-cikdRmU5j*H-D*IX$&yKX84dxg<&26Yu~bNx()wt z%@p9HQ^ysBcwOxfW-6PtL_=AXiY!1hi`Vf>X7 z3z!}^^Y0+OU@SVXgtjHW$JMllA5OS7^E9_56<^gQo-r(56P^kRHNDqDQfA(CEa6e~xZ;fl7 z3b*-J&=ztx!U3>bv29iA0en(s2vUgC63qSKoCGoYrZ9j-57&kPRed0LZ*L_%s#YG+ zlt^2e_&kZ|s6EBn=o2m62tyarHapL?RZ-a1#`*c16C+d{Z3x8t&s`Y3RBeRE$cfaNcUF(um<&ai63kDqo20edS6*#RpnBL1rW~I@5|!bK zn1T126>GJrZ>L@>0Hp~UbCz09k##OM#lH*Y_xlUz_9doq%Xj3I73i_R`7k3a*^9ZG%h+zxY5V>W z1!xOf)V9l71E<3zyTN&cPW$RuW?n0zvc}U8E;~0)1*oVcdHbUEtUS?DvA|Lnw`Dtr zs;#kOdsgW`Wy%JjjdF(N^zSR4iiWy<6YXoe?SACtJhZR5P@3Lzq&)qPA{dvK+kLP` z{=ab#H9$njc^Dr6rt>WFnstu6eIDR@=v*H8paqJ^l87KkyZvjaaxO#r;Ro9+>(f;a zA$%Zcq+Ai@I$1;tC*J{QmS{=|;eO3oRGSncN&N0WHaJuuj#F2v;-ycsP`QdIe3><> zn@sV;gnz4iP_xSxen+GwC{EAZRRy`QhbM2eM%fE-WoVfM{j|sH{J8V|R~UUP{JR>t zOXLkURMZ>M4u?EB1vT>f;qD!LBL4)chAvC=ex!1G;r@FvO2$#?>=OmCwZ*oXDBoc0 zrH;PwSY~#VKfPFxBb#Q1AbMfAvoC6P4*YP9*!SCxT<8TP3RW)TXiywJ%NEG^Ff^N< z!G-!In@m#lLQ}^y8;YB<>XRdVLv%2k985D;&4~PB{ zfOYg8MRl^HE`;y9bux)5yW)23@5+g&7k;4^xt}497FA-`Rtv>t7(5m$^nOCt;ozfc ztri-ZKA1-DCh^C*!@=xXVJf}UqpBE3gWsQ)*--3KIZwr^-#SX6D+_MBu%Q}7a}>o6 zy&cb?D`RepsD+lkaHK>pOtg1J&EotA)9F#jKhzz4!I6hivuXYl3J25SB4280x5n9! 
zXAf4RwB>wA&i+aW`s~-*4if zm~u~BT~xXv%bseeK82$UTD0h2if5tqk&;iu3Pql%cR0vC`m7qN93lBgY>lt2IcntL zRHEZfK*j}hWzj!j&q5>r-lFK}TMdtinzeW1kx4H_Z6)VH`A0=9RMXZY$kF-fAFGbyHLg{5)PoY|FZA%g7oUj5 zcxFe{0(+<92c6(Lw zElzv&(7q?}55xj)nscC^SMi59Uj3`Xxq~l_Re+7@jrU#jEvHD`sIW)U362|D@ejoU zs?2!Mw<;n$qTYS!@b2I%xpRdL^(>q#gI-C4S~awh-XqZQV|SZL$9*xhVF#bjzlv%d6A=)Q(4_ZHK#263(J2eeNFP z8|U70#vR+ge3NI+_bI=p&X+fBx5JymlWMs+A64$z1VH5qW4dbeV&_t0o8`OA@T4=B z`+4q_DM+WDWnkTW;aG6%iR7fY(pSdDHqke+;ro9yU2A+cG^yUW7qp~}&$8iSlY71` zNlf1Ul;^)x__gZ&S;_ZHsPLNP^G1&=&uwal+ug2=a4b17VH9pBTN!=llr(M0mi z{&2fH6p_iE@^CvI|Ek-SDPDX_B%b;!c$hPr*9!nyGR!o7+5ZIUYej!LgXAzk|WMdjjz_@A4Jy*-QA ztP1^ez43qh#p#xiupOCCODN6;>VD>4hyPTL-zC&j4$1#hDRT`Dzx43mFUKzqo)!KO z2XgRJ*Wd2Q-ks4q3%#aZxNPIuXgdJd{W{+{=bW5 zql>%nJh`i(2moK8w7a`lzp`s~L7+5%gx{C{7cU9OCN%&7n)s>U&VtG`JHatj<;&Cv z7zNze=qZgCRMaFDut)oX_!VI5_pej9>;E~?f4chr+!qoMN!362oLu>rV!Zr|7_1lm z=ZH~y{4Zi${Fh=l|3!?>OaFt!VEl_19{*B|iob~AEej$UaEYpKyO6)xL2?(@wLe@!ZEi}wy6lLZ zr26bS#Cq6{kq-rP;m->Ys^cf9OUvsAe?bXc0O*ndh~ldb2mE;fqmZ~at9Et) zy8x~x(?Fe^wwt@~#|2RHwjxDP21RAu4g9o=KiJ@K2B~8Af*O!bq@@q~-Az%COzRaX zoKJT7sGhc~xUh+Ov&-)9-~Zh7v~GVp4(b3<+$U8Yi+excUClxz283{Bv{r=HGWacb<|Mjcsp)%{LODgYvX z*fG8;<@rC}V44(r{!fBG_>H;z_dD1g`a8e#&;Guh-QTz4IP>Rr)zDpmT849PFHw!9 zXl4JOxap$%`yJ>LPSGkZnmK_IP_iL1sQkTuYUFoAw+w57-Olg23(b%Zmw@=iOmF@~ z!Y}=8?0E0{Wobb0i<&z3a&^LOf^V{6@oPk7*!bIQICuReBdlu3y~;l9*-)=dafx%A z9`=s6mJSR}Y%EVNh3!zYBaFj-xdAo%lp<}B2NzgW_W&%q76le1+yILjO~9g*PhfW| zPq6!`xp#oD@(LhSRR)C87(iIf2MCX70m3UEX@n1GguyhzeOKz^Wi;a^sHh6nFhDta z1yCv}(YAa`qlC~X1!Axwbq^I6)CqBKGTjqvjuKxlgb5N65(!krg@Q0g!s+;@VuBj?11 zE)4;w3NGKXMzp4hnysxuTjP5ktm(cD);xL!)=cgNYaZ_dYYO**&3(y0`g5F|A@`(>;{YMuEs09*R-A2<-~4z1sVCK>tZrV{)p|T;AjheV_Wo`3)Qw+8;Lfq!e@ z-x~P02L9jDz)z#L_iEd>GG~jdBh!59UFO+~X5XZ#ZKn#%iQ4K&vRrd*I;aqY8X;1< zgjdt38vK`5P}DQkH_dp`pXyvtZ)4!A3zN|lky607%4{|zW|(a=I`^k2pGxWtV@jx~ zud3f(rrskm%<82Z7%_?17by5yn5TDrEowfawS`(j{Mgc#>kAuyZTbRtnn+PIV>Y15R8cNfQK#prM>iznys4NA zOJqL3^jn)05lyw9yP!>G6Gj#8zEIWE*Q_CCEPU+C!X$XANyrkBH$CrWI3@sJ85 z#eyvAo6QvpHM#0H5*5DTKaBF#TJ8Uy)t%@}o+^|SvF)3LZ6t)@Ubq(wab}F{$ z&gmGYFt$+QEqO2sm8j0v__MN9(v^Vai6%Rp^A+rmSM)FNX@6Bsn{gMB$~KwwTGC7o zjm?tlz&xa2y|>fNj+R>Ocy0Len%zYxwJEgpzt~$>pAl$qC zu?NQ4#xgvIEyxh=x2~D$H_#|8xrJ)M7?v|mkFoM`EOu=4YqKp~o9lc2vEabbw!FS7 zb&Mfn^!ns`yl4=w&FSXjSFJp$C+V6j3skOyjXMiXG(I{eHUOqQh+S+%L+jZdT_VAzO=( zTC|*On1$Q9c#aT*C8J&K^l?AJ7C9vJ>dPuR^<&tB*Q#fF`^xiK`K&Tl_?9c(4m9Z& zsMw3c4fL)D7Z^XRtD3Nekk+(t*7p;%qo%yk7Fi5Q3LFZtK5(vSK4#tw^A%Ss`$ix; zyTxhNSf3)U>3mjRj9lJE+5K(-9+mK~@E4v}bxP zv9$ZPaYoi`UMU%Z2>Vk`9@|nLDJYhOU&6mZl`VU$S0lS^^Hs~54^~L(3?VYghL9^c z&t%Bf653HZu-f<9QIpY=lB zr|M?xGP-mKg7k20ow;AXvcP{BqiLa@=io2=6SOPGqStP zSCzyFB|nS8az;Lay>y9zs20OqD^T~;tlpJS9;$eh^O^E7?EX$grf}!2vchsyv7eQYZR?}j^Oq2W{+}Jr<*}f!*MHbME211_1Jf&PZTv#e9c8n4@^? zmE61wfkP14q2>}|-%xZxMJKUW7n=|8suIoL=1%V~L^8$~89&w0l8cFcZ1Y^PgQ>qP%nol@{#3_Dxt@`#1k+4u7Ee1MWFTl5 zW>_vNzyX4QLV*-&`?z=&U2cBi45@38GN_wn^IX244j_evlnoFid&)u_E6?x7I=ZojrfhD!{%v#Q2dv_zk@J;gc+<^$&m1U@^@ z?~th6RA^H0nOy-Yu32t3gR$cLmgs#wFwQ1`W(OkDuO^C+1GLL$-MgQBTwI7^3TKVU zdN?w0Q2-bsZ$?uARg8V2h_en*SPAguv&j8=BUil8u`05n7I3a@&3_RW64kVTDot6}gYxvbED zLqXE|g=*N9{_TbHTB3J7$3@e`Y|Dy@MQ!W^wt5}e6?W+_TDH!#FI6u0WeG&8=w8=M zsc;~il&)W;pL#LaY7z4(%GF*UWvtpmx3|I#LJHKz;Y{Zyh!1HiXy7|N%!Eu4|Yu$QULA+CM6l526=pV?aa3#_IG zzw=~e46x!G4HEcrA*83b5*ZHjWwg4}qu1;z$~d3HS@B%Mi-uv@C0UNzpS4P3R{R$1 zrRll@Bd*=kZqRtkz7)rMlApuToZ97iBW(&OZVi=5rdGd){pM> zo|c9=)^}T0>>~ElLe}?I&4nYu0}#?zg93hoS(2OJ>M7bTH6I*(jI#tz8-!)Ix*OJe z#%UMu4{naRk-Sa_DA(MsmprDSAF;(}qD&?_BI`X5DuLB|#@z&eQv)wN<%rV+GfFQ=_5LPeQxc9h? 
zr3NXr&iP}NED#2HDs8+XP#s7zGVdn%GeqYttCrT$xq$Y(AwAfErgZjZ=JHccN8$x4 z515D%U!LnW^F^lb8J|U`R~RnV2U{)h#5t#3uDLd zH|`{GFl)SAtE*U-KZ4Pm5jCmzjLD&Wez>1B(h7e%q%Qv)-g%sS+^tHlyG*X9jt;-4 zZqabhC0&dy+}X`2YEFgQx@x~c$by^TJ?%gAEcRSth@<*NQ#(_&q`BtrpRy+oG8x94 z*U(DPgF#g9eRrmRcCzS3b;96N(p?RQ!8`9;A#_IuS4Q3JwjR;!i2PWU-WvJ5ql#5P zx!cFy&+`v8s>_G0YrOqKZ&2kix0ZK@Fq$IJ_R(YO`rQ+JNR`UpP0i9>yL`mpR(5(p z#pJP_15JK2lb?wEr2yhHWA2(sXD+4H&-@Ncd`R1(Vf}}aEDr6@UBMl(gCXk#+v+^a z0ALrx;5kR+O91MbC&lAZy_qH8zn>$LI`SvKN$&n1lhj{uVA)6XsSZyj|j=V!x>umO1R*dTd zdqrcp*mZ!Kvwh#)vKzE%*fZINoQ1w#@xcZd!!(mFK}a?%iEvGiMaP4btbQIE26-zQ z#g7_;nr(%Cs^ZEchr!r{*1UcKX42V&`J>L}OjACt+O!RAHKF>v=rf zMLNAXWi|PE6AQ*22J%|rN6VAcwM2*&{Eb6y$VS})6X8Vb)8M0|^_wD%o@e+tTsqvf zv{K*@-7Z?qVAUG(fo+#r74~eRTAfwfRjalI|1PmNSw-k1Pk;jI!FQqpH&C0 zF6d^43A|?M1p=-Z_dbk4b=>;^*_e)u(|+X`CkBO!gt^QoVuhc; zYc9SAccDuTtHq|XPnr^d>SlWGj0QozH1;8t-?L6)KFxR^n&4AUqEQ?8Uq(Wb5;{0M z!Z*PmAEmR+-%u!B6b1Wj4KRr`PW#+V!zoTTt?*1Y>A<=#95y&-(c!PHrzV|#1<>?e zPcQdV`+!s2E{0~K_|`7865iDk`Cn$A@Ycm-op7Ex7S1|(Oas|yxkAXWUnn27x(iSc z>*dRsB0_kii^HkJAAu{Bp5WI-D4#kzt9DI1NEL=wSWpUo>sinkEew0@sC|NO4(jrL z&dxc^WhrTZbUZeXOCW>F2ZzKmrX&sYHaFs3mWY-nfw%_kV4m)Ydl~%Ab-_IOi!>(!Zm#v zRg(s|&c^038mO4amqtHFJ3}N}Cuc+rE?Fv+DgZ<~_rV@Z&Q}}4pD{CZlk(;n@%1ON_Akd zbu7HxsfUKsmT~-$+ig#-eo@2#I6XN$97f4>L-&7LJ8vlmI5r>)2pE^UHmdi8y_mk|=a~pSLcR{`ARh+} zPqUIjcnmsGT`R4M4!y2?oKLyfI74m0>s1#*+28XBqU2QNi61Z+^je2Su|#8VG4R&* zp%MZyI1t@wfl!=8(ilTZMV^`TxUSXZtgld8vXa& zOT#IoiL(+|(nfl9S6n$JeVIXUNm=1msRKh3 zGRvroU&3~$h!zpQJU&VG>Jx3@D8pgUV z9zl!D4Bi=QLH1j!FrcY2_sL2qk? z(rs}lT*Y^dInk?iFW+2W6@w>okHP0Od(6*Q=uUGx{n3@W11*?xVRZ)i&Gv7AN8;2Y zk;di(9L|a=`r{5g@*^rB|GNhRKc zrg0Asp_Cf(^N`7Mt}a&C*um#;;;rnusA*PyWo{#3!bBi|dKSI9_+0gu#%Sb)XVtL# z$0-lcX>g*DCO=f!6!d&w8q-%&lk9Dw0Eq0SbJ*j0IGpM(WWmtFxAOMgz@-Bhakx4A zP-z_np_#20Je=6mv3YEy!l#=%Jr;?rW}-!&(m@ArVnXd!_iNsS2CI z`7_~#=6%+nRs^2C8?Njj#8y|Ai~^NIi+ywndF(c)H4STcG_=y>EYL#XfUCBU57oo~ zzbca*7;Nj70$xI6)S3|tdO9eX4a8yK$3#w{D*{JHVt-XS}fib?(>z9axxO&q3}l zZ2ZcC^s!D70f49R7!AUB%x5DLD!|E81-rM!|zCw!-wNsZXZzkkH=WnEA&m~RvaTL!W56%bc zVTVd|V8H}Qp4|w+1;Nf@t3T?%VzQ12OYrR%=vJ*{6}_Cmxo`SeyeVA7qeIP243e9l zlAsj8oE`um#jX~Xub=@~62h?%gmC=bxxVt!5M9x!$Elv9pSf78H9f7$E!chD<3;VcpSGBa#E)+G=uo1mt?L5?r4{5T7 zzD{P=JBTn<20?c>sJ=oM&b&M6o1Y98s>Xj#bBz0pC}Ujzk<+X+;doslsAxPkq{-z5#zQr|hqwSt;xlWvtqtzI6*V}g=yW|e%on6N^<{R1r6I5wecA=W zh{lH>VQQzn`}QN$u=CK@1;i$L5R~j`L5Z>AEdS6XXd;t3&KyBN_Bn)f`q0>W*0L<*q75`x zzT+u6OcZ4M8ReJZJVv~rqG1HZl!GW2@a36hOJTyYCD31>D!ZRJL(&{iJ7a&YyjZWp zS_EJ_P*@gtGstCsl-q1%Yn36y9CRSRrYN(`RtRB}uLJYvQfA|Zo#e}C@2xnFaCr!{ zZFVG#S^bbt5%RVAa$AElhg7lbn#fgm;Q;r(*vHUQj=Ft#qrkC`u{&3XI? zVz6Y!|8|*6c^)A54{;~{h^vkoq;(uRI^$nHgWx&!C^%o@y8~oF0>qRStuh8i;G!c3Kv%7gHC@xevIj}@CklJE}zn60} zTq;7e=bgAm6}7k@U%vry4I&#s$5Kz${Km`Q#0b(}oyT*0vzEMZ)O=O?|=-uU9OCWKWj5^|NUDY~HF`X|2GHtB0X66$C! 
zF-)RjT%;`z%)^(U%Ah&gLSHSXHLZ6f6QOCB(n~@vvf@YH*yagUi1i-EbP}Noot}^+ zU?8r|RM&Tcx)>J>zbXP$wF<*SS#DJoQezggqwtz3q&auIlOWAWvLB3kgSLwSzb3bA zxP3tvL%_}s38kHow5(}rSGyc5s9rJM1V#_SnZ}V!`aBB4P`S3?-jvk|23B1VpN@8-BaG)!U9JupDYJ4|dyUsPQn zT4AD}gOiJ452S4_LVR;~8Wg-BSnpSNleykK8YkO*qg^WP^?ICob)Klfq^(D`<0&aU zyp77@P!X72rZ!`*KLYN4)#PDlGGeYm{~loV-diYw<*F{GGNI0MH5#z=t4#Zm59T>9 zyEACy$ZyKK{T=8_%2ol30OP9d&BNd*a@t*!a0~cA%(YRWiin@`{9%U#m5tVv1Pude zjZx=J(tZunJ^DkK!kKd4>L}OaxAdS>n&M`LpdcT#_~1bj4sx?r1%^9CY%je}LO%oC z;yDOwLk!#=MJszD>FfKUV1l+{!Gu}wICs&ooWoG`2P|SGe=^x65e^On8bD>kj!eHl z{(u$aPvX(L4@jp14q-%y9@k^@&pmL4EPMn}J8jCD@Vy*FWZQDh3bCOhm_l%-`jb}t z9SnXcqRNj030VWr5pq!LAa~A55c}`pErvpo;Dod7B^@u~!ZWZmOG8b9PxDJ76nQCf zvf>24>711t4BDd1xzE}|hE693r+TW<68jQ>lekA|lHvUez!%HMMTNJrAojTAh?~(j zJC~%jjh=%jSz!|jfX)~d9}wPvWPb;tDnj@MDx0v~`?u@gd&_QcQ$<}eEUb@KAL&ZQ z=69)r7U4D#yBAdcvEVQ@4IF;@d+PRFd;)zv!7oLFkY`jwX#}7LCAUMg11Pm1*l18N zjr6uRD@CtAO$i`|{>(#eMP_{w73Tw`$n451z4UeFVh~e=ZMq~ZXgSpfE=uBaf-G1d zc~m%&#vU>=w*mNTZpShod<~NRZZZPp!eB1kozz1^UFNV2wt)HF0%r>P`umKT{mfwX*dTyX*1OL!|S^1(d-E18ZXi4n3o}KVWa&aRucnP_}el&yvrRT$*GxM6884 z4fl86JGdM_6epm9L8|5@gM!H!DxTtM=<75M%-FtKd=NnfB}NS4HVk`)2Q09u6Hyhq zri01a_gEt1H)MD9M^#Q=xnI2;Xzdd<$8qLhWt(XANa0~wFm5_%2_hJcV_-$?y+o|$ zZBw`a-lKU0XUGgH{(I)SL!qrKAg35TZ=iKwBIYsgm;2~w8`qbwT=nu>SKk0$CwxLg z;?)O$pEBR-GRXjRN@#hjL}>YOzKo6vPj&<;ZUHLZNyG`O$Ox-$T>whqd|tbK{6(WK z^7*Hc2gd{A1mFu^<)rw1?L`Mp~s_s=bEn!p*&Jpu(w-w@U0E2?I&6~bLMzoUqR2tz|n-Jy)MYOlt zL*&^_eAS`X#_6;`zl<+z9I?)%1(8W8JEAlM=r1jN2AdWR^ssW^V*2{gyTUcj_Li+M z3+T^oiTx-bx53R!C(MJRn3QEkcEr$~9Dai{9~`s}z6MowUQ9kqqdf%6jvRT&xsc{` zZh>1DBPs%%3b?E4S6%F37#LX)Ki0xLEJ3no08lzw?o8NcmzyUlrMGM(p$A^FKeJng zGI~$EtaYDw2{_V>fFe3mMxn2TXGX%G2C;FQu>c8ibrVlSKyeB!OLKU-PS=77X*xzI zdGOO3%QSQK?Mtq1D{L$)xLml8B?7cuY(TB8@z#x?C~+czW`K)OiPLR z?NEdk7$D#Il|2DM;Pa>yoMJwEY#3VSeelD%`Mg|0l2;!!%!`l zxV#^~87p>;SyB)GtZl^FSQvCSPkVUYF{i+8Il?-PJ_8n(cyz>Dj>u3 zJ&(e&eM5rq2U4=wb0RhvJm$avy^!I=^#pX?!(&5=6fClp#&U)Dpb4cg52htrO^f=KZsT;&Fl#Ed?L^qai1M@s&F@t9USD#bXrpa zec{Yw9^ZTx&><@>)A@#R0}uB`{#Nad{EY||7Xf%as3p3aUD_IKeT8}jK-6k-en4=9 z(0a?wL?@TZNDxQELRyJmFx62#CxNG`RyGqSXgfdk^36YEL>nnbVWq*W@^_vdbgLzU ziExKQRKpn%`kl=!5e|5Tm_f^SJ;U z4Eq^WPxAl)0>>T@62*7-l|XL!vBxg^u>;^4>@jm>`vepKf?#>|$K7L=x~k>gcBVVi zUWYJqBj1ncg{aEVscM04IU#hoSUhwX%*r~R#?YF^LW}aJ4r3xsfZH~}1^rWSKn-Ge zqIg(74@iT-zwxWmfyqen(PAXwvp^H*OxsRCy+_g-+(*)kVeIOW*hpDmgmuXoESV83 z%y@E)knsfA&*wRT$gKmE92nAS!96^N1m3Q@k}u*G58|zdpb&Vx1TY`_?52d!BMg8A zjY)b6HYE+XHPqsR6Bh$O*#ekxm5Bo!P0ny3B*17khH;&rVBg{hk=He8YO_$Ud36m0 z5#gMh;?K}JFy#b@+C1>W=sqI$J23VF%w)NI#aiNR8CaR6kt@DiQxxFH*V7>V$L+RYW30h4ux&rjF+xAj-KO>hdT_D<5cv$%wnHr;!}Rp=iHa}ZD6&8p$SIoeJc!0-}(vzrk$!X)eaDOs7WX!N#Hbs6A#LT+4d=*e17HKVbwTj z&agSvSG`Xzh4srHyTia<*s?rvYXKnrTK~3O2qdpr|Fz{L4Y?kuoIUI?ytyU*vk;7>P$2VQ%I#)rqr zo75!A=sI<0m~Bfk&ZqjJ!m#OZ)!IY}`^^Ve6rke=sKqkABr0l?62H@uQDE2eX`3`+ zNKIF*M2S2Z-9lYGHh7a?XmGhdkiA`GSu>uw{1$b5bvVlqL6JJIJ5LScK;8G=>T@ih z3f53J$l3y0xsN|CO_oy`4Y$Et`*xpOB~w&b=c&qX6CCUPh)S;3Z$lVveJ{UoWzbWe zFU=Er{#C~;5rvGwuX12`b#FTta1X!!_`zXT+Xs62aIJe6)vg2Y9LSC0UCDpdk!ai9 z1(DRS>B%C8Rm(op_d7AINnNK>2dQpkG4N}jwt=aG?4m#MNAD()wfUPo`lz1m0(Z8l zH+QUtQ59^>R`({DUS|?f+mhn*RVOLrxoCJI8Fhvi&DS+swG8Ik-*k7`Xa< zHFm#bAt(3clGvZVJCZGNjcY#d*3i-Ds{Zu#%wtyxNTr>A!pP@(5}YDGmux$uy|Xr= zW8m_|pr3cOluqoNH;SiWVZ$P?mLfG=xL@S)Id4JN=c}y6=6uxF*rx9OE!QVD%-d6K zF=AX?4P@hXPiHiN)vj{+j$aE(+8*VNBO8^kd34DxFU^ryl;0G7YV#qNoe@j+pF>fS zn$c$D)so@$vl{b*hPif=szpUV2W((#VjtW+25XaG+mwwDE%S%%(r_}jnw?RvWUlEy zM6QO}wS2ePR3McYWOZkF+b)kEi|X=TT6|LyZ?qvG=Ua~AkrB}K=ps53zj9bbuMe)Z z{bu{JZ?d-_t7a18g8N>0&M=4=6kB-f`qtb)@oSYRasK-AbAj!S*(-&i*XC~JYGx{T 
z=AVkQFT9s`%(-L2vm@QnW^E*BZb3Q1^ugCYBw?{U?X`4dWa0fRkxghQ>`@ju!B9D5 zDk$E1X6OwBvD3rQ+(^DIy&b8_*I^@|{$ab*BCO~ON|G|jk&x~Hy>bh>L|$wfs_L6t zoh(~U*;ro5K7(f{yJ6iQv#Lx!Src(fvD__c+bcrUbu@k~!ky)-eX(cKRKsidHzRY` z(Bh*R!6{;OF6=J6A)@xRMKR>9vwX-LN{S$%$~&X?XemWy&aNsKWlsjrh}(nbvsqo| zVRP}za}#R5GEEvIl#zjA>91Wy%5gSU1gcj~uU+%_gxkQHZxh1GvWDvciMqX3Baor8 zkzBVC8*6qfwu`FUma#oM80u_CeL)smy-UK8d@@bThPS$g-qS@EH(3l+=787X1s`up;JixQpgLNb#wZ7OqSD`im{{pn|W z9DjD)d=;I%f$NGPj*FCgnmn20wVc~&NVm14lAoUzGK|Ts6LeNFT z4Yw$^8ye~4eIk1aylWZI^d!eutw=wOO}Eb+n-;5l_}G1ibP4}3Gw+66>ExB~YgJjW zIGf?=MW_1)Q)KwC}`KaV6RW&TzyCQEF7)hJY$LzO{9Dkp0!*eWAkmBTfC{P8#FmW<>t4OeYW(EF}YQ+_Jm zFui{R`qb0uQ-(CN+X2et+VFQu@tP5X6mjEaob;0$FV?^x0IXM0t8D2W#=9=BkOCB_ z@!r&@_ZbpE@T)XjwW@2UH_e1-d>ES9(w)BD&8A>KF{Rc$E~vaUJ!57-Uw5uxwR!8n z`ohPY_m)xXb=>g=V)fnV`UL@li$fa>XF@4D4wFMSuEu0HVk}5R45tyK|LMfK%Io{} zPANUYS5>ohY%tRSx!mfw0)~ZoL)7`r75AmD88I@dQ+lz9FluXaQS;-=TR(>i7yA{m zjoA2xiOPYhh|I{TA4AVY*j79xo%|n?eBOzy+AdFjfmp72U-wq+60$Qo9c%gcz17e` zSNv4Wt*gzUmVLgnV_6ca{K1|#r?TQZR3%8+hWM{9asJ#X(i*c5D(*#`nt*Q2`#ThA zD9Kn4mbQOnTbJbEUT>G(?y6kfYOPg={*WkLA4pMg^06D7Lw2+`L-~)Md6hryQ$jHz zUn}J1Ug#l_+xPpoNZC>=eb%x3O`;?VpJEquCP(*tChHFoibAfy^q1d_0oFevmYbOc*^eEe&ly9XkjHdO_#x9t+qb|~M1MjDDS;3owjr;HRZ^yJlHxR^vm@T1Jv~-ZHB{onI*AUF}lKXtMbTEy&7NiasIpaA(KgXNhTl z`h%Fb$-aJf%et0WZr39ZK-tmkq(m z5r^TbS)pgzd)83{RHK0nK^BibN=ylril_)8QR4@wtU*+xkbrH}x3k@K<^Mscznv{* zL7hLoK&2W8j&D=B*HJ&Ho{AS2sMFiQ={xBybcTBes18(=!+aH`fUIp#rII|(3{z$B z)Js&91fWaG6C>@?Jv${uU0)%M=u;QwP&w4;6m#mr8(AS z@$s1W7WcbZo+b{Joc@fOm2=_5C-2y51FeIck~b%YXNAkfaO10Cb2xO>(7Cg7W2Q;= zZ3J$TEvLwY&gwj~W37ruo>Dwo!0)WBgv9jb4V77K&OChgZ}3X+%6~ya$HkGxzV>Vn zn1%k!y(o-@!H?j!nxbx$jG?_@oGkbExBXW0!PzK}N|&y-;bD~_xMHRf;l%3tHR7Br zbYOF-R9v2o$rgihc*d67mx{+H|6r4rpm=W1`bk7b>Vy2|Xod^eRm2W@$B%7?HzCNbnWKheoQNtYV3H3U|vzX9S zPKXxVW=KNSk0i?)7}O_hyuGwqi=X7wvp3E#Vv57AnuFJv+b9poY!J$$jl9&wV5FNJ zGGC>Hw!fKpOG_J>{zx$CJvU5QUfdE<=1bvh-&SJJFvoABWvt_H$d@W6gQ z>b7{h`h-N(v+%cUo>pwAoUCvXvnx7zHT%n%N^Xmq0@kg}%_s6C7_$@Ag?})YtwBNN zmQUIM0VYx^)mI9# z>$uN4h&9K_dMcy@w|%gCRw7ebHC|z4YrT~S>5{dkwHv#;q8<9500Jr*EXP|DTn9TU8}Y~=zhkV zMNsqM&tK&sc_Cg2!t;eaR~OGisw^Hp(u=$IoCdHUt-^6)PddRwzFhytlUYm-RbSz1O;z7lzNS zgO@|AJ2DQqySm_+##SD#*AE!TBc=sgm+_A&jWeFrXJl%BIF&k#Vwt;CE5#=!g09ti zW}YcXXw9ixukz{H{6r?Sx4-R=_R6!cdNss>BYX4;A^N-Ya$MbJV-HhHBAw?{=XGaS zyPr>b^eFG^O*0JGPMoI3QR*lStJGxW-{z=S?qiYg#$;{QjKT7@nv-^Y>x=#`XZz$| zS9(@@fCsun*DA>7&r6GRmMlA~9t=t~d}Gp@D(kck8P@ScEx20s8}we!{T?+HJ1nh{ zIf_+Yd>9@wShikviThl+^jJ%gj1B+b%+KCXW?_`K-G}aKlA~Ar&bQ()6u+=Zfx3)y zm_c9|x>75}#xIP6{YS|uKGnWhyTSk``}vZkP+kI`g?7~TynOl^ z&oX$|m~R?B|MP=uVRRE$YGB!1{`>q2x7-sh%O0B<+Pa)hu8YOY*rrqC(Oe`?3Czi-^{(xN%-N79x}{+|xGwbE+)E|8lg)#N&p&DUZYNd*pxW^RhzN zVmLa{>BB)$Tz2W%3upURo$%ioqP^lXy9MQC@9AX^4m30jRhiCZm#LDUYLBZTW*1;R zBWq(v9;Y+xpZ&5R@!qD}#=EN%CRH+5D!BGw8?M_jP{B=y$`t5Mw+@PzERBdR^QvI> z82>oT6pfznJ;L2FpbHa7vQr&U_Jxv$Pf&viGCq3dGY!7Z>aOVqaG$|31KVbzc;%FD znUTe|nDpu4`Q=3`$8R>UQnOSDLm&&GFI8hHW2)|~pg7fu2BtPLXE#-OG=`=44qn>* z8uoPl#itFM_oTf9igeH4d24&+xm>;t6tT{a%5#YIw5x;UY!oJSm~o#CPf)3k(wm0g zcS&0(%T5-vc;!D1>92XGn(n>Y-?wU+Mc5jRAM~%h;bXVX$mFA!s858I=x&|qR?qaF z>kDO)fo+VAvd0LOcV1c;X}Blknm$mb=uq*kc+Qelap`G_3dOi_wm73@YxI#{M-nF@ zuDi3xnt?Lo?DTcWESMeD&)vDiY_)PF^IcuK1ixogc1T40&E@S#$^6P~NpU|0KlDYq8 z>GOC~SpQk5_^352yzYYv(Fx_kMQz+0i}>M@KR|L;H;Q_kY@12yGJGgyZ(cZ2cuxYn zmnhC=Uc`uPel(@?=$wlUo7L#Rdgn#&cLQI&hWgx=`d3wskn9$<0;4irG|chTu4)ky zf1kRU@str`ag?*We)H>f>^`_B*XhNYX_?9}cy;@l)PlaAW4Y0A_kw7t>uA$0v+N%w z8+W?J9?Iwoo4Ut04H8s2Vm&oC4|1%<;Wv7VgtVtZ=G8`)=U#QNZC?NGq_wI-&vuzI zI(E|2J~Nare%PMap3Rr`tzo&}?~70R()etE(*0Z^ZEG|sjSr5viHbs@ovuK4PK`qPL4S1ye<17#=vZXqW8q@e3vVE 
za2dXAo)Nu%%KA|fC2vF5Pt0dEhTF%ZB%%KO*rd`tHcW)*(5r1G=g*mkRnin~UPIG2 zW!pHEM`wRoJ+PdiS>zwuj5o0;mUEh8!mX%G+T`*U@^5vLJ5!Wq)U)XYSp~N1bOn6I z_fq(p#p(hyx4TQ}yK2EtPp_nZR<1Pf@PbXI3QXHHSukWm&9%+H{7k zowKnko&AHz+_z-e7_&mF3Kw4Z^xC$kR^($82_ZGaAIaaFPT5ZPYU8n^-Yzr_n~V)> zi6(ajlQ4q|1#2zhDx5z`VlMt%$Z`#2(-oQTd27^uPHTQMCT(6xdLfwMO4DVWSXZ2` z)8`=vBUG4u(WY8gRQ4q0r}f9vp|gy~jC&3PF&{ma>LrJVg zY|{O!Bjih&*`nu&eAkyk>#ZCY1e`0WoMIee8A*2AKkgh%ag7hMTR0sr-aWvPo5lul zl6W}v<)u2rE4RC9sb!m5LNR(B)lrs~*=_ujo{!n9Pt+&T_JET2NxRh{DMa9pMx_00 zsgseo+5gAXS4G9ubwS2}1PK8W2p%j15AJkucb7nLch?4jYjAgWcMb0D*0@9C&_MT{ z{PWGjyzh0_d8oBdRqeWG-;?SobX-3ZRm5_PGBd#1Uq68aGMuE^r=DA~_7Az{IrlVH z=#DC;YpB~ZS^-ju$D2w!Yj}2nWU%Sb2V3E@s$;l@1BB>AA+oef?(puQ)VQcf?U0Xs zHUQ^u@1UWabeyR{|H~$7N`BZE)wq7oE=#?eIuZsU$j{E!Z#Jd!v(at1mO{A&j@q&i zPs%ixFQfH5MXs0@fAZ84Kd%=a9cV?`sE`3LX$lrR-K%PknH9Z%O!TJJKNNXjtnQa$ z3j>bJPZ<|u(ciJ+coA^MGv5{WD9Vb5l?mf46H>JUJoDvPCf5jab(7-TyP1gfvZlK; zfkbora;(+uf3de^?|hXjUc-TWhClY#r+cD?RmpYNI#3~;l{KK%vIuuQUrU(uUPj}n z35(gXz(S?!VsJ+Zp+PlR1hwwWI5$Z{hxvG#IyKln>*3W+;L)aBc1OtjKsdRN@^1zc zqc)b|XFUpRn_<$`H?bxB!2dY>jRSzEnctcmLYTwJa;DYDKzJ) zpqw2JU~pYthWtUZ>KAwD;_(f$G?Mt39^YMEEw$JJG1uuc6rU+eEARAqFC!549{r)@ zyq7G2^Pm*g2|Vk-8aCIG+f3yplUXJ+LrJN?6p~T)4KASL#F|5>u*HEJgl`G4(4I~O zEz)?_A4J^SEKVw)#SI3>o}Mffh|QF)OBIyN=U;pu1N0G4BmqFSq(UPq5Y`2hCtbpU zJQ(PoyyPa|2N*Z#5X1l1h0(pZ1x`i30L4J`2~coIvc~<~^vT#JsS}4&vu9>_$25Zf z{;-c-D%Rr&*UMW0P8H}~r7$<8#DPG3gTc+FTNO8V=juK|eh*hOQYT5rsqa-2n zw)Rx|&QQkkF#t2lo3(i%{l1g1iC66Y)1rYbU8)6iYU|yC6-#5Z6sd=4e&XOal~U97 zu)zUz>DAifp!yDRDLO`PRR4F{v%+@&NZ zE_M^i6|Y(Dy&tTj{M>QTi55A}_Dfbz4&X=oL=4fett@DH#Jw6FxF^uQbNG|z{1R1Y zTL0mN@Wf~917ufR?XCldgbheX-bJ(}a#WBuvmEp7R;xo8`o>clZ zaz;cbmi*>60&uLA3qiG%px zYM7GsR;aj6tB-|76IsUfjTk@*2k;)AQLc}!^;B;FpiXz-9*}hisO_7GF4zt9fg1kLT65=quXmVyspZdM`Q9pZ{v;TUZh+_%+%&|*tlcNj<}g3s z29E|qDm!gY@3NInuIp42e~zp|@3EQ)osamw=*7(Z+@_RNy0@Y=SAKtMQ9L?mi_3Lu zvhkIdC)JY`LPL@CS-m=sziHJIL^K)ewz?(4$=iOd82v=YgtwgFFd#1eK7)?`NpQN} zxQICI=3$|rCo~#&0-Nt^u-+ z@_vx|;+Vp6^u+O$^HiiMzOP$@*v>-f#AmU9XRMV?mfO$Y+OK`l>LHmQo|i^q$j zS)&P?(J~ne&zF*cI7nL7)@l+0T|A>Ryw^(ft?U8|saHFVxTpN0RXncpCG{D!&RVJ_ zw<#$ttln+Hp`NFQm-H~Y$n&YTe|gZIOgdf5zgInIrrxlLOr;{)eXk1jGGU1GYL>}j z9lPs|!9&O}vkLxU!d&%I6d?w@TSQ!#o9^UopFYLJzl$(Z5dorecC9!t6aIheoT{BId->| zLj3q0*eQdFlkm7XR5KmNI063qkDuLe-bD`pLx64=Cu^`x)?`zMcT0tLmGdFnAfozi zjEsVodYr`4GIiQ|qdetM3%+goRc?)|0f0By${~J=F4pRHz@dh6UejhK^?X3oNku1FKD*K2Oa zNeK%q8@(pAG2{wtFU)INCSn^@#X2@T`_J9F(xp0@JadvATTPokKJJh0Bwk*hv#dC2 zU#R_s`i!uwrC!*SO{6%g{N(&0aPQsToy>rXFW?Pd3AD`9h<~-YG;d%m>bLFCylHX;s+4o zrA+3o<(4yzEFZXq-7ucs$0d!f>6NlNX{*y|@P;M-{+vWt9b#)WRKRATOy+Wu1p-CP z|K_>oXm6o39Dj+&`f+Lcv@a`(<7@3#YaL$bLS9*pr8a7Y-1j}7k};SVgR)%{Y7mGYTOL79=FhkEJaWfQV1 zQ^oOu`7nyR4TF#8MCmf`z_53NI74tmNLB@;5LWSIo1O+-%e47CbiiS8d8U}|73vca z4X$TD5l5Xhph<7&(r%RiX2kJ{siq1b(gtl$vUn}P9)*Bs@M%05z&8@9$zrM(n` zfc>5h&J1T}eADKZ&oB?%g=wq*t33`Q;BL8+uPR59(Lm$U=uto?L+H(a+YU-)q~Xuo z4H)Z?CmG^#^X`;Qs_pD-+w7+d21_WD?KEGDhr_w_mHgzD69}Gq112B`r>vgJ%K>d| zlwAn8Yl5=e1mwIT5J}zzqnG;{T)|_c2An<2Dic-HpZtT zUkv3kux?>rgwFNBxmjJ_EmIP9Igj44u!PT+hYJWH$W)(`X1ZNytl!`|Nt1D!H4iX3&$~235 z=ma`ueI6@*jyr%^XWX5*reNWd`{}11*H3YAtW124F2nMeb*d$+4`Z1vUG$x8Yl0?&3!gW@7ZfanNnoo4aDYFX$%4vL%flD$pWz#@Dt3lX1IJeX_FLH^K1n_ zhr}PLRfrYFVXPJ3%OrN zx1d9yiwMdDu>;LpB8|$xoPWSY>eNW!j~$&SNzaMU3TRk+hP5sXCW-2;`LGbtdw0|j zs8q!TIPVcFD`Uv<=i7%m`7Q5HDA^BI5ZhA^XOm$%>Pg-1bToGyY^bKEfQUM$%_hk_ zZ#u{&Ne=j3G>F^wmUcW!iX{6@P_8r0eJU0+mXs|QVTI>eudhk8R7>4Lvjm>ty4xbR zRY5lVofkpQMLHwOZRvEbX-=v8Puq*(n6(v=pP(wg6gpI45I#F-G_zc`XoSIVaAsOP zc(&l(JWm#Xiq}+!p6Bk#jrjf%$ZF+6v&2F56yoqG&n5@VVrFH4uX|rtsi$%vp6J`~ 
z=4IjT=QnVHCpVuU&Hal#L@a1GOWDVN`F@0HoRyST;fq|flCgpD`IB=Z=_0iN=-WmZ zTX2$X>g?5ApE%}%@$+nQ+!x{LH^CtOTaUEYRa8(MFX;|X6W2Xf24r;ir5v~8#wXKq zZDof3XHV6;qwpf?v*yFco3xbu8=AJ3zv=9e(er%C_6w^60*-x6$_`zZ)8+H5S3Zd) zj(*#cofgkx{oPOQPfSm&y}ZUju2rzjqPDy+X?GVr-iy^o6;AC&#%3Ms_Cm;6E*-W? zc{QzC?Ve|9mncOkfgD=GxULeCz%zk(^?K_M{9o z$;U3(G^3~5Nmb_|BfQjE`Ut3A<&}QwBU$_o&26D^sIuYPK+lX(?{GEIIB562--CM( z)v*4DwkPmo93l%>2mnAQ1zrG<|F4z(>xCi!utJKm;r4&ULpTWlg7YF20n!t&UYLwK zYFCXdxHegK(tEkf)7KWZ zFstM)k*%XRNK{JsujlB>Th3G$If&`s&^Hjq;RdeTa&GI#!U-%>SeCb`l|=m4f$D-! zw0z|%oD)ke_^3AL9L|%4=;Ry?sx0ie9`$IOPCQM93W%tp8opN*4 zA~{F1wOJjW;?a85&h0|`6%cjKF-@MQ9tW`%56o5P_89gfPH>+W7_miiwlJ<~X!glP zSDM*7AE6DC^ByT6{Xi|2@&s#BPhF@#Nyb*!GKLA`a~13d z4du+1x?SomJ2`vmQVWOE&f(ZYo}IRd(=+j9p`3|7HLnU|Yo%IbYX+X(e7d#4DqQQ{ zc3tVzC$2V{Tt~dF(E|i?l>;|CaXah`;O7I@!3DnO+?h7G&A-U63iz4p+}70)m+w(< z`KpV6>Xevsrc#d?vC4_l<&y0%?JA`sx%SUIWS*_8?;_>0?LWs2zU$&VY8qpDgptaF z4T0UqK}1&4&i$@9+DA>zTCd-T^%(}3{om%}&|uXr_O&aIwBJmN zR@G>z$vgRf=?Qu>9hz9UVQX_9?YcjaT*KlBnFLi&j=5*uUuRzPdA^tAx5W!BAC8?7 z;sfUvJJ`Vz-_LHX473GrVJ05)gR$Ham6-~He4UIP-zo~oIPutMNA)F&v5_s8=p}s? zU)lQev8Md2`{Pp_N9fNtkpZ-PFaZ+L@z(Lwyt{_eJ9DCr{W_yZEU<>yX}-2b_t7=x z`b@TJRdnTep2Aln#2;L6aT0PKZ5~P^-_X`s_OPOT;9SvdgGkQVi&NiUKd+qsR($9y z#;i5sWYD91ick>=KUuwHRyw8&t%H=j3HzW|dFCb|O5RB@*uJ%sC$qHH6dJ+r<|_Ut z`M0r6Tu+YP1+$|+1>yUK+S{_A2rVM!@Mh1e&#%0Ljy*wDIJMORyr+w;gd$SBKKJ%X zXwl(Q3fJ-o9@Qct{r%rB(#)n_;`5T30pAo+rahM)0+X%Jp2&Y|SXZ7%gw^j}t4-c( z+Bo`GxmP`4(V(j_o{e5kSt-%{IdFyzVdu=u9bU2R&wmM`T2iGwa~5=Puv-=!DX(5R zMiJbcX-PafTkIExrhSU5-`>>0wd++@o^l(&#ILPRL0Xhhje=(JR2g7p=a`^u~=73nudeSrS1pv({ zEWi;)+6+XKARd8lNbkk~7%+qc1=oPOF8`@^paVd;fJfsF4Q}Dq)~+|OYO;n1UWgY_ zos(}1-@QMlG+Wa?vD9du(U!4FuO!Ols#K!?Zm_Li!lw|?gYgt=}0s67%V440se z2KWOcellzhQC<_N-|=PUgTw9(@;!qzPJ0Af-VaQ@B}A9k3X_-;%SAd5;p|XoAx;Fx zN;c1=380WRsF4=R5qvtkLsFDPHESlAjTk#N8)tSfUMHXU&O_ zIehe{Re#ms9I)lZ_hEPN=8u;jO<-O>hKZa^TayKkAokGfaO){^j$rh;zn%IYkwvfX zCZ6*2+S%rF9aWP)Lr-SKJ zaMb+!+oAwA4N+0nF#}&|gRvF!`ZrScs;Pq(jRoFjH&$|UxWSrf#H2+Zr`==XFjJ- zd4G4&_nS(wuy-&kSNl?J!Rb-k6^G7B`aLxXU)Y>B6J;^PmkMi(Z;o9bJ8tWpVauOa{n!wYTJcN z+lUt30E_Bl@nk+jv#__%^Yy1it^f6V?-+sbRt`8hH=^LS*x zQy>2lE<1)({rG246#)JHcT#Y1NoFLr;9}w3O8J56n zI573fa~Xw%6Qv+VNtD@8!}oHyiKz`maFFHf&6?Xf%K|;wcEpO)OFQ%d$~#F{RIM!v zCcE(im=K~AkN3pEmAE?R+c@S?^Du8|O0)Xs(fz8tC@vz2G~3Zl^P?8uu*<>ykJm-) zsW=yAPWT&*Z=7X90%kz@T)ua<(Qoxnhl(G3&=Mh(i|_*44q-G?k= zXtKE3Y2}!W=UUJHU>n@k?#k-j`0^uGuTpvw2px~qys^Gp?L;wW)dljWIJobVpex`c zFJ~8o=O2vDDg43R$G42s8Pa&e6l2nKvMoKoW@kTYCZPtFrLk}}%A%xoP)IWP~nC>J1^7_C)WprZ-g#+=w&7I4hL#WUMIPh=|)OHj9y5Cv7t*=&Qf6z~$BJka0g zVN)hWobQ@c`M+`fmnRgUep>DsUqUU}GWWXQS9lYTXlV?|-hS7pr=&S=iNWF%?xMXr@C#VQLEwUYD zFu7^$cD*yq=_3KN4(n;58p+nFSM=DRCx?O;XVq1GRA$TDTzJs^zMU4sn$@_xe&E^d zKSX?d{0|Y=z?6SpBL?Sh|BNB#F-clRBl4Yw7vAzitNlu!{&o~)ijhRyY3zd>jj-7# z&E%KL9ZlvbO#kjuSEgd>%~j}D1Y)wj+0dUgpLi<}s98!nQ$j&?^&zi^OVE?lhx!Ts z!T#ZwIgTB+s}k=y;i6qpbhlIm&g(NVBDb`w62%_ZmEPbVSi3h`9?>TULjcNO~C_z7$q76Uqjmve3&3eUhq+M*t857FwHpDZli4DBNb0M56 z%G5OUd6&?N(gHVZTk}%Q^%Pb=<}M9&N@u-!>6Q1{F!tn+u`3UmZUiOOa(HfcCi%`g z-q84-(8VK6fv&Wbt!Jx{!;6c_V{7}`M0Mri?u9T(KjX1RL4Ftwmp zw#8KfS3+Q0%(y0-)e59%s%hHq%h_V^1J9n8RSD^UEu=94-Mlsh!^MiWek+H=Z0+~W zXFREebXziI-$#(bKcfu_dZ}EU%=H~Of&7o&OaT7|qziz(4DM!92ez;DrvC}}APYcy z22dC-%0FUSc{H&xOl*;0A(g)~w+Ed&7NeM>*pSfn3!!jVb7NfhKdvjwt`6}fcIQi$ zR;7hEz%y_Xr6@~TKkVeuIJR{s$dl3**2>k)2+u#v%dwpg z`20=ce-_Q%YD=f|{qEpG7|E#4!9|fODj+bA$M(rmRY!A!iCQ}9d@5zsa4dkG%8r#B zyMo?MTH-XflVeQaAo$^MoqMJdb1F}2X!?xM&Q=4x(N|0hqV4ywj87XYJf!85da1;n zr&-B1GU-;@;9a&!v7D&77;)2WsIo=lZ~dy$+;C!Br=EJBx$R!)Vi=|UZNew(1e?p& z$Aq*CdN(W2+4(gav4LRi(PwH~+$m~dbm`snYC;h6N)BGR1zyF9=iCD$etSl3)v0_}dCL)+GGV+mtMA>s 
zi2xl`0WY6O8J0ef+4aeDn}->}kE;H9;;mYeVmn@}b#Vx9yS}8^80KjlP#u zK6KpX-$IJIHJ;^yT(mc=yB&LE2a~`IC;A zaUZ{B7!w%!iZYtsd@Nb_ohOzuQqwh;o968vqY1sK55C^) z%+$cw6x#I^TW1@tOcF}%keJh^&5<+(tybLnvS3KS`HJcXeAp*8^v#+V|FY60p{v{{?VUO2w^rgUu2Q+i~WVVoe3waR5}evi9yM_)U7#9J_|L)#T(pPZal z-x%-?V*>dL-CicE<%oZ!_X0yq2=5DL{*1+z0uO+`0VMGwbiR0Kw45R3s-ctohG2-8 zLdAs|a`)znqeJ(>Xnr_yT3dp>xeu2E&WgMETWW4lobB7|Dnc&;1Ef`6n^JQRdD2p1 z!*UQwBEy7=-Ld>dS^2JX_QCV--2O(Ct9`D?Kk?dE_wM(B^3~xQ0jAbeDHA$HdnM;W z+#dUp`{Pd@{dX77sA-xTUMU2T?O%UrZ@WBl$e)cQylTTe4j{oqZm1xm@4LTj^jq{q z$Bpg3dgepMYX>Iz0p$~-L*`qFK;p<1}B!}`3&3Z+gNW@u$$HKWWSD=lkg_Li)2=Z`4Te z+*b10P#n7K+Y0TEi)akvheBYhk*r-~-}NPzU7n~5D)(FCFSE!|^i94?4nP;Gh+$$$ z73E!#1J^1x`VwO-SzGa%;^bVD`6VFO*hWTk1%tAkp_&7f*}NIPlFa!cORs!X z9aLzZBICgA2s5Ml9tC?3Tf!)pnkWlb7O_fg1<~k3#1gf~ zJVSKis%+G=?NcQeW0{uCqZ0*b<~mC%KlL`&vp9aJ?B$AHlGR4M3PMiblbmpZ4O{At z2v%{b`pza340EoS`{_&I0528Rh~Fo^?ppgGu`Rijr1wDFl;bxC{CR|6_yE8d_=@}h z479C@Jy#U~WKh5+xa%=%Nx3Bq+<{FA_!8a$ZFg``EpYEL6>dNiQTKz{>RXQ7bw3wD zpTH95>F{fbN*@MPXM2M}1IX^wTC;1BNG%eROZB>%Nv9Z;1plSL$R{}FhN#!I@1$V# z#I#46D(bFx!s|q`U;M0sjGhYF?5O7k6|fgH%mj?#CQnQy=Oy5l5 zsTF6-UD!t|8|i`6+`o5@1bq_`YJl%R#r((d(aud3CpIdA4F&8audA?K$4kype1<@F z-v>)v>dJHw1LGnHd}YnHHF#L>#g{ZPhLKTsjfRiEx&zMS2lvlserv1OJQZ3-seQjf z3kQ#(VNmG|?f%f_`_zxQ$lOkT&vB5)voKHayh(N;xz|ISP3qhKplG*gsdE}FPPhNm z{^j4xJY3wST@iK!$u;tGty`tR0M|VFB0gyvxnleH1T6)Rk$g4V4O+>2>r-E*+s7RI z16--f&^1w_6aoF%W0`(@GV?+0jGe3AGuN>dD%{IE4_oag=N%7N5-%)U7=KbOljNhp z&B(sSK9x^fFl6UXGj8q-1=cHpan9{GU3N& zZWsd3A3|j(l;mogL&bkhNMN!===ET;sicrdDvn|ZE&xN10CE@51Af+vLHzr7$c6&3 zSOUOe9wBEJO^Ct4kVrw)gWY6R_VCKR0$OaY|b+PY+%Hpkvs zvKRE}S^4pQGh$2jF8k__+Z;2_q9MQ@#rf^s@0w6i{UpjnBT%Ag&QI*qGhM+=?46GU z!CDM0;f#%yG?O-VBYu2Ab^Wu2_kMw0|e5i}@oWsSDmx@ugn#^%EnLM22o5LGiOu8@6zxijRUz?d1 zJWYl%&xjTbpWg~Qr#r^7g*zX1v{+o9i=}m&`c^*nLdb{M9IxKbu)5sj;^RwI@}ZkW z^8QAzWuKX$Xw@D>|GvWMEB^Q9_SFF@8M&6Gg#qYf+R3!+t>}7?#6AIoRY`SyTBiQZ z{0GV6+jst)wZrHw)|=+fJ!Ns-?A;$}^>F(s-#z^VbY z`bzX+_>Vtdpw?d|rE9`-psNu;xk5l)h93G%a}8jPl(*Z5A3CJ5V!N$0!_N*Dx^l=Q z;fH?Z@roX(`OU8$uO%;#K6v|;11|zGCCvOfnZ)ESQa2tLhDLNfWb7J|L~q|*Bk(!i z5C86b*)uvenmFeTV7(Xe6u@wZOt}3iQqQ0zd=)PdtIcLVPeMUd zVG$rBkPfdVGXXC6=s5_GWGL*(-H?S@T@+)&iI+wUPSzvuZI~+63$+(Cj8D*1&MAG^ z5XvGyjMV1IEIT%(#JeQr(Y*&SEX9^9k_d#dX{XL6ZoPxlk zhLyG6M#k|te+q6$I9x!EX2mwpwlZ36;z%4o#jA>t4kD$}##<zGG;$DLR449PpWP?Ric8G#A&71x+F>+#``7gw z#g!bTf>Kh$=Il+>M|4?PpW~?W%G8$Wyqiv=%Z}8lL9*@4?3D+M@ifn)76E(<>>_Pg0J^=$$Bf6@48OsSG$2`uqmO zq>1?BfemGj?}k$*OAqlU47eXSONrJdstO)J4>{GE!JYb<6)lIZ+P-?fH#U- zzJPgpPzw)hRv&S}k;JaVs~?Y+l%Lpsd~^Qt%I!RLJyqX{jW-VXx} zVMSq>W>}uNTu_!PbXFZ#+OL=O2qe?30Dw1^FbD^kCE+T9&w6e9tF6R?ofK&}j@3X|Sf6PNh7jY1~vXsR572 zb6P*llVx=(ysd?azI%NueN7i{f>u=@63n_C`{fptk8u5sumvN4vEX*#3F<(2*%gHd zJ_Wj{5Xay!@%E$tccZ|8EYM`L96b8;-(_Q9kDh~DCSO1o8aq*Gu%^ie#&M!8|7E9* zF3Z*Gs2Hp+G+X@2n_}Iir+Qa7l%Ld1eC1_NYkpqTgE^Nyu8?EdQv{sgnRc7i$7Np7 zqCI~^WIO@}-3!GHIG{3MyVE>jb5F&XHv1EmkrW5aj_I&A{uPO-w zh1B##e*EAqbD;3ft7mnhE zy`27$BOBths;#-r4_Eu!uW9&+Ds3U{>UaVR>Zo#)phDf4{f67Kh;n(uk(J;V>Upp* z8NZZC1np|?P}y`XKWldt7+s-bx=#v&FCTuII9=~` zoRdeK4-#j!4Xz@E=&n_Hp*yf>^`=@T>Kd22F<+TuG}3C{5S|V=ZgjTiN_o9X>?LS? 
zo=8)PwEHlk9xF3#^}faF=$>)$kL2$lsfHemrLbHH=^KZZj0TmeAq zawrbK2#~|<0q_quYVgVEHK0i_fv_BCkxi^;W^GkdD>SA~~p-KlNR1&fDwJ zwYEPAP2x-+Pl`0qZvGZU1JK=X1hg(53R>oEk}7rro}c~Z8&DahC^%4W)#2qG1gu@e zvhn19*aXi+URn!Jw)7HLn?VEMU%D#`$xIhE8yHuly+|4vSAkGP z^j+SVXeI^5`%-VxzxNdQ(nh1ypPp~Dy`I^ZJu5-zZl7Y*Mg@k+lBrP@od>9OP4j$S zg5+a|C%}V;6?T$(N{gVc!gil6M93+*!nDWsA}=7sKiOg%zF4$}Qj+>4Rg4r&Hi&^D0&=9nPc{>Z*!t<=0}gCHXXMo){vBwIdP-q~GR?=jU-=9=u5 zUeo^$K$(g+P%}qE6TJH;eL6b-(UE^BX*So9>7nXydi5oPB`%8ZNh_zwO>{P8DGwx; zo49w8cX{#?F1UI*QL)#-k0F2;jsK>sTBTf1>1Q*}tW_nXNS|Fsz!0;@ntUTyNEP}b z-}a2dOz?c@*$MpFM3~t99hr$EDk)1#8>&w-Ccc5V%#Cm^0kW!V({ba}TdqBE7s}M!j8r z^(~;043Ccvc;$U(I8>VE)VZrYtnTz!z8^o$K5OOZ%`cURdDgBGC@e})vWJfC&|4iK z0694S2xd5AF(;VO7Kn;F6+fo?qsz7YK=`-ka}0H#$8qw=mfqKhqB0z97#*oNx45Jc z$~njmTYW5plwo{SB`)NJ?? zs=JO~@npQWG1Kkubg#*4FyQDAqSC$5tQ0ZrFbSKsr+Q$lnSbG&HnnxE{H}VsaNJvf zz$7E)m2YxF@!g(*uT~Tn7c%I10^u|bM4Z^C@LZl^9 zUrF_(%KAc<+lf|)!@k~XCCQUvjPV_FKWMJWG_*avMPAug_ViE4@cTaP*^qUrKq92Z z-RRFL-WJws>iz*!lLB%h6t2e{$bZUx?9bQ!%HhXW9P(o&F?Z$xM$0EdURTTwFnizC z4^4Yfnn;omJ=EHbs#w(_@?$1El6bE~`?ps_T06v*O-$m4&o>6$m2MV!B*2Ari&f5B zW>dAnIQa0WVb~-!x*xuqpS|Ab?$MPLjb3!o{QDSu?a4{t4jH%l_32cCw}aw?0!f=E zyZi@CqkRW@W;sYl#0cN~xVnw3TdlL=deb_;U2Fv-6d#?{Ee7}^CJBNDw<8_pH!T`%!La?*37&VM$mx9Cr-d59yB0wP~L zbQOk|HbZz>HIhC;zTI!n3otr!Ww>>wez&%XSG*sI9UlHm*7b$%i(eiQY!0nOA!q9T z%N+OWL{jCA-<8lu(eL@VYdi+Kf$r6=k7~0XkfRQM1v4wR#Kq7_8@wi3Bdo^z1aYX` zTt#HPsH(d}P|To^fO(G%owgRy*t&FihtZRh0!mJ~TKwGglR3~?Nx-UW5%AjggYOR* z_!|H{MgT9{!En;Kr!3%SB?XXl3AiaDAh#EjfTMm6Y1fg2!rF^(aPcERbpj0M1tKvE z{)HF*S2zQndjc=^g5aFouQ}G7D8Jdfh)3kz0JIP=unLSv&j0}ZVhkXC1R4RlNDxQh zS#keuZmVAGx?&g=?YR(O=DFD!)@JWNQcteBs|M zjcQ)}H5NGJgB>3Cwz!ZAD>Go$!i_uWGu$9_se5VBxMV8ikNeTu(X(cbfF9z3T^(E3 zBA2frz~|ts!X+b}$|5XEe;URhL9ke3fUQWdG5%g-xkGdeaKi5J&)l|5C3Y24#X9^iCk9OR@) zv>k-AVo5#iloddSZ`d*}uqIrke31z;mj+mhj@1bzUJKL?(L~aY$?_xH$$usNZuRa7 zClMiI0-0Wm$qmrw-@#oiC$}|vKUmE=YgjN4CLqkpYBe}CRj&pUiN}l&?06e9CS=IS zc@ux?^D@CFo{XM&wTRO|_{&?2SopdI!Q_ZDsSCW!Abml0hOQ+cGdxyKukc%4Ns71|Ko%v1!rMyL-L; zX>vWfjKM0~EyYnH0-*)cu|KAZ{cGzaXZcyt*@l0rzbK5{o9q z84B9`&y$2sab0yj06GH)itoWyD5 z%h(gT6{!|soy+bkf=(c|XsVy`SuP2kDoAO~(={tz(3>->Ll1gnBvmR>{`|GneqXV5 zlLJv4`(p>0c7`9M+dcSx$R)5KG~8!>Pf9yt%qPj7j(dF>BRcY;&DF2#u^&4-1+}=^ z=DrBWD_5gT#O9x!BDjG(rKYL>ZrD4O{wYv>MnuittXWzPK=5FJE*6TYsvqbvd`s5L3 zipu2WkLl!T2QN$olfxjZ$seF9comwD(E#n)Q~HQ)B=5QOhdMqQTGlKeLJ@ zkcSB$9*m(2)`hd#jbY+Mot~1}!ne#C9cSM>Q}i4Yj0+vSPZ6ZnuDw~$MeZ>*kyAR5 z6K3`z{Z=S0fE2B1wO5BiksE#4S@+`KwltD`*HSHRjQz@hJjaQh_mkM+nuC=%bOGE* zHR7PUD_i%o)8tr^K0rn2S+53PFniMUohw2V5p9thkX778=#Tb9N~AJd{O8jPPl1?wJ_ExfE<`28uP{{Z)rU+1bWrHM$J zxpW(q9VVI7Fh?i_eZKnQIe9OZr-7}JZd`sOoN3UkEXXQzfp$I4`*uTEPDu!^HG<@J z`XSxJ;x71C%bul2+v()o)z2$wo!cts+N)s)%JlWKq+1mxwe%2W>xpMWGc&FT&0X)k z&KDFEET{&VjV|hkhliA2r+f-pyQrtK{aFBgES>~Bes)qBm}Rx`RjlwwcikNPgRuM2mWGVBpmRv z&^i~Ds4492=ik!%k}{sc6iHT5z7vLwiWrtc8Ksi*)x~(@ygzHKm*b-)i?cEvs!^wV z$NaD>$-~V2I!bT!NpGfT0~Y)n{*XYV?B*v})xQgig;Lp3bno@ESiM7iP&htH&+A_| zyj*6B`Q>OZBd5KTRwQdEjSLoHrK_Ot;Qvy(re&YX?GxclAY=q7o}61XymXFAFRH(C zkMR>7CAvd0VQ1LYov_t6&F)Ub_`xE&w_HWn2@F2n6Sn;$B>P}JG*OJ?0!vASHnMc` zZyqi!?61{w_Hxd%!&WfWFsvcK1EPST}MbEX$#qT(aGKAfUqeGHlOgcL=k-`-s|AO7n7l!9d^cN zC}F`?vHyptw+yPI>zal`a1Rc_-GaLZ3GVLh?hYroyE_DTcMXsL!QI{69nO4Q_w!bL zKc{NfOik6AHQj6P?nb^J6_c5P(xKDNg~W|p5CJu!?e=Cd5PIz<}xV{7?$ zi-MZHIUL)M2WfJ0zlZhctiZgzw-U- z?NOy9=lMV9te>CN&R$^ow6uQ>c=_-k68-vvLVN{o>J1~mk0g3{=dKEN>|w_0=5F+V z7D!UaTXVu?_de4}ch^mE`7XQ2LmVV5UY>Ye-pWQHk!oIfpqK z){HFOQ?`m@AM*ltYKhF1O15I_0EBGWWYWXFdf@u}l>7V~JB%^Ir0rs5TKX57-cL$n zBAx%<6Z*@=k4P|wI2<6GSs8DS$ zr|xdXmd~&|Rm%RUrtmv>gT?93h&n}zJ4t5(B9LmC+jxlZu^$;}n4!fu3;A!LWB`=% 
zCsra?P{X2%Fh)EjdPnqGt&Pi$B|Ct49W0#;MWlbr%MH%q;8p+ z>4?`4+$TIzZs}kb`~DHi_~$jzNo=A1V+|ToC!>vz@e+lFSjg~K@ggV&A-DtbyRo4OUtFo99@i3Iph(bBaPsb!BnP?G)BfmjMp+ZV zYqpxioS?&jCiYt1t(*NVZ~LD1=%FFC2VD?2n~P$T1CYt!^xzS-U{Rvf|9C$(yWybZYmf|$Cm z4!qYtUW&} zP5&&@$o$tX0fU;3)NRJ#H2JlDmJX*%P;Z;SBC-q?A3}EFoAD_D0$9-$^W>-UFSlSp;TkIuhR3cm2&r0y)T=*@t{ zIZ!(Tymtbk013RU0`%DX*&F|BBlP+}S}maTF0}X=o0O~ysY4Vs zCjvOZV%WA&z9Bn(O#M{^5nu96t1}^NhGu*@tsBs5`=kz8O}#?nFU_Rs=dpgQGdrM4 zQ$`z%M90l)od3)lG5y<2yqbYdJOET6LiLZ@b+oa0>v)q$^;yF^7p+~hc} zc9yt-E}9_VTQ3vm6yAx5SH_%9RESa4u-%sS6IY49!?~`tXHfZV6x7@bnBuvZ<}lU)q@%35^hoxlLb4BXmqqaCS*dJn3F+%zhu^$q>;`nY59DD5@ zo2h7##LT$+n>G+bym>orkPY7i`h{jhmVFhh*>B+q^8_EB+v+U({lkc(=3<4OAyqC} zQ!TnBW(6X)F%QlLNm`Z}AoIUlUk8=^SY%@Tj9s;aW-zQu`Wqw1L`UMP$?CrPIc53z zkE?-2p>QvTPAaqPTU^A4G8!PirUnnJ0oRw#c;FB7*SHLzodWz92;7%^uWW%D!1_52 ziUu2Ofy0FEiT!2YFE{sD-#|p~heLZn+JM13HCWC0w1VNcBedZiaUp<{4!kb|bm7bY zZEbzrPy;|M{NK7fxLT)PYZ+!;2pmEH@cP^S0!qIHrHNGU^AoGCp5fO4R}OmH1F3XZRrYYmI*AMEWDQOEkMMOiUT zr-Gf&dS*c&Oa}JU>0jp+ik2K8XEAPW))<43QeklB#l07jQHES7=e3GQDIsaRBICTk zN?Wf;Q3(aL@qFRELf1{+dj@{@VzW~31_AH6JW1EqcFS(qu%jn;<}vH+%F{<5lT7&` zQ)g0RSMFQNVatQ8i*uVh%aN$S1{G=UTiL)bRcU#22*JM2=XeNqWm7_9WXU}c6e;zo zF$t88G9BloFGj+ke*(62#~2KzRhE_pl14PYfT16D8pgn{kW*fsU65S?T6r&;E9_Gb znu3X!_wCtxs92=nGdCpImVEzp1FOW6$l$c}dAHCYG<^y3%#S>%(7}&t&2sF4std!o zOQNBWM-I-BzLU}x8fcv=zX6QtC#4>V@^-yX@fOqo0@-1jJVLURqATy#Mvw| z+7(AQnZjq|=6f5Fv53;4DL3ANGsaa9`2n24ZKd9Votv(oBJb3(0_46#`(V9l+nznG z$nv%Fa}&x8foA>irMnjhQ9uN*CdHz5Zh2#;SAmb9YyfO9_68Kk&z?OsRJ1eRir3Zr zLgq?b(Hkx2fm5>8G8AKmz>xaU>_<#N2c{bH+LOfzO;fC>&NX5)8>a?wO{LohcK#4^ zJ1E}FQ4;jP)#iBezQWhA|7APgicp_g$|*N$;iB`pG(A$nPP@O-r7)%CzCG*!75y)@ z&;j9T&vfTZuKW9$ik0IMZ$eb=iAToIK6B@XR>>S65#BH+NA@^(Xloq$;RslgfrIHC z){(rp9ie1uGgY(HRKiXL$SatLW+VR?S3Ncpug})n_=}cp5DlK!#W&P~QlD9fIz~Hh zq=fzJa{ulZ^4w_&2Nrq8N0lHEE$AP&QHHyjb|-$Qy19BLJv=LDVB$(izc2wB_l(EH z&Ec=gT8-B6XI7A^M7?OKX6Ikta(N8->#=PAVm?y)eBodbV5vH8Zual)u8VZ$6p5Vi`N09Lyplq2*^O_@W zl(h1I1P!xQMb`&|KIn1nB$1`TEsOoj@hkTr`5SK1kaAqkshdRRsieLGM`Q+He|VP1 zjxE$i@OJA7*28H^rAjEAa4X(OfN?O%pU&ma$?~I!g%6<{R@GuwOIu?V=e~PHMZ;&O zBlc@e=__CgORH3U<*EjW&e}d`;r2AcN@vueh`UD2LiB&m#H5`xt$0FuaScmTELZ61 zG=i%KNUX3*Q$ng{$-G7(bsbZ~w$S}Z-^Za`9L&@EMHF2g38_X>c|5c+EN|314d4W+ zHJcRatLm5c|8YBFN8IWSqRJzj5{rJJamKc^z5H_!Uio#RQ#BS^TZ;v!d0@wx?6P(x z^(&afLfPM3vN~f(hr85TYiuRC5-lxD%4ML&rh3imuD-;y3H~I_IW^OR?O(?$XR@m z2^NmwO+l=>!Mo*7Rk1}NG*xHy>o%kucx{_egUpB^F_l)HZ<;M*qkDP0{ImeYAfQ<< z($qumRH%jVYOWzE#7<1XGR)SLS3U}!eM04U12Z^-Wfaww$zl)|udv23KEK%LbJ8X*}opGgt9eVZSw3NSDSFfxNN-WmZl z0u|uZq8$mys|1H-d^je2$PZFo0u^dk(2+Dgi32PzAAZYu!+AjO#|5CpH~^%BH-*Lw z-h082IbQRrhHal2+W-∋y|^QTa=v|8BlZx_id1np6;u$A82?<=~Qg6Ml&-oPBf zID^|RBVY%x(zmtb+_f1|*siA?IdQkwi04ZepbKcA-x?-Ta5 zQ3>T0Jjl1*D(Jt;FrUxl;Jr1x(3X75O2E`Ar-mPAxQBP&s5LKfX-MDA@V=DQC?1Ze zan4m@8KM5o-v6{>`=&_NL`f=x%?c9B23yIVx;(my9^)k|w>5FsZxlDOxQWrk z+2*q@ejH6EKQ$$K6t)h@YNcF?a1kc|E<|1Di~r^0;+8_JD+GVl;ulJ+ek-i{%OxVL zy$CAL+Dnx|i}l$en7@P7lzRnZQkj?H8B;cqTvZ)>E36rUJyZ#@CUrWi>K%L3$C7rHUDt{r@ z*ZLyZM8va&Pv7z=4^}|io*~s}Qk{l*M92>lb(P+)BXewI6;*fEH0}8wmtdBdi z0aOI{>Y&^iZwYRbe8D_l3LyV?sv1O6OOK(HZe1;h2lOfmocU5Ab5%?h%S1u@yD=;_ zsT(WIF_WBbQFXbAO^LzqL$UwQgFKnRJs|xJ=m!I2=pw)*AV$gj3h zijSkd|G4x1*8JCN`k+C;p&&pW&I7p6K6zDdKN20*3YJgExM(N*QNy>Kj{=ox`6M^& zE5Bu>Hh^iDhuBr;sGHGQr?hS&9RidW<-{#HA>1n({%FC*or7pM7YEQkto^oDl86XqxGg>&kp2hQVqQ)j;d&*yg0!rm)0`S6d94aC;;OBWP6JlX=e$H=y3AtOV zBVP?`@cT2TBdU@?pqN$LQ)!51c>gqWB8?`y>CZ)~NX*!A*SX;x)A7}q(2rh5gtkC4 zOwGoePGK73AsxH;j3yK0F-ZLq;&aIN_(-syT~y#VHC*T_f$<|2o@xuf?-o+vTTy8O ztFajGiAt45j%3_b30k=f8F3E#QQ#|Z*b5*w0=vpTQOt0y30dN6{r@7?T;me`I1Sdo%aLyBU!?JD(4otH)8>vE?O&9U^FVoGP+rec 
z801FzrurR2DV;dcGbwj3E@tOE@q)H8bv&|#wiW4z$F#v^_Ggdxs^-fICS=bRCw6E+|AJBzBceI5~!+c$@sNq=8CN0PuV;E z2^2kl!X8ppuyB=?{OZ)HZ3lIbZ1ra~aBWEWyHZl%w@CQV^;3s)tLEF1N}QfKtSt98 zgs4&E@xa-*wniFxj?tdxuhrZR_RW9Y?mC&}!5aV`yp;G`$?%sJx!yQ6G z;wMoH?Avp|7rJ6`2hs?QtY5wLb9|QoccZU|#=-hQ&!!84)eMZR|Fw|g3y%r1Ul0lw z%9U`W-`ZGM?>5dZN`xR?Gt-Qq-b+!}lm@&raaD=O6nx>Bv`?)jJNn0veirnt?@2 zzPOVh6^6f579`3i1)oOfsD+b>Sn^T`)X8U_CKL)j#9`Q0o%9E4Slb0;fkDH`226wia?`c?*q zT-$i)mXoAn6}E@oO>4JQowAyTa2WY&H8RTS?Yv42LfxbCkH4!=qR_7X_vl{WJ$BXe zO?ws*GEPD;3vxx@uux3pc4=$_$w7=T4jr0RfY*R&adUdYB7;oLsjEFtgEH*!u`rzZ zaQm>7likfF+ab?G`vm(hG7$Pgz>`pH@pgkHE`fiBn#^?pf?%{iDxrcmRQFM|C8_$3 zl(+2qt2DM9ppFalEfe}X_^To7p*HGci(vK!KfB}YRaxC$k;=NRzC+gW$Nhk^`ujD7 z)Ss&CI87SLhkq5#v1mioGludUPLM){l+tt-Zt+P$`69P}SqYo-q!_%m$wbcF*k9PT zHSsW^$$841Nc~#3w3{n1EDdkT6}g|Q0yxUYsc@G?jMd?&gzxqU=zn(XrS-sC?F5HI zNmN8~@~o|mHo{#2pMcA+o1m@UHXsE53b1Dg9|lrhKtErIS#)1Okq0tF2w2Zt3e4YY*2Wsnq+nqORAxH4zLtyxu zK{*)q1@vX07>xK(9r`hW{KVDg_5rG&{{DA^>yHoj&;4=LZ)s{3JsUBP)O*(;pg%+< zwqbzY`db8y?rreAUIQ)HM-S72EJXS{$SPDu13P#$T z9*mYX9j0ohOX_)JeP_1}gMP1}O$@LhtBK0h;;q?&Lq5V7AZN=Effq*kp|aa`Sq4gH zSK+avZ82awqWL1f#=dE8Cf<66z#&JgruHWzU*2oCAxcQ8C+O=S&(FUL^~)kWVjI;q zQ@F(2lK0cPV-4#+@=6`*&35eE52EtD=1%;@3-q+yO>|a1n{>FHcW&$pN-|O_LAG2= zQxELv!th7URNe81es zQ_bC}lH<#Elh(cUhuih z+&513kTM+qV%|Qm(-pEYA`eNDqi0bQ_Qv%;%9P}@+__TZFkyjSqDKxtH@f?`0sqte zvx`s&dH!12qO~xq^~#mc$ff3H%|XV5!3-r+j=1_Gw!&0F^+z}w`YhR$FpmCfw(-u@ zyGAPF(4J1%9JO7wyYsWYaBh7W8wxRxqcK{2FT|RF!S3^9)H^X;9pK?3EC#M^0v|%( z5tHiH00LB1E)KltKk+~PUx9&xbk~5-U4YyF))fKY0aW9*IZ*TETjKx9)p7hCEMPKK4s%ie=8MBz4Cv3w| zRM@5eZ|5=Us8pM$Q%i~6p@Ba~r13p7X%NwpH&E!X{U5pjrvU5x?Vei*-ie1mbi=~P z)xpYyF{X0%68d{7Qw4{0fvi+cHr`2(oN=0@d7|rr;I&zRXFm@~BiroLc>d2*b~z3snnrQ5eIifAY*>^8ggFbq-Z^I#q9k9-4r@4YdkW?A?l6Eu@is6 z*RQ^ds)CBsyG1{Gz+b@mo>UyFVdKyg5f^Vt+n9VFQ6j^IYr1lvM|jroD8bGLtZ~>~ zt>)Vun+u@*<=vp~NdhjB^PT2IJLf*+8UME{`mp-K%JN~>b)sg%Z_7Ei*Qo0(OQ+?% z-?t6H9`|-@5#70dspn3lX1kwK22aEOkXC2!kO}*gi;_<&_ z8k7#l1XO4_S{#nPq>6)0a+ECUhI^!QaSsXazw|Pb0do(&%UcA?{5IW|zO~UB` z8Ra4_x{7Sz6hL0A_;@fG=_OY&9@kUOo3dhvb~||8X=HYr>e*tS&`3HC@0)uTRb28! zZWK=^q(1pi`W)!|Da$eB^Zg}mAE&nmN7y$Mr8{bweQ@UYkmgxYiWzMApF|PDTOM3h za)sZOtTu8@Mq+lxda^_JvsDPkI~;6F^C2ov`a1OJrzDZ;<@@hu(w8uDbE!(_`zhph zMQUO0BY7@RUn$VYBB~Aavc*m#t8kzMH92k`*z+}@no%TGuM}#x7BZi}tJpGxy0dpk zghaVmzu6W4mPTWmr9FCO*y~WLc407hWgW#vbNBSI#S1sYQ1J?93!>BV^E>KWBn(`=%|kt+DR9} zr_}q?fO79xO9|Sf4uWH$)zi;rMnNxJNAF?&fxVZdMu^-+E^<>z5^lab!1esj?mcoZ^_O|) z_gqEcDfv>a8MSQmRXVXxN}fDnEY@yXU+||EE`1`$Hk+i^g@+b1TABYDpX3J%*m+p@H+(!_JzEmOiP;mVWhuw!0Y!he*!!Oik z+5XGfd^We~Sf<4qy(=o?ZcE2I+DaBz)|DmsnVyY46LLA@)O>9H3gs;(1BGQbSv&|G z6Tag51u(sJOTkfADZ%?!)ZFYf5q(~aQc|?Ba>sfr>hB}XwQ3aP_QPj{`vFVWzj>#}J~INdK-8BlGZ%oW;?J#{4Ci!P|-0~xQa5=%8) z5tWa$mD!h4yfO+lVoV2ltOsANRECf%mdM)9<1yeMf0tXxZuF~DQK1{+j*eX4;Y#0! 
zW;Iy8KH=-u>^#|YAZ%KdPNUaS(%FtZN*zWm9fu{gsadR(?2b3nv8d4-Q&p+2@>SDD zI*lKM7r+}zPVOi@Y({^1gv5-7c>?_eJp68jm}()!e&V-%_Fq!G*j%!>w=g(fb`8#q@UPH3%iB0fm!{jFu#i%)54 z$6K8-8S=1N%`Jy|^0$`f=nXn&9=kYyUFrxz;VAW2;6nV*U$Rx+T7lz=b_D3U* zkSqA88%efvl96MxE53waS7?P;FY-OrMf`H9DYwiwELd4w z{I+R9psyoOO}CoX*&gjZDk!h|fDhs=GKp%_9(U^98VhGW@y5=$7sz2EDyKk+F&9M{mUAMIz zc$3Mfn_q)IkOXJdLdeHqzh}a>a!)Ub?ue#(Yb6#Q^3E zE4-Kf+g;dXm3tf)qp#I=$0NY_e~bz&0VhXuo4!eE4?#~*A~YF<&K;zTrL?GUIRLqv zj;z(|u~f;3xziwO=50uNoXVpkU2P>e!(DM{wSeG6|I}E)U!m#t{MF|#+3Q72VsJ}i zZ3;F;4?DWQarbQNWG)Z!=tOzK1o>3pZ5M zkCvN>dVCw?gFUDoVqJd(?xjw2oH-*#8_&@MCB@i+mDy=!Ji1C^H>{^#glXL4>xP-d z>iSrD>~BH6qPs>hReC@|T5l2Vr9$0ssOER6!IKPJKgRwQ4)#T!2J5wt5o3t`;ZX|}=0#RFP!RKVG@=bO4bGEl~x@aq(JCy0Ee&Z6GLGl$|sgk;L zA}gQ;sdAX-VE9`QTlv06<30~=-5Tr(C?DM0^PCrU`U}9|{(neT6@Yw+!(}`Lh}n^P z?f-uu>jXUZ10kQlz&SM9b8jLT^1U~FTNyB$6=L1|k0^c02Y!MLcHbs%dv8AoSSxn$ z9zgekx&{Q`GT!I?wsSu$A%|##`1B5Y2Yh`uxQKct_5^PK>s^p9ps4ma;3Wg)Eh`pk z3+B@u@f2Wl2JCgV0Rf0pfDR(r4T$m`_60!uU}k(dKa~IXz&?&wkVfx+)|il_`8brb z&oj?RZ$JKCkD2&>ukDr@Raz8PsONoXQz@+>73mj-JecHx3 z@A^CJ%LRj&02vxKAG|L4Sej{C{AUWRL=gM_mT#6`U>V#kq<&A@okaO#w+n`VUcPYI z_@4g&LRi>bJRl)s1()uR^e8!zalppc`tI&xTyYbXx8I8tmlz!B%=M`527tuWW(J5q z4WWTect8O`Rp!I`tDbq)U|;VW%Sh9f$|haB#tx{;HvPhxN!`3)TvRZCsfmd0i^Yk` zT_d!OCd+O-l)8JUxBU5w&{>X~hU(?wU-%SzD6wPAW86$8p1vtPbn?W&qMw7GK8u$A zA`wN9lnx7~Aw(ffL_r|?xQZIQeYz#A!-fQ9PvUd?R15gj*j&1NL@H!p9XeO>HI;Kl zpG3hJU1$GtZI|>X5BIu6Z@5q@_idr48*UdpJ90c4fEveIE?DHIYBk7?>CBZG?pB#V z;QWIZ5{x?EJA;{1*8+=vz?kVH!gU<%G*Cz}waNm@9@twM-!NF6TAI2XvGG4 zS+Fsq4z0ehY|va9*xI1oHe)vzH)A!lD0pK6gM(f}|X zAV-H6xj*M`UYL!|-f#o;DSY%)YIY6TJ?JaD&F@&H)Wf(*f7Xy(7FXx=X4){+-E44tp} zX*NIv&*C1HLo&=Z-do;!qK~m%VK7K?BVHXl<*`aMErgNNjH5i#oqwFH* z@{(`GO2wgz?icuvsj>{mde1IG2)A>asbz1fyBzV~CVUm>)5)z8H$s=#-f&bUGk>?+ zWea7AP-p8k4pbm2q|b@?2u*Aq%}blYOb4O;-&47T^jg z+7*Ay?UIH6CKs7q@F-=UZj5Lhth{w{VAe0+;%xZs_|xcCJUdnRuGK1k*{Q#b_{wVb)k|kST|CkL zF8qN~?K4u;_1jfq6XqK=YLGl3d3HK|7nVBKNNJzaw?L5vFc_xk(yZ@wXDTT#Fa9J( zaiG<+BIrq^weYOCVl!rYLB;zg)(372x*QW!vP{*Fermj*k{`|6EhI_v9i0&``EL3m z?P_v7qUx|Pd}eZx5sWhtHAPeqc8W0&yxkdsTI)6%T6PVKu0mxZLrHeKWSJn|EJ~RQ zhpoftDuLr+>|bU2>Fgs50au22x7&|YG7GEyPinn%VO%o)*w9-BP_>FSs+kmVh&sxA zp6yA=6K`ZtcF#ZnzS$xs-F>bq^`|35QA@IaKr4=rPReAf+N_Pney5m}9Pj8}U38Ke zAYbo|DCSAY{j5L`Hj}rPCJA7uRvf2B zNqJ-^3D8(AdZD9(^?ypM`Z`5>{3$6P9T&An+oxGe6Zosp+lHF<)I4hO=`eq;T_(zn z60SIOx26*W3a7RTAEx_}5TUkVVl=`5d*}b+m$rDR>%yV}%<1H8-nOy$ z#X|=6Awtzwh}gm89~<%n3d(yV(V3 zZ=#WA&be6ot%c9#k}7UEeu)UPH7P#Fy!h_6$w++$(IfA&-inl-c;llGgrRbNmX>Qi z$43$b{?VU=8y)rySK%xH; zKpotxmh)tFXD?W}Ba^IhkHPczoah^6cvr0^1ebQnvR!Or#1+i#;3&nJePO+#x57B@ z38@;F;5SaUo}Q$Uc38WyxMh@Xickkdb4LQ^@C4#I@xiSTgjYQc6EeIwA*IFk7kipQ z=o~IRdp#Zmyu{~rPLSaYaf3^%sb`Iq_w}=B^P!Qqny$Mxv2c4tD$=!O&VH1J)AR%P z@ti9PjsJP6_zyB+SlZo4<~q+C8q(37@62o6$r7*lT6}?c2Q2dn+AY$gGMe<(xA>K+ z5`ND{Ib(f_1&t~aex6a40~~^o98)gxyL*{^mgKBuEQgJQ1?;7o!FK@>COLWaKWekl zi#N}|)xmb^5nAVccrnWlD5_HeGUAftQz~etiDl@uvQB9_D!f{Q^!uurn|d znD7R92x4RcUO=4+ZyHP=;2ZIOQ{>5d0ONi0S>Zdt#DF^jXx)9-o#U{*&4R8OcZB|5 zr@03`-1hc?g`$71+FyyE&1W}~vG;J%(xnf#SnicMlj*U9Iy6k)w@Pr=tbbdMDYsb` zUFCu#gK4~Y%ARV3T1iedM09c%L-@Mvg5kO54Y$KS_kO*)oWde?M^giM364;=mCZKc8VlNHph?VIJcgS zI+zk@{O3$UmxRbi5p}^D{#IpBE)x4ks6q|IW*S9 zT&VuXPKXJ3b87|a7@itFrsy-jF0{rgDb5kfeHwCs{%b#)s73jd!AS`33!+FL+i=2X9BrHwX$zWj5>ZpTF?WK5y@u)%xJmbQf-8lCmHG@7 zfeTtcDPj)21g`8;BqKdmN`gqk9uYdj3fRP*dJyB0=<`e1ZE&~C_F&Sko5iD3U7%n< z+g`H+6YC5F@ZqKJ(gi@@J%VwzUVBFosct~n029&&AHxGbgR0kKhoe5o)@Sa+7o7Lk z=*FiHF32K^6sAHkOE2nv$$a9Qn#r$pQ#-WZvn$s5By8_8Jun*)u-C7K5{!Ce^osmv z)8kK7CumADsS`i=SGk?V-HHme z-7f+YQ+{l{1M_!Z=EVPn{_Eg4S%xj$ncGwn9HDxKV&4! 
z&GOUl#jf!V!jqe--!gwBnZzjRrjkQadcDZ+qEEb*u$B_g`4>2FX19H78atH+dF5?t z2Gt5t`O`p)97Q+V>ku^JCy(UT7kSeg(Cq zUcvP3eElo^23PZg>O;*}2zh52?got>M^{-L;w%6oE~ zGP_&nPRi|D$5$JL^smX}4?Sv+PPb*2sF`PW^fP%^H9f>(1|g#(|BX)U#5cIVOE*tw zOL{^)aBqDCKY(IyiLZNa4bb`k18wGnkBA2Y;}3aP{mrkE|5eTZDEjRi_!R(vKLPJ~ zX*jZbz_%CFGobGaaA!bK%lYu@gYAF%AKNFG1Oe*+4SW~S*=0xuSeyU>w?gULd0-4! zMq<5>*#S=9^Fr^{@*@ah6EF_mcS4_BU+3Y}2{uJ44OCx~IQCC2!SHLfLX#1YjGH2* zU0X#nkE$+`pkj15y*y%2aCYPFOoRE&!)%Rda%MLVvX=FM>QpO05VyN?AmK}$>Ydj~ z&LYKMU?W>y`oT7-qM5DtucT94Snbx(Ay%WE;^oqa_wmb=}{R6uZ?=ClZy1iW`QLQZt_iLgQ#J-nS3T$dxp) z2hiD^?KbJ2d~qtuX0*rDfdw`USFQ;XBj@j#CfdJMjgaTfr{j5V6R`yql*9CPjP?FJ zwPCdX-~=nF%g?XV=FJp|67IB4yaksOw25a%ij?pzm;|i^4Q4O7(A$rtkh2L7#=jRl zN9+{Xl1E{Hi@;L3p=;iu%mO)`O=ELb8S+9rQLNE6S|EJs+wz_EEj5`1Rt|E+=h~Z$ zZiDcR-X-K^crRa$(oAU_bmAY+dBPP8Wzpo1k9x(Zz0wGhyWGCpl@$EJ3|*LPjN@;>h)&25_y*XY2)IO7w- z@w{zUFuh_bXx}U-67W>McQih?v*j?pP8v&j;hs+Fghy0Vc=j3-D?4(5R~$7{3(MXW z>N>Ex_fw#)_o(jB1`)14;zERmK3ojm70~;@wg2l~RKQ1Z4I%+=y@9K)8$TpIJ3@SV zK|KO^CP0JW2~hB-_ui)eifab6^@eKxR0^>FyZicozs8z-CMx)sj~+J$1P6SO`OgE8 zVgG4*f-&U*N8S zjIdl8smr_jvdiUQ<8_-G*74xrkv3^$Oh2g^@TtDvob>0+Roq<)FW8(L>kjo!vOxP@YdN4h%7%3ZT|HcbKp)%PrCjUO z;%DY0+_6le&MZ5lnu`^2#N+NWD(5O~PUN&tm6n}ShedQtU`u8o0wGnE6u{8QMxUI= z*iWv^0+Iua*t_`5S&&kvZd3OZ=4sEWlRFe8e{!>@pUW_gne|5#xf>QHBg}qzr|$=`7~$Q-TadWBZ)uoJ)E@rMwHoby%_oE>YmB z6ie{0?x5C;FHEYa3SGexDVO9~GDBuPP$`a{2al+%oKptmvUw_A_AU)?Ddn z$84OBYj)!|jx*&!Q~685U@w_*LCBSgVbe62r81y*7)s&BAN|y$)obiUy9=N0ls|R= zp%YQ3#INGjk>9!G=ZAw4gvtMsPYvVRNCx#PSf)x1I0g~)*|$Gi4kfCt-r(^wf@Ec+ zMH1<jm?qLFrlz()H`5lb>^gjnT~U(!Xu2f$$ZEPu~=D-URuL=s_Ag_P!n} zEtgVl&uqNRuN0lzzBtd#=XI%GfQT$7Es)K6=O9`hls5BdKXS}Rqx!*`iY^knAma2Aoe;9Smz2^XvT9;~6GR>MQL zm8kNuTkZY{->c|L2HafZ=&*sM!I=;17%}Ml7lxVDMNN9p*BM*p!v!r8#5G1X_4X1nh|!GwiJYTwyWa7^M?t*c>t+fYAhrv8?^lWRv-Rpls&E@MMf7IC%l7wFvTYeBj>-Nx+}P0z^$mwD z+pXrd+kwTD7Q+$V-URZ5I$1olNz9O4-jkuWOutSaUc0f2LCNo{ZLPD{<&%d zyM$Yx8@N-@FhZ!lOU(ZV?C5WH!L=k6V4y`;3=Ap;-xeMKQQH>pLO#V`JeCvk+n_$U zCUAcU3RZgQh1=sTvg&Nzxqp<2erK5?|FZ(Cs}<(uXbMw}Vm-rAE`=&|%vP2abyfO> zWiS=@Xs7qA1~n}}5tobE_bD7Td|b1qAyN0#DIEL7-md(e{60#2|GQKZw4ES$v3?r) z$K81nBlB|gOptxjo?j((+{Xol1^Gh(Q8C*$YKyB1gYY-4)e$HyK> zP;<2-x}Ba^9s5H5&|;zDQf5{|WTUtm_}zShe=8`d0cZdJ0mMK%zdB=4y#L}tmZMR; ziJVwTPG&W)9`)4`4bCd*T0fCO$^G4t0k$4pg%_?Q6w;CFeqeFpYQL45d}+HC8C$Ve zPVs$tB>t;x!e2(lQT;!v|3An6@5lZxBB=d;eqSyk|0w_OH|A&mA8FHIec6$qxCG8u z{{p}M{%_R#KQDyweqi-rYQHdsnMvwtazykEQk_2tn8~I?s+#=7Hsy_sdc~7!%Kh&5 zrL(du<5QY&IqTXZafnL#5&14SVoN6Fw+(7-qZ}SR29I%HjCfQlng6_BkmrgErR>w;=b16a{iU!h5lkI2hw_sTKiD#S{<80fQ#b zYdE(W6Hj#zXn~CABlhuHL#IBxUFO6QakJ1;+A$hrYD(B#iIPpUZ0I7LoS~yX z5LwQ%D#^7_gYLT_a*oa0(_^=B!(vqKp$yHrMg=Zx@zw8`S9jA}A zb5>8N&&ChEHuu4fBPYwIJv9o)95`7XA~n2;t?VKgnXL_XE>h2#xmmkhz zm|^!7cwYx1Qc9SD@Ehnd6Cz)*+t6(oh)KDIm=X~=PdgPW+~iJ*+KkKQ;G@X<{O$2L zER?xR0@eSc`v3D>|NX=N6-M>{zbzM$|6l6=QTzW~3_tt-+6Z#UWk-VI68OjT{~y`& zi3oj{{dW=n`?CKx^v?$b0>A5D2na;!|6=&5e@EED%gc@g#U=0$*MEL~p8t7_9}))w zh5m2-|7ZUHZ~Y7M^9rER30=e=@c&%@=W_iQ6#2_B{)Yea@B9n$qSk+02o_3Lc8W=! 
zFwqrD#Pj-f9Bv}LizimQ4^VnYAsa#yY*i4>$P%y6lm9AAUwhfN<)x0~m5%bsKOQ@_@i;+guL=BDu||iNhQhl3%}79Zga!Lw7ml@W`mv9f~si!jc!UueqG=_ zvNu_)bcGPj(B|NoY3us!Cu#Tqphx%<5)5Ipkc;w)T``DKQrvo8<386>qf7`V89RFB zSAO*TDuBZ{p|vC4*uPVQ@pSdHRIZ?23lD>I6wA|*t93gvg|9*+>IojwP{}9B8z7<&8^yW z@T5-uRTeGspayk?8P~?fEFQCE#3Ve&EML6!N2Nwy8rvORZ`MG5^EYjURQg3hO73A) z!(Y8cab^O?>-znXKHX;aJ%w~dr*Aid7dOv zlC56{oo81kG%-SRypeu_!Riuh{Gf75*luA!doSzxzN4 zbR~Wy{{m_?VH96<%mzalofT%HLqimepT?@^ zZ#@!0%aQ+ERaV~hZfBsru>rB|fSbQzy6zuF$@vw&c?~JW5e@W8Q8H~01ibgdHTa}95);QcOe4rgxzCJsKV*~1Fev<}^WfZfxb9)paS3dk-D5@0!P zt8KjGXsTDh#B8Tt0)c3?|H7&B@IoOLqjA>1aUIIriMu!L7~NVI;<-e+j3(dUA`hWF zvPMwq$Ixed5xN#mO}_4%mpo1ntgz?&jTc(1+y(VtuQ%;#M{*)&GB_uS5v3m^ zD`D35XHpY|ZO_X4pp`>liOoDS!cUys1Xs(7$qTY;uawFsRun4P3NY~gQfOdQF<9+1 z@<*6vVFoJF4~=IYs=Rg@ob(m*B=lWjbD6)70dcX$vMjiF+Ue9`_S7@Yj)g+;^NI`u zJ_>DZURZ)APp*JiqT#XweOpm#r2g7N{?udhMq=;HgKxr}`?Sc{5p@=beyf(yVc zo`#!X|2xcO>4X-ao5Sh3gRx2cEgc9={Z9^m)=(U?jqt9V7{-vuQFo3unB0zIHfQgg zj*2Eiv>aiX-Y?w?EgqWoLmdW^1I|CP=xJ2{#9bs>jsrP|rt;hsLdI*cCZuaeW(`p9 zdb}gwdxKAI`AoYuo+bDn<4o53!bNmET~h zmw=J0^!3hr;u-V8_`dh_3a-1tx#HcMv*Nt9;{7wFwAi5==8Kf2B2y~#5SN4aEtm_q zB>ARvycJDp*_+n5UQ;;9&NPNXI8%3U-C^-{e{e#E@+e1@V^v`2VkWgokY0WugMBvI z0U!gqM`D@*{@;3C2w20*VZ_dP_cWeCCBcuA$}h}2#`5esa%WgpUK4gldaf<$EHr1= zTm|$+>txlz4>re6=-ny;-RP(_qOOT_E}}z+Fur*w0e!+M+DiquiIsAd=rh z0y*z{r#V1TF7S*ea0RGYiS}ijSPVDvXrvfg6v>2@kw4d|&6g)8vu!k)y&jLc9dF7S zQ|YuKCT1suWrK+Li|u^Wa#?L5#s44-5Gr#`yQ4X<{|$O?5P9jGCCLmJ!J9*| zF%7h(6r}iS1D&0qDUMwI`~5q*ts~T(?0Ew%zhe~&_sa*F=*tzRwcaU1Omc~oo7AH{D4lZU*Xs-lG0PpN@!H+MR zpIywW)rd6{ZsIa;E*si4JY%k+J^AVh*|-)+!SgjSKY>ZmuU?A&LybL-QL1r+>^>Qn zse5vM-08-INx&^Uh>+{`RtqtSSV-_X5x*aw;0b=r(EXu+*!Z6~8Q?p9pVvfyS`U65pS9F4S^q>{ z0091t-|jmU9l;y^aQJzC#N2B$Qw6>;;p!?2K^VsIDgUAy64bJ->Su}0T)5dfX3VRm zG3L^w4h_CFLuoQzW(9_0-NnlDov6NvAms<}zJf*f!>OGGg+-Y=FTd2i`Q;zJ`+97_ zLEcBQSQ3XSVm4#mjGV-@{gAVPcEyH}Zaie^YtA%EGtjEYiayv_;JQd2ZkhmdiwmLhz$Bz9XKnMp zv__Hxiz5RG`+C?>I&r6Yi?tv}4Ggv3N;! zeoQ^Ljb>U!st3~Y@6eF0Do}{%uQX{W_0Gxo4Fu4@K{jJ+J5^#?Z4-9PkyM&j-XBAe zdJ`c4e%}GchJ6XBaC5PlT!?jYbFC*b!sU=Z1N9}q*{wN+{j*+=<5X9xKY%wh>Z z2>bZsn*Q&(dEeb{e4p1sK&S+B!$J$lF3SJh7>Sht$*&B63+gPO7v1X>p#J_d2)`S| z|LVOGRq$C)`IQCyzoECoS8ngNZz|Z2ZQ~^X9)K_PUo-o>6$eZ>IkU=bAo(fn27Lg0 zd=CI?KRdNg_@6J+)t@oOwbo21UC|aqQ9>iq(~We}Zu(SIYQQ|HUibptKg4!S)>m&{ zDU@c$Rq9_?L-!nsiPlx^isrO7_B+E{!q(NGG!sm9h^i5*YS4FgaT%&JHb466^P6_{ z2NyX%srnSKGX47);QFcTrJ$Bn>SH0Z(mksb4u&aSwhjGN#t_isQc9^ke>@)|5tEL4 zKpOez$QfmvOMB2{8WO>!FWhuA=}eK`Zw)$E;`5;MkXx4p&JnS+xW`;U_h;dyHz7xm zqgKn7YwA$CYSJHrlX`XLSkcRsX(MD+O%9uodTu`64%-H8PqdS}-QZFJ1odxd6TL)| zj(Mp#4Sw|Psn669nyr^DTepD4&N%+iP)2^=`S>&&^R711JOM)gz!poA(7u7^H?i>E zae|<;MKeg?F|7L%g=ke|_J){JS6rS17$X`DwA9I=MvK~PRi%T02CHFku~!s*{y_B^ zq#P)R<|P~|CAtDzDZ(jYF~nLQb8BM@#A2Vl>54>8go>&(2ejnedWwdmT0hf9wA5;T z^9CRF_yOlaJ8Sh0rmD=!RRY_*I0Jo|vLr&7vg=mgg|Tv^2y0O@G=xKpr~3>%*#@t! 
zUS5w=a#ct*>$$^?A!Dh*#qrxIm^nkOH0(~N+{tM$_>4pPjxRwi2qS42ZXNkOGr5FZ z#t<2YWem!%JAMQ}?(S!+-b1ec3;$0#ufb0=e=19tV*8`ue}7SqLBy_+PcEab=FW{jXZd zJ--3?*MQ$2`1Hyxfpm!Ab!wU)Yn)~*kTLk!K+Sz?kQ5@+WcD9g_M;d|U+}YP< z)1@u4N{X3o#>F^6%t&x#&#+u0iL%qwjbZ_%6oDBdDn-)vW5#(}GN1fZaF3^*E-tC6 z6VB$wN-smr?C?lFI4ELLJgk43kG|jft$eR2d@*HG3DkBdBox>1;H2P4H^Y?P(TLjS zC?REA?+J~ZthH!};e7P7ev$5J7N{ZfqHocjlfLaq0z|sZK_WTzd>y6r<3s`qEc`4@ z;KhBGxBHvRu4+tYRYE^_uQ+gpKbmuf;eIBl^f)Nv6wN@1R->B5&=3V36w;mI-^miJ zS^CFVfruRwZaC+<-`(gA6%=I5KOA;s~C1-9+){MfKoULQu#aJ|T5=AjQy#x83w zM|nw#hE58W_L$7*uUkO_*RBi&iP9$Y3b5Sn#;KJR_q+i;@IjU~#z>25o$%UTI8SF$ z-Y~5v61b}>AO*A4`^4Q+ zh5ynq?iyVH6nj@z?VtG>0QmeYjo7nmiin`JlM11bmX#};o0KWY{QMQS>So^gL!x$| zG@XOighSUmjoz%`s_oGWRH&T7G8Gx1%&=y(2laT(uJ7QJVsA8%ltrk189RUW8@i?n zR@G!PL-=}^Oqw|}wjMPNN;xTm$nDJ(M~zoGXGeP-AXtFmH8EInM9>fETIWDM<7OPr z9wQoNE=vv=DPZ3yT<;i^*>$px1IHIv>SC;Du=535Wf+yu)PI|@_}3$r)KGOZ63!i68T z>zM8Za%jkfVp{3Mqtg&pb`ypjEiGs!k5(+gTb)FvNG8%Kxet%Y6ZmtzwP|v@7QP<{ zJ8qt43swpip$o*>9qE%AS?$LF|r-PPRNy0t6B)(-Z+hp zpJXfcH#DRAfO69)KZIbEdfcNc(8;wt@tV~;r3ptJU!U;UaEoh)c4Li>8qHeg^p(VY+@0mvPI`W?iyM-78Rw{&q|NVtPU|uZNeUj~r+TyI;#aOomfs*r9UL zmQJKz>-O0zna-wC60wiV{_oTP-Iw4G8V(e~Hw-P6gKzmQWoAnO$9n1}zXg?bG;DP%IR0v8; z1~+uns$A(s7BI`A9>PjHk0($jitf44ON1oqr>qSpH|>U$WiE9*d6+^6nc~|dO@b~B z&dvJNxbEJL!>p{ul7VbnTYD29p|W~lDW|1TrlMScmS3cg+>OOgj50z}nYCE?2{de= zPxO%e^~Tpw-%en4fE`!aKsE9Q`J6K1k;Ma~=wixUPkeqVnwZ}6(0MaL??uJ6e^~!D6e<4jbnMl2 zO4vcphC2oYy4sUOg$_DWWJCJ`q9u?)t=ROki({p{`4V-8SQ0^c*C!Ml9sPa$&T^Gs zuhyLY<5c>aVWP;K-dlq&zNtfUIu70V)X&giwI%wc1U9u$-Aj|@BVwRxx>bKV_Y*?< zwmER1ttnX)Mrh3OF~1eDX>*lApE`@t1+f~ilwwgEnO4m1`;oD)PnwGIJgO2sbeK+5 z&J0efG6Qb(sBfh2^T13C6d5QF>uM7%F3x|o<^Rqv58!Wqzo~tbOX&No?Zx;{5qm7U`HpxwYsO41$JW)u6+}0! zo%kfggy=Zw^DuO&h=`tlRQsxeBbGBIQ9RMNKNkv~-6!^Y1PV!I6}~RKGbIua+ddp5%Y3OMa;I)JzlapF zjc7pPwIc89En9h!{6&^OggU46as7Spe0hG!G_$eZQ}3?gkf@HH{&Qfxq78o8ke(%5Fx z*tTtdGf#f^|M{GA-tW0y?U}u*5ML1#mL;V<0QHn-}Nc_k#DOZ_ud-fE7Y2yf?2v!!f*Wb`xQPJ0mKCj z%Y)gyNvz2?ZQpjD=Z5hMYN;HDtD&Q8g?_ehQF{&i+_L=**nQ8ecpYBDFVFtH0Fgcj zc=*7F@&K-I0wdpv1_8(;f9~BDV9EsYg~*j0O!B|hCE5f{8Gv$8@_?uR%EW+xC%|FQ z9^^jLljl4i4@Lm~->uK%0nh}fppF57;cZgj>*pQv9hvKW(Lb}v`YQHwiH>YLt_Qez z!uiSzKpqMyY_B(B!-UJk**fo==gt~1p8b$zbTDXN5?UfPu|nlINC~>yyL~Mr6i9#+ zES(l4qD?(QRqc{#Mi!zXmMx{U*_1Anqy4w0{=TRHzR=ZYc0W9GS zZms)ZonsGaB!jP$V_Hi419!eN&7_Hw&vXA2wpkqY9{g+pCq8?YDXEkN30LjLQYv$f zX~^%lpW30%QhpV=|K;MAgbW&5h16f7GvjE~$<0ve^sqxoLlnCYoXSix{&W=cl8cxY zGADv4@q$2T7JNt?QSf1bPR5jc*s@AdrDaOO6|D~EVf1znV2*}stVOs@FPrB^R|D;- zL!-q$V)(a#=LynIrEQi!2luqrZPxm8hR0iMx6=cAzi|Dvux;=M>X!4Bsr(nFisMk< zx+mNl34K3w#wG$r+FbNgU00`KSta$Z@S(%yWm;xVk)Sj$pB+yrOO*_t=q06|CB zqCQ(Rls8LMXO{#qFDl6U)m_aBhGj_-%hRUy15Ob|E4H>Lv3dwGy@r)Q!36API$Bve zEP@8L##3~sl1s|yqho%*1|{Y%4e=^9DO z-|ya<+~o}{_+M}J)Im;s5)MoXI1hY03kj!V0>0wJX?kqzRo%R(3%Bb^#1F5ds%aAD ziF&ep@UI$z4Bw683XDW={yrMcf{$#VqehFX_MHebaakD2VX=fFwE7qIsj<1?y@SU`e_#xBLO;W3N#o}yn*MB^Rc&d=VbX#L0k7zd>m5NYZdq!AF zEx#O60FPT=+&b`S!Msg-$VS0i=V%;;LF&C+TX0l4{k5JoLR&x_;))yP~2_x-%Jp4kip)!eCc3dMu7-Ge-7Y!#ZNc zc}?HG_80Qb$Y^GfGe%0T*yBFkpzg7H4(s-zLAtdkk9a(U@}Vu_^5EY!H~L>R#24xD zN<-Z1lJ@K%`6skC!%Ep%U?$DOk)M5v$kApmzMfsQ?eq996P9PkM!VhC7i<*|`_SC(JI9xo$s|D$k5(43Wij|&49|?OFwSQX`Xt!U@#67B|soSv4_Lrwk zM`(S*%aYgOYJOB9(w`f4&M+@>8CL7a8R;ciI$f_}ckq83auzDFa_%|R3Ra=nkA72@ zrr#2nUnk3a{qlo;h-uRkO@?M%8ohy{V*b}#N= zA5Zm8!rWK)5q)`1=XKb0ZS5>BxtSHlpCw?@QF}AvN9{Y~_^vl>x+#gUqKsW!TI2bm zq{DEo%|9d`PU?hN?I}r`Si@ChaR%Fxpp+xizFTl-Kpx{Un4E zT`%nQV>7U}HxfvM?|re2OU+d}R(nKMS3Jfvhx#*TKLXUC1G9Yf?jvjHNv{)U-$ne%L;)0WfE{0 zxB>|cevCF1eyYV_{^KVHDuHQ(2O@-H;07A|J1RRe$JoI z=b^iSaP$xcGVj4lm3V2h{yT5D)cE{$qjeDEwata7bLLO0u{A_!1&~vsYnNtAupH9dL 
zZMQ7Jlv|i03;b+PekxdrD*XAU*r_ldQdDn2!`Ab5qvT78WZSpj)Aun><(}@=5LCE! zt=bs+NNiXSXD}(P8e*eeZzzdM81xbUqLLz6*EZ|L6aL<&;vWYiS<>v0*5H^ob;%8lHRoK|tLfYaX?oXc(&|>KKddIdi zdyFxYLUkTjPQ)l_HYd3VR$H&@mhT-D$NNIdoSxvLtX2C47 zW%2V)AuPv@H(K_|UbI&L`UP4`J=^qL`mZ{5#*Y7y21%`jA@b^yJV3lGrJF~(=7KbSup+x_=@0siKOnSRS`A~&vjr>kh z5jBWeRY5?VmxFe79ywoH8nyk)nM3}^!2LVRtAIkwsWq*|j_#3N9!Jg5{yz_ucQpjT zQH!42*KE%L?}G5VcklO@oE6qtr!_I#y!wa;I z)F-R$zkaO)D5ny+6aK$&Ngo)g{jjH=2K=M?#6Q46Lc{Niz%A?9{C|zgJ^#W)di&Xh z|8eoXh~yD;nqRCBm7m?@IqyP-Ay0@b?R(lPT!q}4PgKZqlGH#J{1OD0{O(tkHLFvH zNx4`XE?wFn(vaIx&O(Ua)68iM{EUNL^)*4^YFlWo16_9F6onD5InkmrCrJ`4-AP$1bfAQrh5P4 z#u$CLv^?V-X0EQCz0mCCYHt)+%y{4))h0QB+-%psbr!vie8SjRVi2RiRpR) z@_zxhq0gWegQuQfPe3e7>nI@d-otziQe3}Y19}oy7lclwz+!$02>^ZApHRK0gd2nQ z5tst}U;T}!Fw(DbSr0ggiwpj*8J`Cpi0B_nPlYhbfYxoLQz58ZCX;vkH-BKX=a?c7_)6u0nO`yz zos%qhk4Vy(u1J`%v@n~#<@CiszU!~k5C4A+xBHih#_3g>jo_LR^d{g=%wOzWKEwGYW!?ulIjTkNP)TdLen_I+wdXXYj6qHP@cTuTv zuXV{=zv13vWvtv{VPTjzTZ?)wvav7KIeioKzyG;7s?b)O36sqY7Jtq$##xPhqZ?)_cR%=`a>+g z$eXUa7%%BlPKgZ}3QNsvL?Yj!myiZOo%XlfZY!)g%tJ#8=S3U+eB_%**GO*Rz32fQ zG4=Qv(}C4-gYVY1qitoe7hLZ7VXlVI{9l)ANc2+i#7-A{nGy}lGFLb4h{$T(<_gnT zyQO;z9&v7^zq{eI3>UXAv}4y+E)sUUauMG#&d```_6$>^JVAN98_hYTM(5ErG4)MN zC)R75K`+Gg6qZ^t~@>SPY}2{`vQ91x5V|iJOqTR!n0g)UL9}E!QnM??Jfq?wpS0XAGoRe_! zB?g?_{qJKjGFZXAmafx3zn@1Sj#p2aueiaiZs)mZ!dBq3WT~N zc26JLB2U*Ofj$Q?o``mTQiozQMuBMu3KlIfqgQR=*@Ap9kP;X9& zHqg<@)pX{Ji7=(TaSZ*s97DVoS(xe4+|CONBBOJf{b7nJ$~iy!-qKw_N1KW>q`?oZ z-+UCL)p4?@T6Dh$Im*2;?kTLZYjbES=~(Fal%BX`Yi;~AiNaEb>)FQ^Y(M;hj0AKQ zEAmcqV7T3r1^Z;Q)%x?7F+KI=mYk;NL*$ai_AV~dAQjjqMua`sfFuTAC|6qDjqc77 zB;=BlGnL%&%29@AhgVq25z6~e&wfmBzEX7Jovx;jI<53hxJbn~O{Bu`nlwQbj117y zCcB?lsZGjc3N=vHWw{44UC{I3l` zeFAPhIR$~hx{j;+f+r6g+h)bp6T7{O`W)!$a^Xw|qgMD3<;erG7XNfcE74Y^ywPU@ zifP>Z=axx>6UMQb&!^PaYKpm^#s9=>e7-}2SC98zW07tCp_hkYWkFBIw+;lmc_V<} zq#C1f>1A-#Ub~8foDhh>Cfl$|Z9kUl^l=0rp8%S-LTT7=pQ=Fz$`>Mt4;}#Xx%!m| zV&6Z>K?TtCk#xvkLjz-Xpu9oed%HoWeLlOH0t%_2c8Msm27CS#D5JJBfD1#KJAcq! 
zs{${6pTDX>MDq@4fLDv8`Lr!87#{bk^}HCA5}nJk2(N;BviIT z4vdgetj_#bdGL-L0QM#X)N*Hnh>l}(QLgP5fZi8mEAGEQE#0u&|5ata05n+u#1)Vq zbcd+g;ye1Zl~Ss}arPsYOWO-?$RYye8QQphm2(bekNZ}}o4_%Fe|XjVzHbD!eTKix zC#aIYCy}gTt;>g-dQaxNy2dU7$vdx2Jtr#1_La+v4n`v0$>v(WG^JKVI{9?kQ zNj!~BE39m;OBlMQ_{=N8ZkJ-NWnDRe>d8wqa4(^knpo@$vsMv;2xX|qXotHW{2^6Y z7jI}&;uD53T?}QtJ%Ylsu95ffbr*(!z+t24>|WJZ-_?3@$#=E$e^|ueNGj}y@qFa|qo8eRwdV~)eyFdx{Uw>G ziN$rgn~h(Sk2glYBy^1{1wj7|XD;c4gDsx!L?M9%(&@kIB|L&FdvXp&Xx#GrXR6MF6@~Kc zO%}e*nS*y_#7n+hc3BPnem+J^$f}-Tf=b+ z4m#LGFqpQ&1)oe4$Y;G8_!WKSVX437uq^TF#@b2ya9IuYIhR&Y%?ip_(IPhCLSz`I zZl9prMVxe|Wrgm9JHQ=%(4kUAT^DQ(LfCMjv~j)R>#%0=zDk$T_=Cnc2Dmhhn9 z)IHEI8J@7`q4RPdOHmR5y#|@xQMZenh(&52?cW{b?(w;HQ^S&BQtr?D9f$nGB{S-D zc|>^^FO0SAdQvujR^GXiRta-0O=H6j#A`6y2bF#`F6 z6bow8_@2pGt5{vZ;2VG1Vh=B|YUX0j2;A*bZfll#vqN=?Mdb3J7j$Emn(6N4<&gm=MkTiNDIUt!xj`+-eiKQL z(-*~LpgdC!4{G9Fn!TF z+P#q+f@NklAo?TE*jRkOjf&(imiR)EQA-A8v{6R|JGp3CCx6~5EMy8<3a*{(7q7nN zpIO*e8xA=Y^V(UBE^+OCIjMKG+@ay^r2%`jcD4xO^(8WlcvEIydQvG}G8ub3mUH9y z{u;j!yA6Qe4uv~Ekdm?04{Gk%lv`7W1hB=_WR5x3X(6zhg18EQ%z(Eq!E1eK!>OeF z+C+APkOx}_ht0#)@{}`Suve=;9fj<9cq1Xmg1|PCaoU7aPRaBm(itRbuiD3mSys^ z`}^wxiN53Rc$@LFnAh{r=a2zk!|!m9>pbU>0&PzJ1Q~5v^4`yJxii*yGFpe4 zi6hrJqQBMXA-riU?PlRKZA)M|7d!6#avNfCmOlDN<(n{8Ulw$AxugGSgHQNod9JHy z4)m$9k+D`Xqf$|yYfJ?)alN2s&`6wJeG}-O2+UAu`+y8oto(q4d?2FbwgrGv=xKvn zp+7(@+T`KKl90gHasLlpeW7^5Ig6wIWGkH87Ijf1zJFcr#=jnOrzcCOA#q8le0sqq03j7%hiE|;7qXS-`wSsdYLp&6 zlCX>nb>ZYhZOSicFZlL+rTVFDmizC_q4;NYKkfqAK3{zogTVegn2dCtC4NAdAA`0h z*Dv&!g+EJpQ(jtOj6vX~mu7UTo>X*JgEucU6ws$;NsRm19Z^`%j1nE&| z)`Bf3qs${zT<9~`>l+@|x-&jKQhHVrka zxWWF~?go>^dB=2nfvn+I8)WHO3cX(agtIm{9RlLtXp?{ffow=p)dRI!${J^ummiTm zvF^ZnsZJ8jKp8OGM^2wwY_401G7Tru6Ngi|2{Es>5Kjz17Y%ySrNSLz1mK&s*C`h06QzlrCxM@$C9{ zyQDO&L6?~>;{A0zl<~~2*zQjTC0fS&&a^HIS@*nfgOYOYTa-O(sT;=8VSdc@_Q`)2 z(SP%e`~diUT?pKAzq|Y-5p5bupe)YLwJ@RDX5I0q;2+*ts@mt$_324iUeiO6BO-w( z6u?_Rg815T7ImLK&R+!@3vGQ)!S(A911H_{Lst?Q+{(+Z*9ga-GfXJ+(^)eHbgEX%RRaP@ zVDO6iG%*FLDH~kBT_}Dab`atXG{Zg182f?8w~{BfAZlv}Wleyla^DPmdYXVC$aA0| zB=OE4H5-58xETrzF>B&QXle@N|FN|SfNuid5vgwcc}n1->}0{Fdo6Q;yDSZ07<(ZH4P{XNqp9P(lC=~p#Zbk zPC64hw)X?FpN_6_w}73mr$R{%uOEPrC^7Z5J@yCo9q>8xgpjX4h^*4+`UZE6ou8`f zJGj>ySMG`@z6v%_nvI$$PQYrn*C>IeMFP;K7H$XFY-g+O1khd<`7_$aJ*(;A;?Pws z&COpqtVyv9jxO;a$&nxL2xl%KBbR*<@b7G0+cC~HG^1;ls>qqsURT$D25YGxqVE2+ zGm8jI%AP$a1 zkF4>Qq*gh_kfu{&x}6h@Cf{dK3Q?W5E>jXqj>~^ppj42RvtCo0mP0o1Unwm-!#dAm zSG{*Bjy+V&3jA)LDa>Lp@j&VH- z4-G4mc$;3C-&YH{WQ)`KFuCoAYtrC3S0fbMdy>{EtQc&@D+vR*D0n(eRoWQI>DNSN z-{j;9jXL}t#mB`muFMEi>jjpYdWpQ9ZBPr`_fgqg$_5MA5qrk=t{+dnt;4!;dxs(C z%)X6H)A2@{C5^X8=}gd-3{2KJB4MeH=x~T_6o3*wt&-ns}DMB3L^Pdy$1 zPd!R~D~J9fBEHwLYRLRxk0Za~000}b1)K^6VW$CkA&DQcWOAI)8!2FU|F zj9A&X|89x@vA}LGL}LIMaCZao|C|T;W^rZPeaVP?<Dn&xc>9Q>8m1UBtXAYbU_$?W{r+V-=VaHI+6n1s?1(~nC*z?@H z)Fa>1tfI1hPGf8JuPFr^=EnE)-Kc}0iYZh-gMz&cz7KG5 z@BiL#P-PXb9s74=)u2Z@W;#B&&b=8N{fPeJz{>IK?VQbb(0!-4~#6aVQQiS@%Da(UC%*idTlhao3%Z>Svt4Zmjt2&+wKqZ z=4BN!^00D15{+bbAEpXOcgS-j3d;2fHI{)U(uorPN;xs{#3(KehIyYQz~deU7dZ!- zR|0?hL$7NGg>XCoWQue5%hxwL92Ncx$<3mTvg>6|MI1BH@x|3|ZNy^#h!&o%f~m|l z%%!Jtz$=YPHL2{Yn>9x_8V=;=q_mSNRNKjI!%nDm@+55_@R|6}-?6>TS1=dWf*qYC z#8mJw#I#XxLXuJ>ynczalNagWQAju$9)AHpAxKJ65tfNVQIh!zTW%y=oO^Hwe5rwc zHuf{vK5u&T&9z=yk#td(lKRfZX3o84DLGTgejDr|5$#YRMr$AGkUEXgd4}HN87Yr} z4be*a=DKUXIdi;6v4%N;hKx|*``YSylFfpA@^~32sJ<@EZB8)O*f&$qN7)NYB09`e z>^Mcna|N=>alhsUBf7FZDcxmTKFnht+|z27$qr`)*yYQZ6K(Hr)W1QTfQg4s4PAK8 z33+6unzUB0Lw!{hs{kdn`>Cy3s4h!r=qjLoi)!yE7R{t}e#&#oE6IC{L0&uc;O4ockP*cS$2mM^2I7lD_1 zlY2`2<3?Q~>6Rzn=#7KP2&)&fXnVQCW%s-EB-8?55@nC;@FdR%0FY{(g8=s*SEMOph%5xn 
zNw=KUf6XtOe?ro?aEGN7+8>P0h7v0@`P=BAroDtKV6pB5I6*4@*go?-OfW8eW<;I-Il=#A9Zr;Zq|KCiIWJ(WH6gJ^1#w}3vZCIsQ=H}g0USkv7C6l~lcMt3Iii@$?oMo|K2@@L-Zmi7AhA zAz?jm;{=%5r!PtoLc;`GA2UX=WM~|U!OunzakG0~*k+=J<(e#) zKZ=e)4G`#r=WJp$u7>6u#O5Jb>`93a^&s-+25yi7pbsTJ2eAnTz?^GhRIQaeB`)Mt zS*Oc+6Qz9dw$boKhlLQ7D?+Dr-P?&+R&$qxw$X?s@55#3_)_ylqCdN%@64A<@Je$i zq6TEaxRUM}-PeJ6da>~RHbej*1iB)hFZOwe~!Y?CX#HMy>q?F#dPPX8kw7} znC`jZBzuCN*>o&QQj_TZi7xJ2Mn$W6;gSf4aFz zeQ(j^saF46dN?UGO<*E25dbkW6o3c2Wgbvi)iWc9ukBu8c$mG)m4#KWb)t6Uqxe@c z$$##y3M_8O2ZCECJ?d;q-M243N50?~IV@>~-+eDiZNLpv8*~ANum8mcM5F@!b14Q_M0m7&1D|jhNn{f?|J{3#@sC)H~dDs%V+ag_l0X~(`*MPx^ zkQ;C`be;RLq{tVc+f1Rw2wwGJ@&LSV0;K-8Ld={1;Qqe@lRr$J0DT1DL?|!IB(KAr{=1+VM>OqMad6C#s+dNTFbCHq7HeH0PNz!&XSPIO()YatV$>Po51loVj}3XHG@Qj3vUlEor9Q zrbTZJ2(iwGxU+eVjgVYZGa!cl*+XezzSdhI=|oFe4J~1|Z}Q-Hb)jXIO4Vsl3P~WA=uOE!JeKoyNdc{6C0DYU7+geT7%1NYnj6k6pF`Cy8^cU?eGTk$h0M?GFMEjHJ^)cI8^?TDs8mopt&JhL2hCF4^kY-a4-$@4r=R{; z(v^;s^A$>wNrL<3xNG+)JX5cd+I5c);bz*eHd=#sQerEn5QxOE zMtc0D*VPCGX${gG8MM$^8k3~| z4fe%~`ivFTEB8kR%#m)GZTUfb*+i|VBEJ{7#3&x>hhJ92PdYkEAJrD7$;3!P8&|`( zaz+-&jMPuj!dop|WviqjL&+>miiH)xT{B z!B?<&ZoWpoW!a^0nd9}Xbd+6;Uxx)myMPoEN5i!{lkQr1gm7o**F5Lh(FiC$GqEvs?8Y< zBAjf}q)<+Y>{j{f7fCgw>DRHd5k{A;b$GUm@D$5Rm-Dz{j%a|CmJ|R9Q{QC7caug9 zZCw+WH;1mggGGQzyks|-7Y5AJ>GMkoP}Jp;(E`-*qmT#bkOWR7>+kyfGoVn2IUlD-T*mX6N` zy;!2qjjsu$wb=Nr_$Il!cPAy_B74Fx#Zf?d3#qWo;q;33JVS}dx5m=SFGxfO^+{199m3y zr&15-t*!86UulixHAib_3ePtv)7uTOwjlf<9fh$Wuk?LIP%3B|o%}JX-0$4)$6Qyu zpP+A21r=k@oCXR5;NRLS<_1qSzlUT&hJuljn!;NWO}IbL3R}pj`sD{mGND>bwno?L za|!j6k-DixaZbmV??9H4{k{I8S|OA+p2ytU7N+`P-Z*h=TfBpZ6H|t^);tsuI=vIF zB`JlR*sru3Hxq<^=qh-B@>}m_h7r7Naa-;-JH}91nvQ~cr3SM0=~5|>?`?A<_>E)1 zow?&YEq-yFO=kQ=4}^_r4KrXAi8Fr%s z9CN)-B41l@p}*6c{kak1{16&>5;~sxkYurW2d11*KY++*1DH3U1%9{ZS;$h_c(0zJ z&k5G>fv6HVY6X5=Pra8?YXHCx#{U1=-3@#IXU5Np02#6Ce5x&MitKjQ8Q?Dj1V(@2 zC36DGC2YjCDKMfN3&EvecHwod@qRtOORG}nTX0C)85Vax z0_2MC@Gt=!0e;r;f+Kg~PxatoI)tO zMwr0jRFSFsLUw0XXh8vcXxW^SrV9x?wCM+zbzp~qmV6)x}<$K4Q&r-F2 z61eBK5w0A=5nimSg-p@lvXQiETV!;L{V%Ps?L~`h8b|&$;B*a9eZ9B_!55Nn*EAwy zTLmU3ttxVt5}aH*M;OisgDjxW*0{L!s&WdwvV&5;8m+Dh!OIP-#?k8Q`RrUt{g;L;5z%uvU6U?xmWKLzQ))gj@<5~7ks4sn1v-t!Uqhr&; z^-G&{>xDM;A@Y>k%c3Hakibjw!A0M>arhmp%f{_S^&)Ju){O*nHx$gwMC@z=4c{y` z;dueFqjITT9m11`iIt@qu8!l-aYOy+Fsj?s(z^h7gL53k|u-eC2{3 zE1$+1gya-&mq28E>+>lIt&Wk(x5a4D<_dYqyG+#rgF*@fJ1t43EqK?-V09GqYTfG4 zeI_kzf4^b_{33=Y=|W{@nM*6OuW`JOrD5M`Kw*y|@ZAdTryOpGq$58&>gXS?zBGSW z=~TfV&i+cauk7Gb0!P`r|FcifqVX*b$w??fz6a;Glq{A4xB$12&V1P(kMWi9nHIjT zcC^*}GdL?FYZj9S)&hPR(@xv^rz96{^=rXC&0ykvOK4#g+mZc;U#xCLAIL+%v0b6e z)x^E{@*(R9;UT1o$33pB9+yqpVM~i33xRP{KOX&Ot`JDHt-Cbzv z&Z{eD>7sp3ceov`Xh^rI{bhPVn@pIQ<>3`|@9}$I=}!Vj6TX>zM_*PCp70CG`KTM7 z4J&yx*6f(E^UyT)=`sXLn??x(;tGey)1!_&%~>rV_5nEZ1a#hkccMiAXwa1crgr~_ zyjk8K0BxHJd&Z9c7mR;gO}%R(@cj>ChkfLV1NKG_fbnxBz<9}mk*fFklSdfK*kLS{Y${wwS zD+kTC!*Y^?0IT9`VTxaMn(U3cJEyGOVhghCP?HBQ2{QHOTB(n5rD4;1YlatTZi-q# zim__nf)Ve6l%m#~o#Y=6v)KAk*ydhlp$q1?1AlDH>wVUtky zlNU0BajWtp{9HP4d;_u}4PDP% zZ$?eu9l!{2%8QL!U%}fYBnTS|Z*IUoLJa;U$|~;i?aNE`q}P8d_+uRZ&!`ygLVYsrE(u6l~|4>;Loq+LhSrL-hWx@Qry0p zLKP&|SCh5spzIq)GBNsPHl}uDPO)x{`?NR>{E73gdVGi3Bg})-dz^iSfC15tMt1R- z5X?~YAw#=48`j!z7CVBY1X=B&7(=}GqN|5vhj5w^~;e9a52gm%L zZ0%GtncKeU*ZAFvky*NTGtnlG^cB#Mc74;-3#Mke!$tNg$JVQF+S7f+9}$DG+kr_K zKL`jK|0ws(>T(D@DWT}LIlHQN?NE1P9rSBlN!z#UCV!X_?k85o{SaGduVxlh8(^O` z^Qg7rV^$c~Wv`2X=ifAUhzufN6kp5jRnfy90O`2)7xFv$OYiEWMJpNm(jp6%&=34f zRCD|4=tj>3P~8+b`>v7@OuLF^x#Cy4e>@W&wH-I=2(!&9*jt=DlaA#wdU{v`ZL zj~kgYRcDfs=<*XgG^w@~L$2XEG4qXRdbBIvkM{1=Q%0^~bghGhyTOaBJ0bIKAT%4$ zL%@W#X^2Xk+a%JOgT@xaNux)0;mF-tnv_W@w-*8v{&8~qFd<27FLCM?h!VUkEAF} 
zKA&ZIyo3rQGKv1cZ#j0Aw=j71o%smb^Do2R}z54W>@{jbF6g1$-#FL@<=1ZTv@sEM_Yim2Ix-D_) z!er!-%k5}g_p0=*uT;qj7A~GH6Yj_^*M9E7N@poxUIATGYT^{s2`>vL64|o6^WhbE z8M)tB3wrmT?R*+gZOh}(WNd8}?tVK}$k3U?eP3&jn3mf)R4|alGF%1Q`Xz#=qo$DX zH=wK$shl2sN#J)nAL4YdYlLzL)>j(6c#h~H2f@GJ+y>5hzr1k|+Y!t+VjLe?2)5i8 z^)RvFT27BoH4k6LyG}g@&(CEbl050(RaY5)ZEV%rw}H5AZKwqQa0tCEEZ%f>A#?lq zY=rEsH-r6@KIBTSkw+5@#+TJ#AkBUF}wAT6tRY+)8d!oqA z!W=kTo?^GvyxT*FM?(RP&zClCl{QXjT_;J=N(pi9yyX#op}~hgini~ezF5wIs4df* zg~u(Gd_m!^Pl)fN*3I+lZu*2QQobE1e~1O z9O;8#slVya&_u(v9#5vL6yEj<6Y`j9=>y}KiWPhg0SR@)+;I1r09$E#gcofRG{+s{ zi?SL9nX(oO)170Eq>K_XO6Z|V%2!XNS#4$BG?>iK_s78bQoO8 zIEQY-$ai8_@f=gAEa~KPc~ar%M|D;e`fKfVzXB6{)EoQ_wC0NCA?NEPZtrzUEdkKx zlk`>aTE=(@YJ@4473zw%QF&$8+vDPhn(AW(T@A*G0QNTbQp#PeD`${0HxC_Js2N>e0(Bi>UHubd}PuF%wS z`k}w^G60DG=^6s;KEgQoHr*pbYTJP^)7+cCGV1BCNtGW(xp*Bt&80h`cxQZFM10X) zs=!?-qXnxcNxk5?0F^dcV=!N&Xz%hirsO5WBvt4r79uuV@r$=BZTR0V_iEv;Y;eAv zyo;Inwi8HQ}`xdY%-jXcqt;wx(q2s7^?bCYrNB!!GS_lxW=@%WYKgTUgGJ?w6 z9r(nY#AGi`b~C~D=y{E$jc2#BX1}~OsQ)U+vajw^=dcw*8HtLZZXoYp3(Kc_?3O|K zGb%<`hYS>fM;qf)zX@Nw5M6syYPy$=K7xUtS3yb&Q4wAUy7Oj?lwk`dQAY_zU%cAJ z^YaEy-7pYz5(_~dNt;ctVJ9fCEesu?{YLfc#y2ulhS^D^`r{*aYw=mdLOTnd(3ezZ zm}@MP>)EkRHNRD`4o`3?4)yTqI`2}ObIDJWBV@sMzNHW#fe$Q3haF#O_Lee%cXr(s zaUA!l4Cx4j#>NG{IlggC}qsDhJdL?+0F)* z=yzGe$ULv*j%Qrhg3A|au>5l*OW$4&6x0xq_r+4BqVGMO+Y%RLfui&AxVXUt4?KlT zupvlj@zXN=rFJm)Jrw3Ym@5WMZ39D}I9KEo=f-ClLH~d8>r>BiF6fF?3-rlOe*#@; zpzf|Ga;NrVO~9kdeI?nj4eHNp>;mAN^_lMMA7*kV1TbRH_*Z?JalBLlrH~?a{~flS z_2e%DdWmkx0*szM2O3IgpQXI#OVU;t)3-_wDvv!|x22^%VTq_G-!r1OoqU_Y;0}n* zQK!ZdJ54WC`EQr$bRZL!YV7#e0UB>Jbv-6d74W`Xu@bP-IKRWeacMv zOh0;AQt}v;k*xa?_~E!N$Ahyrf_tO^iby#s_?@S1cAu3=dLd(8XW^jgJxsHtGcIdT z(tNdkKl-z90n?V)5LrrFK&m~}qkYVW;#Bp>41W2t zg-`K{;m^ICBF_+-Eim=X7CggW1rt)PlE~5a!Fqoa0gnVx{m|4O=ex(#o$=K0_+6?( zvOJ}5ucpev_}7d6xSr2$bR(%$ymSFMO{?FCuDZ%favX~%Ws4n|jUPQ>mvNugkTbpx z(15XICCsnsy%;q6$i`<y<<8h6y5$Fi6|4ub3W_NmpaS4rhcLh`2R8WPSKHf z-S=;II<}LJZQJhHwrxA94m!5kv27#t(T^pDHe@lBihAhdi*&u7sl~G?QV$<@Dpd`bhp5Um%Y4N!eMb zkrw}T%cX7Ea%3w5#Q8 zHen;Ka$Ebd?7?WMq{DcjiSa@^}__3YzG`seex?th2s3C}f z!v+?yZXr6W*b-=ae{4$+zp=#TTo;5J%!RBPhJSCfHSJ@45)(5ufp94tZoLBb@C{hc(IUD ztP8m%zhQR@g^fJhYc^-_^AqgyNQQ1zGU!Ex(1duofQ0w0M^2(Yo$?|)MmC3q=z}58&c^h!-_Xe?zFa4R2 zfXBA`z>;c=GAQ2XHP)AU(~FI5w~0I9@3Z5wiG~o2 zvL4NvmtELPK_aP@fy3!P_}lQA!w(7tem?(Ua{9_=hAe=ME{(jS|3lE3q?mZvd36pe z`NK^EphMZ(Pd*$$C47cV3ohz2Mvpa4lbHkwabi8V%W4UA<+8D7Blc_qq=}6zKq5zbz`<~& zkZ@FHMVo(9w7D+@RjE>Gk>GN@rL9k$T@nsB&ZRy)u_M+W`EWk{FUw{+2%+q*aZz z6V#q}TzAk+()J)JTS&0<#y$Dmw-*wdOTuv`U~E?dVnjXgjZ0GS3!5&@2}efNqS>N&hYao?mKmtjtKAHpNpUZ)W`9vWk^RA@Tdn zr-bcVvskH|eFZvw^q(ZV_YLO?XO%nvFq0GP3F;p4&@;C6^HOWMT!%WfX8lDGHz|6C zoI?LZRK$M7!T>k9f_~-8pwn0MqY)3p$YKokvBeg~tjCI*tqQ>r%=`)x@@6y%}TLgqX z&8E&9Qm>(hQ>0lRj*ZO@Cz+kVA;pX0d0hx>U8#eid02f2A$Sl2qU;pB;P`yx?t;~Q z`TT5V2U9Mpr4MJ@#gg(6M(?XLYc;dG1t$FkeYAgmz2Zvva}44){ON6IsgCzHBOzPFr-w{WItEp(EC4A4-^_4!^?LEGT>o zX;?75^Q{(WR+Yb88g(vpA&-tEk+fU#f~r8~SNTyFu5XkwA?@;g$uhGs!c*^`LvsA? 
zUpup$Zex!=&vH%C`cuQ?RJ49ODILw%%X&plZ;!>kt%jJnWT?KtNpC_Me@$9>TW2{TgS_O)dTn^C%8hLGWsD8ht>2R_IGr z9Fniy^e2h-(g@_i43a>(P;Xv1%dzN62$}UZcyf;`f49B*lcnv|pDtZYQ-Q%yB-NA5 z;(=wA{6Aahaja$Aq7OPH%s)`v_5)#mB>J|HV|Jnb5NPOm?RbHNa!?MIq-R`Ff)9$< z2%^uHlwSV~Tn4?qf9;Y9HWG0b2bOWlzPVHkY|glylUn}rU)L)TV)w|_o^PaNHGg*s z1idj*G->dIk&1|!x50I6So7K|yQ{1$+3N0=p^TZookwz+aT3(m(bN9JRB!L{NL+1_ zYt%$Jt4^_!mzCpJ(QnQgU^<0?!eOj6@!6u#uYalCxfXJI%K(o-#a*pI6>jvaYl*gG zzPLCN9o1&nhvn`pIAildgjH2pmL z%*P*xGWIG=tF!u=rS}Rg@)}cGZ)_)sU4wZ+{G!5TCz_hKUo!0V^Ry+=w;8H!$#xIj zRO)7!nRsL8dvfn4(GP}*P;&9c2$XEF#Iep!+Fb86YC0w-rq(&y-i`r7y3#wizI{`OdH4G;sptc9X6_6;B#aoYJy`0}_{5M@Ntj^DALC}}oH3-w# zF+odD#!io^fUwMHgr5g}TJFMDHP|Yt*^hKV*O@qIh3uC7sRTwZGx(=Fb3OhVHs`5- z%d-_uo-@pf6t}?KJbHdu(iK+Wysj<|Q{Te=Q8upLl+y_LB>X$hqwFh0gtX+p5_^3W zN+x01Tn4F;?o1TZjc?l*Dkq?4kpAxG z|5ej9m8@d2jOu<;75?QBPQAkMvt?tJ0s6}zRQGK+FkX4%9kt)!c0(MAnWBOYqmw}X z%s*{&6JyTo%{4k4JViCOTnm;vFJUqy#2UT2njb&OAMDsGlY8lUzzR`?yD{z`+9zVF zQ6Sy8YnX#dTiMx+4#$EN*A)A4iK&abW`6!&4T>9E4Fu-2km8x4jX(Zh{$1R47i{vr z&7Z-opDPT@yJIt9*>9&XyVVoRwqZAviLjR~Z%h`GOXM${D#Cb6-X9A!FYl^h(FoE0 zHcZF7j)-EOimC#p5#QQ$lwq1llOq?rtVA?(dHc*qwF?5DLj#|VubRg#zW)~>fTaoI z&IVCHPJlYvb4fwRcOYaT&}?5ih|(jyhb8nXt^~bl%r` zDuB(Y7^EyBFQ58F`X+bmy%+pfGR?L8Nm%dZIU6q_~q_gdL_LbQ-!u5k;7@VO=Y|4>YOtaXnnuD19w`JI2 zYJ&J5m1D;~VPuMuJK-?Hv|_l>?KI#K!ec={Ckuy2~ zS3Q?7=8QtobD>|9A7y?QLAfEMC%}4++s5SBkN798Y2+%slLyF2nS`2PD-wj3Klcf z%D#Jn6f4qe0dh*n^-YR)IzS!ca!9V!nzw${{=8-leVaJ|339gf7C=$QnOYnMXQ5pI zzYmZE=v9kiwsAC{cl5TC_gvS8eY5KXuh=3a@|IPJ5LPPPg;V;ii&m4x5RCD&j7W>y zm9(MJj*eQ_xGcp~zI%M}YQoQ+9DGx0@(RhPj`B|Nm6}S6ThT3e@O>8EE@|wO=!TE$FoK4mt~r`N$Ck zfsU8|@8EH(398rs6;?eEuip@;?i2LVC$@P6lCp4=r0q*5PtFRwUSwOgA< z(9a>j&2SC)48~RRK5z&!@9dI&#R(>^7%uk%o3wIi*?8l0OiY8Yx@Nw+bcAPq!6Mc( zQ$FBWr~q1C8BcFNr$D>PhTD_G8_4&~}#1a+?Qk#P^jm~*5 zi@=!z1OZM+Z?gq)8`S1sjy15dzmtnH`w2N;Fs7p{YVPAJe*tGjLl#`meArSj0q?5? 
z7G@rMXlmOT&YRNt7f;N1_XjKOi_qDGvEOQPI-_A0cj$5Ai)8?TWkU>T)HV*bQKkAx zLdiYqk`;HHD8%c>e@JetE>>yp4szU29L|lbJqfrOtd~2DXW${~&Uzq9=2sM@t&7|r z5M8*ke1V(@7J$iUBLRvCYG(5p=iWF{>>JQ;&{yG{RslhM{~GTh@LvT}K~P74{4ezz z=t;oQ&46NcZ$3e<#{jTn(2R)t1>bc4SV4RjZdA8+**iDNbt_u^7e}%xxVYahMv%lL z!On>jeZbiJh0BCGqpwL5eSFW=SbScetS21SKlD#w81SPysb=gh4NIc?&63;WGWAaz(dO2QJ1?Sq?A=n*~p4Oo6$(kg~rg!e1h=tOFAL+CL>Qj1tx1a%8wsDMbyh_yv*wTc`) zFi3Oal|d1)aA)UD>)I!hu#-NPeQ(94y;=4Jove=q4$NT}zJ%k4dl|9$DD%im*FNgg z+h}`ZvgvYanKzB?`JAq}mnqpz&2)%5;^n=C@9R`3F>o?fuMo|~O(e0vWb?WhiJRCG zKY@AG)pvcG2kc-fSik<57nC^~9a_71m#(Mjmm@s+bZtzHO^B00UB)|kMCaO9ggj$e z-jRx=cTC+zsbP9;7UyxQByPuC#+c0#UBI|MDGAricZ^sWn2QxJ-X{?okquGbzHx=< zfAmcUdhU`G_iq@>459r4{rCid0drnXzf89`APvm_6-`2Tup?0Jmuv)yk@emGui*HB zmQPuynm}JDg_%Ca3c%+OCe`W`9wanw{@i$Ijs!mj!0PfY>2p!8@)P;WvL{F*3E_h9 zN6U7vkvZ^9g&Knk{ux|ydAa8{xBCO9GD>vhS{9H*V9w$_r!uKq*NuT}q{q6ToUBJ0 z%Br6g*TWnaK4ypI2*a{=fyQy_btRKrKaAmj9cS0vpI_4N2swI)^A$zZZdFARoe3yX z?g_K&1zm{2*VtiN;vn+vjrQ#ZD5K={B)w>mv7XA^> zW{)NcSi2&L6e20`i(X)IvS08!Hv4$w^>kdGn+k~q(CRwie*OP?B1RVbswq)6-Z8mM zxz0I>me6DO)CA5SnrK&F3p)J`F@FEd7m^M^S*#;vGUfHKQGKN8XK)))y}IFRQEquk zz&^yx!s6YRcw2kTZQ7m%l@oq69f@grnQ4%VVxO|KnkMfnktiRVTb(f-4QR`@i>&36 z{8**r>7ygu2{{efF*u&A(8e%SFcXcLg(?vowD`i~?Oz@7Ypx(>Z&_>KddBd!jgs)r zpO>idQfk>xb_5%Zvu`E{S|*}0^vz1Ah6R&4AVK1Wso}eqB?O-9db_RD$KHuoe)ey9 zd8*)p0C>yrKegy-d8w=2E?pa{yQuFYMpCHipF71(&1y!fpXo zwSL>0GdJtj!~38aG6T4BzcsmBZ5^?6bHs$!LORz5G6Pjnx90%QXXhbBYE!97D4O%m zsiNyu22ma+P2S=%<^;p~K`Ey?pQ2-vhd1jk4d4isw9lcM;sjTnB@T6X3ECs z5N+9%g8^*=q-t^l%url|*v(*rw;AL^6NiR*@0?2|$7&vNBcezsCW!RjE2(p7zc=kNVd6$wI6K_Q zCh50FanPBDP}Y8S@k|gjosr12k~C2E^!qoji%Fr|w(wGGZ1@eAWWj5B(r0)YiR)!e zFo-_sweGwl+MWs+i23);BT$>T?Ux5NH>k4bTRU8%pJU|aP)96SNy5*l)|q)_4dV&K zl3i%2KL`4JW!U`w6qEu|K9zuUuxCIZy8kru@D)aYCit-kqS|o2w$cG}gw4A7WULdk zz>@$)etRQi%%b%CaGXT=Gl^ENe(9Z>dhv$hcUcsA*eavooRW=jJ8ZiFv1~vXEPY|} z3&B8)5b^RK&YbNI^x8O|;ESXf`}C`yJ*Kwa(OSXZ#Z>%d7O!o`B;#AOwnv%sQ(SNsg@s?H^vGRQtT)?#m1en0-CGLX?)2AW)%1j0nbv3d zwjLGeql4!}T7%s8!v~N7rRMFWwUVb0do0P-p%_HAh_k`URVFzuQ}N;L+y^!$)k7Rp zSFE8(*MD>wD74?hk`fM&hQQxN;_1~SMU-ZN~O0Mc8Gtr~2STSrduFG+U$-ebFOC zpG&JPG3`!NGZWq{BW+p^%9jt)P~g4&1p`B4@iAT<BBc0A(@{aF!H*UqV^^+k=&Y}YVL-S_B3P3^Ka;BYZ}j z&}YlSEcUM9T_&=uL<$+pbXs4j9b}6Hew!pc)TowX?7(e?RLoM4<&f+1> zp67kMKUeD#Jx-NP1x%;&sS^&4ILUB|b^7+>@vnb84E-}pr&__GOMstWM zd|*MD29Z`d!UTh9V2|9j!+5t%gWC9}s76CLBc_Tvm;5*z13V^~DF#cUbGj)aY z&5W4%wftNslgT62$eQ)fLu?q#Ts9xU5Qbm(T9eFJ){oo>Bx>+#-y+WYt9yoyyK;;Kv80f3}d=lz-Jl*^c>?8jI zRCt;GIXnjv0ut(TDJp{xKCU;qCheW+VbLzK2l-+u)pqbX=*l@y(W#qgt>KLxUvUIt zaiC@-tYkX0yttf872-+4O@Da#l@4F_F9(p^*G-1E(9HL z{E$4d58V_9v@}{^s?oMwzJ0$^5gc0!3;A}=*|KAN{W!l}gfKyqSvnC*s@c1fvGJC5 z&}~*q**=^&o2kFF*^Vas>u;&4wMbdw+fJeZZrhE|M2jL$^iRwA`kR*_Pxqw9!*g-= zkg9uIhO2W9v4X1{+$eg0-4Sp5!XLh+V)SQJhK3Wu?)gUCIu>-$$1g(-ka_E2=$yk3 z^aVX$_rG}U8!gAdc3cFfF5|3Pe(U&66-c?-jdYZnWhhu`jx?~-+H@UzBnM8#@+VDz z*$>tVRWS$Q$eCqgY5RiQPN-I1=-{^yhINw!3wv36-RQl<#4^%di)o>V{r4JK;VOEz zhE1$@_(j0n_y5f|o*>eC@*P=Z2*Rb`$D`bIs9ggDX=M45ab%mqtN23J3AEYF-sM2;ldU!p_>tjUFTj4368g zjYo0v3qT7k>{A(z#yPKMO3q|3w6Fzlb1G8;SuN-MUSoz+Be)8uAGq6rc^|EVysJYk zhXs5v=6s9^UH-^_6A8`583DNTTbvWQ|8P+#UX1@9pru!vPJ^$`t?SBHR{mw&t*z6R zb%}qQWIp;%SZGjyr<7XsyM9qK#>9@=nE15omp1DB;K{5F_^58-{rU7^3WPMfmT_iV z=WejDmW|{b&tYRVi!IbrrH~QnQ-&NWMKQzKNGMFXtmoCJw%KCz453NeS@r zLKs#0O?onI@_Z{xPXntF@1z@OX$9?(c=FkFS2M35^_Q13seXI97LAV*SlhEc1iI?1 zbIobSdLV);KM5TSrqoC6J9ryURhwXDJ(jAH@!J|uxQYc8H~TNA5#j6GIm=N`N(w|% zgSHiQiF_oPsf-Vb5C&o3;JYGQ^y_Be?Sk$#nG&xRLP+v{ctpG(nZ-lZ+6XW>3thF$ zn{!j6hPDS72zQIviMu#HMh={D8ay5>@{vHgSj_& zf6CzkT`GV|zNVM=>;G7F$Dan+Y5&p0&Hzv%<9GiDzuOZ;1}W5jqTK;N04HXm&o7aE 
zQSu_bYS0&h5f>z-!MQL0e^|Rm!Go{r)czmB<5nc2xM?v7tpYe4i*u^^Ktn9LdKa{v z3Q<$~nfA|$XG-;YG}MJ`tJT`VW+T4SjNUJ3=NOAB)(O5Y&6G?$=_6+T4>|c?#o;0# zr&nkY1?&;7JGOG7^rzI?5Xx4;H~U|k)SFpwh_hIS6e^`fBt=0lfcepfG>1L{S>yc^ zRPWR3m+7|#a`(tmZi5zYYt1GRt$W-&n>;Or<*%83DIexo(cO7+$Th=oTwmQ?eUif< zO$w+!CC})!hI64CG2YvX6_(leC@tcDOJ{^6v5_+IG;bbGQ1(;gE4s>`pSJZ6b1yIO zoiwiS+6jFlr8nSXi4NZ5$l{(3QfNcD_Iq@GT63q!MMnr86`M-B+ zSVsiML<2)=QznSNAFRmbxUd>MM_^QpYIGTnwO>EwW8Wx(_WS;x(ta;n^?AR}h<=f< zNRfXE0d5w===7G&q3?`l6G5QBt_!2k|Dh7bXW=4BQ!g?1Z@XQAmc`V!q>R_b@}1~$vyPkZ+`=VrUS7+gc4tDM#BFF zWFPN|fd4@l9sAxuFYkf@AB2pG!u2W4P8R-t5VUd$ZZ5>BbUqS&JT~dr7!RV75QPHeiHAh}4O;^Oox(sx#A*$(AY3>`9-l&`Ghsv9geQ zC3)Pa)I~Ez#el^bP@tqNH2-W2iM3Qlaj9w7n_c>y86+&-M zNR3f#pfwuRJ%n=I?iXsb1k8h6C`%BPcS|9D%{$%(nd{aRerC=^UE5=BV5y>(96`+;)u;Lt#t# z-AIUnS(`Y#abcZV=eB^r)h@Apg)=9T?Vm!4R;dd^9P3t`-E%Ngj3e3*>9u41{N|LL zTOQf001>aYkbnGKeU#pT@|?9v&35dIbr1$;Uqf&wkbmhSyXF|XU16?v9aX%#&nJtq zRIN$ZRHQKJIiPd$RtS6et4ZSijS-Ta}B9bTtWj{h_lDB|HepUYC zT4f5md=zR-ax~d*b6=COpg)MQ7KFuUN&ePbAE>u~WP4gCsj!r>st^l(Qf(o97ll)< z7+&QOce#iJHKdJ{5uz#De2-u-0b4H%nCe<=-#kJwtKU5lz`z&E(^E!yn-M!ieKJ8F z$j#v1zR6!gjPPq2Z6N2IvHbnl#KiM{;(dopX8F4ky>V}&_bjVKmPmEOo`EawGqfC$ z5l;@K--8QryL7vy0nC$?1LKN`OH)xmRt7biIsJR50LSr) z(GTj%nqt%8?I!wg2QRqty*u8oC|fO$U#s>;Syu$3c}CxgY`&p{Tp=Tqi4{VIB*Z1e z+sBEMk&&5wBR3Zk!&b~72Def)CQD8snD&}}g{&?*{^-4ilbQ1Ra8wR`yU1K~<0Qm6I(Xjww{e6< zfLd{j&6o@qK52i#tD>e4P{ zIyb-7UTM2LT|z6+v@xcgPS1CWN3so4Nz*d>=K)u3lE)W z{#rhGlH$XgUmXWryybm^e40+t;f^bPgJM~uYru=Pps7~ia6_M0=F{j$8o~mi(xHu% zj%|VM3(4w;svNK}elmRgv^DwRk1CSX{Br8Xe*p zvth2zthG28(CPe5<-$OwkCD;0CT4S8Ne$Z{tNs55=zy&vC{zk9|absr;B7R%LfMR-d`p9S)k`3`mUMs(Obl!;~KUWp?|w3(;jt zu+@b+S(RqO>>%qdY~B>6IuQclAAtuk*@hj0#i3oOfGuIa2gW50WD0|np5y^ z{JYFh_(<*CL1P9zs&HUaQ$t0Z=FFD3&NrSs_H!lMR}(yfJv+AZ?JM+?RTR4+BIC8a z!L?YFo@agXJam1Rj2Cpkv{dfA<~Z{+gYM>52+Rgv<|tzzhA?%^&X|w{zM3>>wa+c^ zeUqbFg%>vTn$edV zc?Uf|gLD@`1_3UnJ7OL_-2n|(Xxk=`36~}5x(Xat7CY64Ls{|cojJj+qv@7Uu}_ov z)Oogbgu@fcAIl?r6z<|${yT1LP3U}9o^RCWlU20=tF8a;RKSwnDE^ogqs~9;Z@Q}z z)AEj;O)dXw%}(kchiNe-3WWaYsM}>!IXcGtFO#4Jdp6PMT->k;veMKwrN&yEtsii_ zYFf<+XGbImjAwO548d^5OQMIRSJmErW!mFi8_B4dP(oUiQF5ys0Xu?6fx|l|7cGu? 
z{u}*AI63@olFX6%<(iw|v?-iwfXOx0rX04}S{0a-Lk4R~NF{HnxG{*hh_veSB>6sb zrN)p|pC7>MIO>B?F;Qs)L2%vbkeYz#2PV|ey=G6olJEpJt_COD?BQZeMRaN05-!Z; zMn=X{freZ)C~Ne@6(hDpT_Eza?Ul!&A<#=u27ik4r!;u@hHq;w8NRBKM-1mN^5Y@$ zNXY_a+&~8>8-yh9(TH@x%&7CL;j$5JPk{83fll||J+(tW3bHkX3Wm2qHS`^ThPPeT z1h?e_uhcS&ZMsS5pI+hmzVw~8iGTJIi?y+ztN|2L)ZWn&+!7f*rjx=d11fH6!^QA^ zSY^d*jfS=Q@|nvc39>!yVY5PZA{0nPcfaSLd^$#HKmr%S4cV))r{)G=Sq@^eN^_N5 zf=bIdPasg6NzWljZxRH6@V6E&)pg&MNVmQhsNLrJ*&(ypYRy?!W?N6!U2_TE3!~bt zgUR??T!v1%Ub(DIe_V$*Ha?Cgs7hv$CRsF)cY3Wqki(`w>MX;k!GT|7XxdOLn8Qls zjo}@*%c{5U19kD~Iz|@-FyMs^YaHLu03R&(Er%w&`Q=8~9=6C4Ab z)U)(yHDpgCV_H7GT2zM>I8MdF9gVbPaUwg~DcH>a`R_HHP_!(;+1*#F23yFzA=O~0 zZ+V5Iej6VFaRTenw*$_zQ@7G1@g(nQ)8sg7pOrd__sasU4*5dB2sO2pO03|{mIb32 z=jb2k&5M@EO~$40MaA8<{IeoNV-e9e+@Ci1j{C`w@bj91vB7eq!wW0md-lg6o9rdE z-9_8g^hFg?S>?wZQ5!MZ@j~=~X=Z#}panleRlF#6#i3lMck@yd#&NMa5R0H*i1RzA zov`zpkD?uWTCx_HB;UgWJ-Z0PQ}CSnngf1x-f2(z@1wW7KLbZi*QOn1ScF>*DUo>n zFJ}aO%V6!zoEV&lwwpD4f@~2BH9waD&V!)e zf|Z~k(Cg6k1W4l%$mHbz-1!*C5+d|zn+`o9njM%wWCx}B=Kw~i7*gxCXCZ!Ya$ zD@M-70@)igD3X9aX8B}!=wPRwfXvB8ZVDGJaAG}Fro}q!Igt5K;m3X14Y#;uk*c=*&Kx`1X7kHZ zYazR{Xd@L&Aa0=GZXlj%a}w2_>)Ur`GM0j=eQ+hTJw{<6eynjcKfx#09w+UkdI;#K zK87PP|D-T-*ARvc#mn-+=k)Z@QMsz1E9#nWNQZs6T2t@wk9pwowch^>;b7xqc3FNO z?9Gt9=*_tTfi~*4=RnnGpqhX0MNSN!#f^mDwFA5qaZ)$`CCd=~``(xb2xQ_V&h zUe9vRz4o}FgBJ^MIN!_CT%Hg!C4|^qDN%Z0r>@}`r&V@QyO9NbkFj8QOzt;UO0rdb zI|xsrAsLw!%FKj}Cm9IlY`CQaO@LbjAckt3<;2r)lAfp4b^El1wY*|w-=L{Pt8oDZ zf<#uAn-I`V{nn+g^`riJqpCfj*Xd10(EH|nFV=0`>7H_FqDhtcJbxd;OdQwT^4X&j z9!6Fza)zqs>NxQ91+^g{z3?4HND7(+3sOQ7`0zM<6!g708|6^U}O?5c1cw3_@luRsP>{75Wyly-j zAn|^iGWcFbe;lfrhP9ZPyEeWebmLR&){~TmR23j~ZEp6Be_5cE@{`ql{u;H4y+w89Mtx5i+h@me}Br z;y(VKy7c45hh;9Jy+W`}(`aBkuTST|as)dSbIzXxyTXy&X)uW3iKOB|BEIeLSQ%ub zo*t5Le7$}R)i$)9N(-{z-`&Q1+acHVZx;-3u2RCxnDi-CeB+2N`c`cGOR@{P2%71$ z?=8&`7&&BRjZ}sRbcrB0S530HzBNmr+SlZYu;8~}j7#dI&5!{jYdGVG-yTY_WkDuIGd<`Rs1^LqOm-r?7*+Ug4UcU;gwRD6!0HR zYDA;N%9(j^Rh-*_i!rML(Z3-bgFgK~r@#71PaimCwF7*aY|Q7sRQtd?JQGQe-VI0h zOrDav3x4xKLJSZ`5jx zTrY-D%xb?KrHp2CNe_O5Yk7R78O)*y5&!P-|3dwo{JqCVp(aHamlPM z!LB7OqrVylJDv%WQ4Pn}-?CEXv`a>A@gKHpE}uy>G>YhW-!Wwj1i)Zj;x%|cf%C{z zvCAf49XIjD0nBR5Cc*}PFc$iRqKp3hxDxbf-=6-cV08LRf1L#1-Rm#u z#eAbwzeNW6TjU}gr<>41UKm2>Mq9nvfMdaQk?wA~xiG_jaxyK+ z;n{ro;`T=A4)5VK;bGSUXX+gW98L1fh{oemIas#Z!g8hOe}t>hM@8r7qQt0)G@ys@ z-MYGFMoQ$#cc7kLUEX^YYetq|1MWSwyf5JMk!Ik+k^(*4|FKT7DFjjV%un~~I5>FQ z<~3E>yytXOJxzZEu9!h1PS7r`{ZO8_0}cN7Sn&$!rz_0^QCQTWZj7ZUjjS$I*wG=3 zj9$pAE}W7OXAfRfsBZZER6YQLjJ!x@>G<|z>K=?>>idU@pu^5zmm}Th3HJLgb8Fw} zitU--3ts81!*6ymg3`0w_-xGczCMKY>4sHiaB3kC=tMbgxv2}^e|+=!S(1bCW$$n| zb)d70RO#BJRA6c$f@LXkoM^;{;UdC=*Obru2VOX8EC@7|+Hf9^L+lx60CW?M7I~6k z1~I#694^YY&mKQr+l_miMfdvnk2A9WZa_4JRPt7rCQb`?F}}K77{&HBE{pFk+9I$+ zpJl3U#mB>wy{irNoT`cVt{9`4Q{tcGg^gG#l2L1(OJQ@pG=B!3@x46HmHyM-5&{ND zwAGcmczoLQ%NFq~1BkrlnML;HB1r?q1QSoL8Gil_9YIYvxM_+azOAmr)#3h9hF&rw zvHIo1Kp8{{2A=;QxEx5&-~IE~NyGXOag?Wj#ScMVCty+Dy`^o}w=TW=0z!Bcmx9kN zk_{<6Q=OE_09e+KZCP6y+gjaA^7JEJKPI+)@3l zh#j#(N&)#?R4rIeXpM1GbgH}iUG3uecyyYn>6+6*(JY?P>`#X}D?c>GJAnmuBk4$_!5qPe6^8%A_8>xIq= zBz{8V1=m!AJuAliLTo2RfqoG)Wye}d+bwc8PD8tRj1h0{y)+ywL2gYTo=CJc8@c^d zsk(u-+Ve@-i{P^}{B4>ln;2)Kp_d~HEsDWzD_FJ6>OjjVSH1caSp~_M7AYx$Ju$CB z7mdah!#Z1+CMZ1|%p;fhfs#fq14c~VS`;nnFTl4ra=Y%yTa z;)`xdP$hw1tQYzC?|rMIzj228-W0r5Hz4tS>!+G(-ylD3Ez5mgM%`RhRf>h+1h1Kn z)mMz{-7a0Exeb9H9oz-XQBzN`q#x=?CykxO?5UJZPkfQ1-zz)s_Ue(l&QNY>!_Ll6 zFxH!Q?`->%xPvFmE;01THdphYPNMnMZc??9kX=pqLTqraRcInFF{UK}oYLHhL0DX$ zr5i2om@!f-O+Ipvv0gh}7#_t3UdM+5?)bhN2NF^V^Nj`(bZb zEhsXOt{eIzvRv1Ip@*DUduKqGrs zUfbTXtm{G-auK_Q+cr0q^S5QA-;4ri-rZOLqfNWXZ$0b+2cEP6f1dl7)1_0`6lo9| 
zv3Bab*RR}Eb_#_g7iXrDnjOKQT$)I28xnhC4Q>{h<6(tvXf2kwOg!x^W`$y^;DWp! zvvA_h2Nm{wCac4*^i33#KB0<0ZGCUo2Eb5gY0&+(GyZ4K*Nf;B{O7&&dVA*FmDFHPG$< zJ~;s-{JjQpL-9MnOx+{R`{uc#ANFywn5m=v5OnOQb@`n`G7%>dkCdY&^P=P3CvgEu>uEDh#?) z#x;5jVx0!HE?0PKVh2)e@`OHYncX{VvS55f7G9{M1j@H@syUW|+ohsS?&)K6^%{>~AE%`hhLD-BagQXX6g^>qsSlGyxN-F|y zyq)8!xBhq>zo0u?ekEm8SQ~k6+e8Z?2gs|`(8^PAu4wH7|H!F~$|eT8o1PxDtawjY z>HELN#ww^z_Z6u-UR6kjC~hBpQ-u9#E;`d>>*_nh<>{=?LH-8XvC6n#_=V|Eckiat zw?W>_cc+nPbAnG0I*l?W+8nkm?+`sk;Q4YU%~utbViy=?V)Yc3xqR{OXz1#yqj2xT zmW1ea5&Hm~-{icg%Vjt}{Uz#ff@b>s?|8jIctkI@UCN@jLO!$GgtS+II0UT%{|~BE zA71t0d`7FJkE#InL7KX61MY1$fjopef1-8gfk)a(o-*@<4MtBwpovtMcGeRtqsz=o4y41HKn;8ul!atQdEEalI?44fZdC-4 z9z`bd(c(o*fOG!SH@ER*PgY8aq%nyf?iWq?s-LlApqkpxdyg-kTodSc`;g}YQ1(eE z4Fd50mw(CJ-M;akzJAkogCakGLm)wr#3ZPLVUPS=V9EAf^Dz$zw@hVteVA}ULc}^w zdCCpjj!o^iQqXr@86fFKqvGvb(4N)%;^8PW;rKys=bAB^iyYJ5qv|kkXk_@Ro9w&x zy8q`XjMor{_NVcD>8SBB*qUMT6T&*0#;0EO@f&KDL!c{Kq&u8ONjwF;ow$uGwxSXL z?0PTm1+^*8&{04=cG=k@q3PSiQhE71!)XZO_cfn|`$wYDkiG!`{(l zm6yTu2Q1}g)|zm_7a?htJU+gkgsL-8aou2%0ky-3s?gP{a3yfncJ4kZ{TuFo3*4AP z;m(@Cex%oUBxhN6Z*dpmn%ZGB;H4x(*P_C@f8gJ+ll~>{H%hSSSsBk$8VlTb9hnAz z%jCxWYGRE%h^{np@y`TUOB5p@=?>FhmdUDtEUp(0?~OIEKQ{XrMFgWQ1&X`NDZZ72 zN~CBM)xo~f#hQtY%$)K23pnyo|5&>3vkCKhXFF0E^`PYOJHHi=Wv9pmeYx_0l_XlyiWY#WVj8;zQzF?Y;{jcqn=Y}<|5*mknBPVW18 z&ikIvYwoqbT)#QSnqywqpLSld<>3Ob^KM-GLs z`k2IvbOUdltgwaL&i$y!X`l71D5Xy;j}{(vrb3D2S}XdhK(Bd))6S}ZBwpvwyffU1 zVf}t6`GwAvOpHnbc+y>}npKO+-4$az({o>%y;kAVzXemOM2ZJq^D! z`W)@!Q}?1@w;kf~Hnb^O3cE|AwZ|gsW`4 zW8XhXTx1=t|DA-6#&@umx&CIshjcAcq73Fm_Y)!)F2Re#ZSL0^WyE{IG1n;1Rsy_-=)i+H;j{yqC> zeug@WozSx;D-!)EW1ff){47#l_9J9X=Z3;t$L*7tX=b_gYd}KJJ23j!f2@Lle9;z* zQYzBbsv?-euPWPq!QB;5ye3NAk1sn4V--eB=6^}hCrL3;5+_IT?-e~f?Y`MUYb*Tv zK9w#acf@y#<#hSi7Do)J+|#qqE^=$mX+yxBG+k!t1Hy$xW46EO8Z(Tvi4n##nLBdJu zsOZ$v^SAI~u9OHv43Rg;f^LL%nu$Y)_?luCMeGhW94Xgq5d}WH6i^1i?alxUXO+%P zifn13K-r;r(*8u*cuP}ORYijBBISKBJqfLVli_Yj%6W`ARB&5E5)qrEA9 z0et~~ECk(lpQ3(W#l+_6!FdE&IsrVl_ZoN>INI54@1;~BK-)^@I2NnL*77oD_dYS3q~?)oABT6 zS0}EM)l@o)kl=&31`KlD4-etyNF#X&>P7VpI%k?v+Fkh{$+H2At%m3DBW#&bxXJHK zsOss!Vs>IWPSemM;yVX+(=8)9bBDqMPr;xMS5<~Q987Uy{@(?r&J$O$nZVJ1@F})8 zq1bFr=)!~Miy?9ba`}GZD!p^pNLvv)$(qi;;|)x9Sq{SCFL`ROz4L{=UjOoukE%ID zYQ$!z66;gEw766@=efcATeOq=zec;%8Blj~A=e%*`qUP_@@gu)c#Ax}sO; z*4O;$Qb?b2MW#75*M`r5NbhAcoeF-J5!3CD;}VYz$ms2yqM9&c??_-kdn(5!ONEoj zhSWlewXC_Fr8j_Rf1`HEmU)JT#KwE`(&m=K+g%_y9PVtJH1#IpT(u zjSJM-k=rDvH4`fid+>)*W4oN|NA%CnSf7Xu;RXB zUh!PJl31#Fe?yjYSVg?*rOT03ZNBMr8bNqbvE&LKQU!7@!0Dfaa8Z z^8a?a_adoNfa+~`O?Vg%*H)y@3X(4;w4lvM?+U*-fvIpDbmsHDP^A(0F%gAmI=6rOW9hrVBKPQz!kI5{yV@N6xuI(wtY2g(Q zWoo`~FinFE$)lkeejv%+?dlrcUoDLX(~y^|n{a;vvN5pi9!)z|`&s5>7zA%cVOdx% zCaL>!YTllbnEZapLxl@;`|E|O(yR{PbHS;?!t!KZEyu6>rNB+n4R1$xHF0gD;jbkx zP~0k;10nC3gC(w%K3|WbnDXzM*qQk7_aDMB|0wwT^M%F$`afT=8ket|4Wdrch=l9> zLk}7~s@LYcCuld?KawOG^Q*yzw|gg-N+U3e`SSLz3-2=p)zSd~{2O311rqTFAbkOm zb`wf~_8RDUHDRLkm9qLG%J&@n zO-XxExbC*Ps*NXilQ1r@xd^0XiNraQRazkWb->X7mqhQHm6;63)e822j5+L}k@XUt z`RQ{y<$I#X_9BZ!roA^}=*l)a&Ft&6uDm@x?Ytoxj0$VoCfFS%NrjT~^;D9?ucBMb zGlLyZmBYB5=WFm$ar&2Ou#1(TvaXG4go2=RdCN|qR#nV-dq!?c_e`%Q@!PM18+?-2 zM^0LJ?$ecJJn8%&?joDpb={bp5sEin)A~2-IkLj#saBD@CST6n2b(o6Q+jj8l<2~S zkU?wdIv9htZtru5ejAb@`-!kc+ukbu-|qq&h>Yqz6K+% z`Lr;q&Z8+U{%7}9xgLu<=};MwU;g(KwourrWJcH~Y>;b@=4}&)%5yvR3E>P|d(Yumn7*O$|7mCEpxbypUFjP0w+d3%;ctOwchDwEw0amE zoYibVpF_4x4j+tElfBU)7H$em|AZG1Way zoYTm(2=MA%CPnW1hP>8tl&+CoO0WuZD_l@<@vEAJBrg0=C!pj_n2tOuxcZwqZokeh zm-qfRqrX4nDl@)w1-YJC-?x>_GM;5tgR*kUApR4QP`V7zUFW_l_W-%NwTS?*0mw%{ zzQmElf`OE|R~!Rrr~v5y+S9QgMTqY~avvskYdrfsfa(Q3@xzYJKzs`Lb9R$eQ$qGY 
z{@3gtbtxh$y#*oj1|0)XWQKtsR8;l$o|pOQCzT1tS}4p8p`1fJiGowpz3P?sHa z)4tFjmCk&x!q5gO(b=Gkt|_<#-djC4`ez1C{IqjSGW7Feb7z^n7^2SRd0T{8vnYSN zPDwsT$G&3BGfi`{y$v-!SRHWb{$e}M9o_QeUvc;(I#B;wl8e2=<$wCV{LhpekswJW zzl9==VOcy`^Wl)%YwZ1(a(E($p49pI0j2dMje+9~`~`0k+(h!{L3pbDF-OT&_}?<= ziRS0Z8KWGfqgMk$jx=cos;+GFsjC>xB@1z_50^ep{1+tZb4Ddb}?AE-< zb=a>^tfw3Orpd0AN=KUJ#sh}4_UCSgzidhr9;Ii$TwYlAee52M#!Ee%5=P5ur$ zfVRS{dD`W**|nCyGGZ$u)H;^QY38m$(8FRa%g;-qmS4F>Ho{osg4KtUeyPDS^{Za; zRts(^Ol7|`=GW{|^)gIjmCV+ns2@6kf2~OS)9d}DN!^TYT+l4ipDYI^@cuJVcjSnM zHXO57)n7|L#;yrUap0h;Z~hqt`Xv49QEGYpUi(Zgo|l_Q>kKkx-0ey**I<@1UqAH?HT=H6A*I2+nP{p84xTL7wiu(~{4Ro{u3~8=n|=^f=TM z&zJ&n5_ZXg=YQk=T4g>?koCHts*6(P9@$7kFDDa{8KVzAH85KExoO}19XmpYfB)Gy zuMnvF_4Xe4LMEUP9r6oaZAH5lc};2Gz?-=1sKBfeE!1U8NjQ=9WPn&DRl=U?er=CLw zRrecenp!bF#j^^wiUX})zkS>m3M_~xEq3@jc$YLaNeK9hQKZF}Vc!L>2w_=mU#rk52{MexK=6 ze_67Nt&Ax@9>gX;AZ61m_-9UivN2IeD22ATy0@beZ|&WVsXV+us0wh?Pcm?e zA*Jg+Xyqwu@4Oa^ytK`SZlip^5}upENETDpz;U6EW8Uj{xK*Jw{(84pdWs}9m`FUm zxH&vdgBe{L8pTpYr#Ro62pF}AJ>Xe_%haB)!}s3Dgv2Dk_{Y;Edqn?E{<+jrQPhh% z^*>{y9U+KussPaU0No-;`jyNw0G#{ZJxB6hWCn;Pi9H2a#Hk=#VqqZQ|C7ytMg_sUo%9M)?gHSi0G+^zorRiVGgc@%GYa0JRColKx6S)dh>kseKB3z zA*dL;o59{=@o>5GVUWd@cQG?!QuVOpN0#t3VQ05V@m+jAk^Q$=oriK$w%X3xJj3&8 z12*w{$f{_2T`C8=Y@#=zSd?9*+b*LQC@nX9&G-}K`Yg4{7$(@{*Mch4vQb$W^R{W= z*^^pr^{E%8tGnzZ(@rbhSp6&d)$N!C&&;AiAxaWzv!#q=5eA4S$f(mdD>2_<+&$j_ zMBdA*G`-k7uwmY8GqheeZ20;wW$OV`u)fIUssW!!>^FVZ5WkUNLXmxn5pkSf6EEQo z&Lf-24-TjCP}B1E!ElC&6is^p=N>=H!_>MXLbq6b&FFPh1#N=Q~sSHN0vX?;y=+wsfS|45Thz<#$KLP53K-`N6b>REfe^o;PpdX32KV5>2kw8rUPtki6k-ps; z2z&{m(l1TaLhIV<7JTs(wfMmdq03KeGSm_9}pnXa8BX z5ZQF)M1lx`#=8$Trp*Ch?*Mc7l7A#I!OGnj!mOX9m`a)DTjlUJdZcmuJv8-?G)JEi z1n!sm!jwD7xf2@vTpUvZCjI06aS-or^GaUqYukH zctMt-39*#figPLEjkYtK4}Bn0YR7Ul9SvQj@C%M_rSCD$%6piqbc)Ornt;GspHZT7 zxwZL{qM3Mz>|&)~HM%c(_-BUBh{r*1Ymr~xXi7~@Z7l0Swz2)b p&m#u=Ra)i`0 zR~ISyQ)^E{kcD4e`ITS@alTmw&p~-JZCkh`o;?nYjW`@VAs87IZ1HF=KRkisP!r-x zwYBf0%gmv~10v+Ubm8C}J2(<)n8@1r^~Z7iTn@R>t2SKBL6EqKW29ALraDZ^#+a^% z)5$FvOH?=0QBX06${~nE)dtKmAdOnxF|7M6i+<_Nsui^B#8&O%SKdWuz-~m5@7AYZ zC4bye+#o95pDPNg37)h(E*dME!KCsSg?}X>F{Tq1$+Z_Z@5q>aTIF0({g*=G!l$Dj zkiDX=Y_3G+Oa3hENV7GF{h!Phk-=M_48Z!xDatYYPj~hNaz=0h!av$Q5ieW~-2Z9M zssUwT0PDjMh}5*@F=ns0u~ocC0tbS;C8D~yr~e=|)(E62IOkFRjzYu|VU@_%{dQ4#B96;L zv&9eFZ0Br2xq0lgRQG9B_e~@HuszfiLCY7PDfjKmb>oo-XzZn*>1D_Om0MRLTcscS zk+Id|ik0IOBDEmn%M7M6$mficArU9n(T7ce<6uL~nlG%X*LppsUv0BmmwN0t{2w&* z@iy2_U+4}7S=_w#Ci!Qv@#G@#S*L75CQM)J z*8C*n@k`X6e-N+7TQiujl`#!E%L*CJ&>xr)c_jbi7VX1i{+@cwl}OX}ZwaeJa3410 zRK_fRdeHc<;UCswq4LyC@O0Cs0_S`v)clu+i8**oG7Rj2DYiQN%)~z@{_nKKb0Q?z zkGn1VtArxxnmC|ZW1)VJn;qkkv5(9 zhqiAN>IzKeLw*+hltr6{`&MNg9QlZKp$dlV$jjCi)LuB$Sc zqV2BS8~tp^#c>iZQL%CnIw-BcYR&SrH@ z-b>UzcLW5o-jLi(a@J@}w1}bVO$F=^8EU(3)}#Tg`QCF(ndFQ*!t+LXyEq9b1GmIU z)tkq4%*|D;R4nn%w#t8+E5w$j+a}*i&{R-vh4&d5VpWFJB8ADl<#( zC`xql!x5#rJZd+}nUs05sy&3;)i{yZ{k%*-HBR|IFI+4V+vNAE+X`%V7{(IF*dV1= zpRyMFVp)D{(+rNU<4nxU!=)=9`X#;(ZhEoyZ}>YBf9{1-b*G#asPm>R@F=Kartn7Y zUDaXorjB-d#9JS9=W1VNw_e@~EwxT(o$BBW zHc8t4P-cd&ky>oacU#6PL6OWxG8W!3`dz`!cP~VX98r;oSg=!}ZKSYPdqQWb8WneD z4#72qW;HMcw?5*rmI#&YxAGG45zDP`h-0+j)|r;kLK&@Umr^KsA{m=Tbn9-xWbE|V zh2xU%82RI1gT2_9MOl^*l?gc~>|uj^T)$;%sW=?Y{{6AyvG_w~Byfe-fP^nz2pXAqt0-$Ipq+u3kAc$I{0 zBwfw$@#!Cv@(qO6 zcQ@^q!E?WjamEL&2fP!eO7}f|g#R~=S|t|cJi9Z&jAx0-n17}NSRKXd^_cvK@?(-! 
z?60qNWLidTpug@-%gAQ!9k%ltl6n~57vpYsRtRJ`h3l?l9{#{^kzYiKqFanYmzWMu zG{PGGd9+nT!bH5Alu}SC5V)(hj(6dvj3Gz)`uS14M>`iLL4N#lE;-(WYA;w4U0&bK zbrI}}e-76_{(Q;gY;G4=Y*9?hGv%-j6}JL0D` z=Y5=KjR>lbkR`w9M7!DqYE?vYaH8Vw5>_v$WWeprYczA+yIczOVg6KR`JwW)+9c=V ztWy7UvTY&-VRKpd_%W9>e#)F=gw=Mp(Z>>cYV^QS0qZ(q$AeJUbElx}@>w^y;Rxnn zveVpU-iuvRK$wVS$f0x9^0rw}n>|Zg(q%B6aUd$0j|5uBX@$R6(+gWZKI%11o?1lw zQ#q~VANIZjryy1LAtP80gE-8r0r^J-(h74t*@WH+pO%j3P+-&ijgxccVZs9B&6Ity+96$Xq zEndi(i26;`WW0+d_s>B>s($Ltp6F7}3=uW`aAviW`gfO$X2wsrZ>NHtlfUOfPQvvI zagT=mE)mNxN9^!(7d|Fl8RzGd4K-?<>ZX0$0or8M63wG$3Dlh2u3GM1x;#@kR;#Bg z*O9A*#AV##`|NCvaXK>%T^zKEj(hAQBcHs#jQPy|A$ftKo=Oqd9r)A=uBSh-5^;{k z!=!W-YIz#P$w>beP1P6$;azl`rw1!ae4G{|9V4oXi|1Uh53#rNI-ok7=gbiH!3XYc zu``+V_)AxfV&q9yVYBZ*l$y7hO| zK~fjTr8A@tVj&~+Qj!y=uQ$)?u&Jj@Xg*^pH3y$YB5xMYQK{WO;!`;4+b-SR9l?9^ zpUon5N~|H|PzGL4Ga=(Y(2)hzH79! zl7=LL9xzyKW!&Se!H-QvGg2|$}YmeX2%!JR&o8a{YrWaJo^KEztrxi z;#aj~N?}{8#U-S)<)0)n?0(9Aa-(hyn)S3^)zH5`S&YiPvrcS^Z#SbC@W%`v3{SOED&$$8F8E^QiCH{7^?)eEcr^;w~E zFg!1CKfdcT@G0wM)gIqHH{>2(6GJi?cH4|gt*Hj&rBkDAB;Mfn%zwEP54drL(4H8M z1lGTKzHX=*O=o49ClFIS!&r&Ogy&N|;dIe%4H!s4J$lmyLauBV&wPe%?$ApcfM2@z zPigdh$;oBjFvB)UaWJopW5cjAP$r~sP$i+(l%y~H2ebH?@64EvVY$Yz|11$Y7-!YF zk*&%3)o=J`1UFu!_bq}HS}dv49SZ-?y2so*<~(Q^s0ZbuEg0Hq)+OmcCDXLH6a8Gr z{=}E9lY@hYjR|68610bN;c*+MYv?%w%=;HE(aicGveU#*!@}1b=!vTL=Iz$v4_<`> zqB)Gm>k|hBjG`(q0yI9iapgBsj9F4}Ebx|B}@siB(>OWI;WsT%;`GR&L=)LnwK z`<~O5>E!kn1AHK|o(=TXc*zm$`_t>~X;YA4JvG)Ve_1VWT3 z1@ltB--J$D>23&U8;Z;AD;FT|?pi|MTb@VQ z8ecDWWLji%Jiw%h<$r6F?+v)VsHA>IkoQ9S!&um0G|D(Iz({#xMT?dR*3UGNq$wn1 zw>RCD9t+%OekxhB9Tro}wp-%@GiP#gdt*MGE;><_OoDTnRD!hbj@$)(yg8B`Y%uCS zT`j~=PK9``17YMnv%42MZQdxGSmpNyM)%Vjssuw{6-x#W4(}1IHHr+T;k1VOr{%ri zzPE|w9@3xd=+DN{E^A>$Tbp_WG{V2>WFtX4XDPv>+47b-o~N6W`WcNMY%*4B zv+K@45cv0l-B>IUeDTDsJWek}jOGE^T|v%Nd;L}d4_$<~atPH|LfLPvmCR}npsxY# zl0d1*ZLaw8^A^CY?fi~+0(?LN7p@$E8cM^o9Y+viN_P+SFc5kOf~f;{Ge90U%{$F7 z!T1%B*y*PYVB)fx8;?oc_EHEaYJb>jjt+<pSr;ok7X4sU699DFzUP`=5h8?4j zvq*J?LR0JEXg2ou>uemm&`MZH!)>S&4`Uh3Kd4+rLlN5@(ItMVoWQ*2*h-slw@q{> z5cXxdE-@Jg1$j*C57DBP8=n#@V{<9m+&To}N#)G?q=Ed=?Ic4;|3OAa%~h<2y;s@3 z$6}pe!wMAIB4g<)r^~1Bh_y4zB`;Ki+~lE~bGgvdDJ6o{I0#Ero4*?r{Ew<~EIr3! zmmcu{4$$JbfJqUm9cvDKF&|yI1nE`+YLlwX#LV3Xd78u^xj2f4UzK)XL$Z;!$MAerM^*m=ITHdt2=~cbpvPrD<X5of3yBX^*B}0Mrr3E2#2jv~TXxAe)^neD$yX?O61qiSWo5ri5WSH;S2SQ~KG@HJ zwp<`A=8N2H{?-`gyJ_Ra(*i{GTfK56u{>ea=8iAMORw?fWN7>1onaF&{83Z#pN1C! 
zZK%B83MJnyj89*J{(#~xH>eg^Tc3h(ID*SO$H<}xM(#FGb;Ed4HDB-{t7g)WR4gYC zo&BAE*u6_g5zC-(X&d9e8$OOeh_9@cwbktJcVsE&`>9T{H$;8FYt=g&=5!38ShHJr z*)4?_OzPEgSD4W*C&MRTX9oNkNH4CWje@bX)@~t{J~x}*f_Y03Madn5?fi!!bX;H4 zl}7)p8`UP2&tCq0*WO_oG`@W4%tUa%*Z*9MY|r^F1;@LL{G^eaCN+`@6|KBCJ}3TBiuDK(owhZEj{hEwao{bbmC7zj&jY_74vwfrO7 zZ&*>bUe6JGbYZ5WbN=0on778#zYX>5TUyqKLt6D)4?4V_iodE__s~2lmUutcGgr6u zz&OoUhl4h|tgerHJ73J(#ORfdeAQok9|fK7&`G&bHPv=nHwVtqunzvA^wGWVPeK`Z zP%E5&9vU-dccHz~%^Bzr9OnRd0?ukTzx5xT|4wPhaXDWkJD`VsGh@bfum3(S$}6>N zvM?vCU+#-O+R-7v;a!Bo7)qg6z_T|0O-<-xQZ9xTWFuIoJUEotR?>dF6cCiu0ei)2 z#E>1j6O|D6ipR6n(O7mLpPHK29Ud2+vRcBp}g$sYVc z;byr8Se}LfF=Bn-1IZM1+pPj-377=5`?~LeH&3kNt#?dDP~dnBXcj>J7k%`BVwzs~ z6umq5Gu>ckV#d8JcdgTi*Ni~Kg?(IwP@HmC5EG$*SMSM$vAKJ>@5Y3aPr0dpY1p0R z#?1+O>3#UtN6V7?F7U6~Q-&n7^UEFo3%o!VA=?2awGS_QyNDxdXC!|S3Ten+2hp`m z$m&3o&sNiRAF#K39RIL1~$*&$ba%1G9jG_KZ__j46FB%f5sE=?#6 z)DpzhTr+kKPoQ*h@@~;N6Bboev-*$*rJDZ|1*sJ~{6v>z3mJJ+x#6?BpZ!NQyeG)S z&iq5;(}gJ>2GiCffe_+?k*X0rZ@aqjl?iWSyis8Q-G_IhM{>dCo6<8_nfCe7mA_k9 zmcf{ZsCd;9j)HTxw>`vO3SZc9%Ne5b28_ z2K>#iI%d^;c~G5{$0GhJ*W{g*&_2_N?mOA;vltKRs}^$b98JYBo8dQ~Sobx#%5^{9hqAeUcj`krYs(tNSzhUHtBk@58BH5v9MM z51amNVb}*T=k9(u`v}-0r4yaj$21CmhwH0T+DJp0rut`X0Ehwhf8CwH@f}Db0<|A+f#HO9f>A|{+>0u9PGz^1uP3pm(pxm#mQfO z(M0$%mk^OeZfGMX?VUhF4FwgI2lZJLLJTr+h63i8JS>*HQwCL0TvHC>SSEFI|C%QU z?{FghcE_2tC;iv7h|}e|fX}(dW0$*Q=WBm9!%{&;Snkl0RE01{XNK5C<+`?6aZf26 zj+c=-D96G0sneP^&`4&AVG~K$9?ie8Z^0UoiHopJApje1OTf9zCcTf|DS=~kKmJX& zg@I=e)zQx;TGa_9p|Ygx4!;RHgX%1?(vR(nYLD;**enwmD{V>V-yIqDom0xgalTVf2jRx0G~VR1J|Kra`Rme5 z#jZ32|0&X>Ypl>cpfA`y6shjw@pIbct9Ni7@bvbHts)4-SnWWb!A)NA%n~++NzNE- z85Vmb|D^zx#g9!|3oYbuX*)!bi(`7BlHQZ>2}7|vJZhjiEaz`R!rYIY+v?^FQb z^cIbX{C;WuhP4Brd|XSt21M?F8W-`sk9$#308!~R)O+qQ0NMlqhj)FAH}r?@K&RJS z<%G^@7qt=!bW6{$fl^1^X*4;5=>9Dsks%=Ju`qXjRFBylHrzWbQNM&VsE9=^3x;gbi&Jkop zXR}>wtGGz-5cG-Np0v+x*3Gv8Z%~);JIQ7c&yA$~6{^2x%+oU+I(es>8I^crdaAy) zd1n=4{4U%*V*6ELqIz5^!PSpVsFxNJzQ4?WtXkrZ|K>Mrxa>d8{nc*hyl7+Y4Aw%{_-OKJl`m00pcrH5?b4T9c7DaC=6AyhAw(%k>6m`+}olX<>O}KZp zZv$P{Pue{kis~uic#{2hz(O5CE>B}6XA^#WyO<@XXU=T-{&{uR{F6mx!f|pnM3~Se zFSe;Vi$hJ#Gb}ev_&#S&^C}XD3|QDu?o{>4Qz`Dp2m1Q& z_b>N!ZDE55z|_smDwi1E4xLHl66#SZDf=sB19qi1gvHP@s?A)bR<-(YSwnTmS(67h zw|DQ+qkW@k(q9`(wG_!_wE^$J8Ov0t?o8n0(ErCj`hB#?*w-CgAD2Acd-FI3?W+-n z_3Ruobt=SRyKXw&B!$287J>CRq-~0((dRT>ry|AM_Vm(;6-d5xgJ_{>OOfAHR-`^N z>Cw7u(UBR17jztc$7|K?sN|g6}}_B|dgKWFx?Y;VJWC{=M{| z^?PL$f))9U%_Og)!9n6yO$UeQu=WviA)GfR`7ARycfe8Di*bS>ksizh-RK-gb!S-0 zG4wrkg&2xK9wb4~&Ec}?&6oZ@i^45&K$@PLa~mLjvard|dFE$qA{qKj+4FT`+f^L` z0=RNJEV$m^wY!5$!eSX>M#QDjkQj4gyXZ?Y(QjW%J-;~DU)T)aP)$4`F_brt;SJu+IQv`&j?nS+iEF{W$H;befU zT35U{u$!>;G2dA{K$*JlhnDPtM*~|#7KNnGsNOu+`RJNcC=h2jZ;EVo9{3=9<}+x{ zfS|n7WA@RLw(Z3g#)yfP&HF+h3*$EFmLh}I_cKLqZ+W|z6sA+2Sh;Q?w4#v0*au69H}({QC8P#xPcy@WH% zh_b;pH!2n<37zHZ&q0I)pJZY#WQoj6YGU$ zfWe0*+VBk$cQYzflZGC3)vj|RLl>J-@|WTNMiD;LM&GpVZIba70gG%C+WI28^A?dR z2~OUVAFE)cCCqg#VeMLyY$=J6u8vpr;U>>}CtA{%pW-@ga_U-|Q-hVOS-jY;zn<{- zH&R$3TDn_@+xs$a=Jf=|o>51H7<25&olVtW@e7<%hGpGQu}8;FU%6~?1s<0;%Hk{7 z@-y#@Ga>d)RX(*}_!<91^S}CPe?yF?=Xx{9R%k3JB$)MeevYGLD~#FZGKW=`^c0FW zR5?QWtJJw4B!}?&OD%6xD8>>ncn?y(dV}AlbwOE2f@HIwrS7nP=>-JAJuBp2^M0IQP<7S`O17KUD{o<5iX(YIoP!IlbK|S7|0QVzRC; zENKe^+^(oNFW3|k^gI-T^NV2FaLg1&GKm0_f>T6P1-`pauh9nDMJ~)nZB*^S=?X+; zY+m0boePy9WMCu2PRXo~qAGUi;+Wf4NBG8_##_G`P7;+JE$BJ!;{qFnCb0 z^LX3ymDU|kZ3RJN{J@x{uB?o`&%?}*mA7cHVB=i1IADHo6c@Q?zYcM;V@*!v?7(D+ z3Gl+RXvKbp?P%BhlPeQW?(Zy00F+1{8PzX#qN~>n=u>Gq!`LsT>wfW^Sdw=iHEhh&0s~*TzobP!6B9N77I~3 zsg-fYUq{s^N=YX&zujRmsD8hBJ0nRs2xDFc>Ao2ny`e&$W&Rs)%+mX~eBlwp0BvA> 
z6qnPmG2Ic(3m*xY*|7Z3Ja;K(6G+GhAGBe!LhAExs@_v9p_#i6i_7H^PUT=UQQWR$8NX&pW$M26vo|)MgxD&o{`@j(v5#D<;Auir z9doYz7>?ym!z6zMQ>wcTwop{w-Dp^YoT*#e?2~8y&9v{&qOJ4<$uex1!+9~J*rKyC z@+DlDFGb$>UA(2)K7lGx=bm4$L==V|E15MXth zX7RdYMVowt0%-qEDQFp$sq6n#Yzm3lYLOjjGZ`Sn#5&5hY}GVlEKWOMbC8^1K1;?J zWV?+Wk^GB^_EsLUWW? z^7j$R=NQ?=fm43?lUU*bVO3yuvk?+RW&n<>{BL6IbLo5kB*Hzjb}e23BnO8K+h;iLHA^ zKW0*Opqy)3Flhb#ab7H&sTVoz#G9@-LMXZpy3=}0G{fOhgK^14h{5VYZWv+1V8Y$1 zroz}7=M*QyaQo;2%WR^#)L5|d^UL0BO|<}6;p!sR3La6YaHnscx;XZTn828C`p2|T zc>PjB*}9&YD+zD%r89?F`&>K`w6uDEW}ISn;a9RxVFV6rFY3$(Xs;N_8gOvube>5m z&=c1PrTEw>&%^g)bP#SlR|!EUGZI9UrXiU#&uPI^kLAwr2X&6!P!r#muZ8g)y1@M~ z?9*Yh2G8})hD?mAuA;SnEExP2sF1eQ@Lp$GyGwzT`3DE%1iRRpQ3#t?o?RL0NR`Wn zSTks5XUF4Fh6-sqdot{rRqT-dGqimldxKJH4@%M03k=p&50x;b3sAGq`3BIwkTnD6Nl#TkB_>axwF6=-2Sny$4F)0g!;r7k4}ksx{rkZDKaY21REJm3#guJcXr&h55&jVb*u9hC0i{6{ z(}vxbPk)!B6uxbThTwlTR*ljPb5Gg663eX_R!ld@hPsOUsz+U23O8$xlGXVuC>;$O zzfaPWcefr1Yi6p0-gnNE2i-SjD3TAj4HD_bh2~pHMW4vz>pa}m)b46GeIC763v;}w zk8`TP?mJo9iR0(0c8t8meJ~zfA?mL_ArZ9_iq$-g=a(_lePHh2AsWfPNb+<*G7=)1 z&D`R!3Zta3u0ESbLAinWH@!OXZ`d7KN!cAy5cX-vs&caSA>vJT1e49})J}|d zra)yKR_!(Fo7e-daj-o5>CWf&EXQCdWqR}9q|GQLu$lWPK8L7bLh%LJ`v&!fqISfs ztc=2+omDt6qMaa~rWRwKFkJO|Er0RTW%hgX1{f_@GRU=75zCuVRZbAI3-pp9>xsLz zt_G{45gIWsmMbTB)hgYR5ue3Aurlk7*BWj7VT&L4avT?wxoxbyAbNku#!zd@zm=?J zSbH_Z_Q3j0?_vgd4`#0>E{c$EeumRA=cK96eJ(p0TA^~bil=^=`Z%XpemOmDF_$|1 zS{FmT*5>oKl0D$UEjZw6UB#d50p;Wa&X|$)e(5KaiF`%$u5Te z07+zIRuDOrAN?ryI-W1_+~_D?@}Dpi5zPg6zbszPxi8cd8*ZPD;Ub}-f&yfY#o7dt zKiT|4K#+-Wl_L{Z;fEE|YlA^^LJ&*xCN;Vw+e-KNZpF)cdoe9iTa(^c0|pzN>iJX^ zv0$x9*?!91Slj75HJaPFW`yNmh=DWwuYUyAANV?bBhm~3082qpPaogy(6`2_17rZ% z^6vo93Ik*d%>SV3PP;>unYky$yFeZuEgngg6q(1MqYK zFZXxoJKz!)=UQaxnXC->iVC{xUYdX1wXFI7cyIz`^!kTpcA>t6(;z#5$N=Cn)nah+7SX1XcG5_kVr zV)XG*u$ek~orLa4l^aq?QQ8~ig&5O`ucqVd57;`7df1+0k%|ZPm|3{cQ)Qfg!Xi!Z zQ?X+c@xJsnp$-soKam<^>8!^r>JL+GVUFkl8ZvTu%9&0yjK5GE68#3ljL&80c=jM@ ztOz(yDs>N@yE%{qqS{e1{54s)=1;l~Xpd0~RSyTOGb5 zX-0BH$sdUd?u!zZRl$1iq-2)3fP&kfGgq#fSv_VnVzz2efqm#<+Z}6*=mk4x$4nxV zCHwL0L065lr{Gy_P61mfD<}1`)qQcAQIWQP6?acc7sY;F-Ki8(F6**v2g+D!IqMAk z()kUTe`+t$<_dL%v!-Q@i~b=@*^vic{UGFit*c!~I}m19NgoCk%tH=ZJH|y@grk1&pun zhRh@8^TB%cl*C?3@LGFS(kA`_R2;GB9)-VIMtty^w#scbM1x{TWgDG)y7G+iWAmKS zE(6&kv=3h*m9|;>R!S1qw7ZES)xoR`aTaBM1UJ0vr#kwF<2k14f7scw$_!<6TQ35r zF)`*6Q&%O4aNpMd#b`E^{civBCV@<+HYY2u*)lpj6cR<~&9Ixi2ABR5Nq$dr*}0zK z>*I#4yhufC3eo~;GD(%+I#)d_k4lOU<|l;SBBW0PwYZ0A3ELTP ze3We6LH;><@bSf6YddqMg3eiDxiqf$+26h_+Wu~H{=_ubQTgsLmo`-7v~5AzthKHC zX-a0)dI`lZx)jD}pYA~iigwn+z1=wq^B6G`DF)Usik9y@89Z`zT4 z%RYZDZXH|McJ0EdwxS1zjmou)NaD`QP(WoaXRRzW7|n5=~x}xw$rg~+wQny+g8W6ZQD8f=6T-te)s!z z{;aY0*ki0yv*uc}s=P!2(Y*VE5!>~$_r=L z^|z!|5yzBMDx$=vxqmD3c3cb%eQKL0d)^qjp}X7ID?8u3jY96uNElnA`3h&hbf(bG zmcx3!C9V}ixvFB?mkX%CS^m&Z5eiqLmwm6%F0zky9_0hkkEqbS^0=lwlw;Z(DN&5S z5r{|0K+05+QZgrSe&5QjJK{X)$rUH5$|+jIwC3fUs=#_%fEz>rhcVXWpuUX`T7Nx$ zoZ+$!P;V4o&QT?R01D*iRNdR~U zVDkbG;p8T&T4Kc%za{Sohud!E1emgjFzK7rIarJSScIwu?C(j-PZVJ;~ONx(QGj2F$ zu3@^8V_oiD{=~Q&u&5G`;4ka&&8!?fcr5#sJr%^yn%zp#Xidhj9gX7s~yrU!4nd9l3~D7r^Mt78?<`O?`2!5R*hKt%?~<6p}rVO1xDkL zuSIL&t>1|{kg*!W&PI0I1QNqoff%@OGx~F=UKy%E-se786a7iBk689f;lqz8yq$2g z2}CPH+xtTg|h)lJ%twhnMif<4OIfm!X zH4ZwB=(k2&BlIVrr+~GN^7}DvX#MAmuUirf7g0f(uh3VE7Xdz9<`t(-(z_MMrCU({ zLx~oBzkbMlU~Yesb9q$2aUFmMf8n3M9s**;87p-jCBHEE!%qV6S-gANFTVO9Oi3zZ z-L*~w^C>bpEY_1aypebdtV0-UN;)|yNZ}Hje!Y?@a?jm8AJb&UH`FGM$B%9YuW&Se#E*gwK|zfj754hp~cQwu3Ag zA@#LxCNL&^ga&Qi+ux=p{}LiJ9g~fPAm@Ce`8vX><7gmD6IX*!#AKN+e0>%ek;>C0 zy~Q2A&|^5@W>fW+YsYxFntN32v@&>EWXZOTP*{eKfGt4guB-LtQ_9`E$W}cwgN_@r zVuO)&KpE=ABzgZA1NmQjw-84|iF>oU28W;vfxdKCAdBka@x+qVdH7jxNr2er)thfC 
z0g0~H852Q+<$bXdbJc4YLpm%rBZ9d_2id*M?ucBYM|EK^c9x8WfF6UV;|!%*n%<%D zCg+1OHjP^oA#IhEctMA{*v}Lz++;n>G-v7WsbF_qCJf`J737t4A>%EAvT|VA1?2hq z6OpU&u+Q7iQAR;0q~XntI||K(LbWzO*EW=&!Ri#`6p`heaZbQ(&dJRDs3^hzH-7w6 ztMjDy2J;MbVcgYsqHj9DQ3_vuY@D%31 z(aW$)nlWx+x5I21o4K1?98IuN#d9C}h@YCkrPuDrPeiB`=^UR{BO!^)ERfm@n?l`| z=pxhF?dBr;Pphc*!c%rzog!-XK!_55vUa^-)hXPg&G~9VlcM-9?h_@}TPH$r-_?T9 zzyg#qStQKcvA7}iY_}%ERtuLSx!R}#0dHJ5q;I1bZ2F?<&O_TRw{J3wW1xn^v|{|0 z(kL@HBcIO%EA>OPb8y5u1Na{^MV#TL!A-Deb3(SY>iJDbYU&fq|Dw*n1K;4O*Y2;M z4HigO9tH3{0)SEgY*KIgBerMdJ#@_v1T_Aqe;WM^ zCkOt9@V{2iAVcf}Ab#-wHe>35HyfY;n+Cl6rvm02y-y)!0t%mP77%Rq*nOV-MH?Km z1F+5E-P_s#KIQjX zZ<~*KSB;RIU$ZY2M?k%jd+Ga&)rQ#RF!vAt2H6Iin=%Ytcp`4!Ijcg;W;^RM2-+@p z&IR#d{oU(o2qBe*1y`0dz^fCZ4EPMEl%CON%)VddGU>DyQIUW)yPm-E4OsH{g1jJo zHFOB!U^INN^(=!W5!BmS`Lv=C_p|Hz_4;e_5E9Mshj~db(vOj?6ulOEZcEMTiCpE? z(Q``gq4}s>N-3pf-6=pmtyx+PkgStNwd;Jb7loj2Jh~Ej>~vYM4kg)sDk{!A^tGMw z5AbYURd3Jqr7pW}q;tOl{w&iMT?OK)h`R|VT_h`5O*{puW$cl*+LHsUX6 z`%!?1>YB)E@*017Cr6qKB1Ww1sJ5`X))*Abs0S*NiYx9PXqh}hx2-5#v4Tr$MT`_$MX3yIyF zZ!M%mMfqzlh<|1|GA~KSN;f6tiFXlYLhIt z*st7jlPvXcxUphy%q8OmF~d+H==E;(Kbn3gs7jO4bPtI+$Kx zJ|?4AjBBH68NpTl(E4?9*^eFPbwF9JS`a8208goR>cZS0*OfZ9hn+{o&*SI7o=de= z%|H}QpUJZg5}ikcie;2yNLxJf|8}QpjGje&d7q9{K8@o}-&T*Q8qx(zQII= zdzAVe&GqSzgi_NJgU;!dO!!{LKOq5yElbx#)df-l8i9|WsrY2wfy5*VK04n#hA+b} zF?(JK)q$Mt_u~7V_hQHz5P#z~2{3d5Ks@>GgylZ~FyCQlyFOheApAbzEob`;umjYf zmjV9H|17)Zw*z6aqW^8d-ub??}%8Rud7i2i?XbOXKB0JNV+UUG~r!@#F))aREBk&A)k z78bOI{=X-E#w#cDode)^Kr!a$`#8>gfBXPKabw;AMMWQitxt~(f3z;(4QmVF;R4{- z0e@VF&cN)qseoNWMPVPlWwOVHUq|$k%GN4D`P;IjiI3V_O?dz(483z$B#zDa&K)s4 z)QQ61b2ogpgRRO}c5MYo*JtwWe48>I9=(vqWpMd$ohq$a6`;aT(e$i$$$r9TCd8yY{LJ-2d$ns#Np%ZJJ3uB zSV6SmRXBRlcJ8M;An}}cInmvhiG*WmekNm3Q@VG< zj0%B4E2t!qB%UW4^=g|2C^VZnsMoU9!@O-vMBVD+%S=7`$7f1uZNA&pl4-FB+y>Gs z`O&T`7F@`3-X1}YKTD`R?|m$ah`eOz<@D8+**|F&I&J^FnUb9Ks5#%To_87d%0qy? 
[base85-encoded GIT binary patch payload omitted; the literal data is not reconstructable from the extracted text.]
zg$cX74)kQIq!2aWlYCG6Q)!&R;X90%Z}vXopuj174yIi3-*|Vq#8_P6Sq0op-~PDx zbYUjAra^l*Qn>8Wr@x&T>l2$nLcpLMo3DCCik_e*T&H953b@Hyd0TJ#OcNt9EbgWc z=8kG=c$vQV++dS}5x(h@dQ3B|*&|&AN^om5aE%UM`zY_MW!S!w5z#Fn4!hPSOi>s+ zv3?Uv^%9I%)WNv3uX+|Q^0#yFCVE_zU`wzInrxMD>%oXxHZ9TD>&L7g;CoqFqJPAwHs8m|PRSgMXN$jgc+njd672ZfN7L z5bcy5yG#J~pJn{^W26zJU-|HAUr86oL>DI(qKp>m_cteOFNy`!G}3WuNOh;_$Z6@* zc#flQpMQE4J;G^E-*TF$FCEgbmoL7{3>~9v95w(Vp(rgdy`nXl$kO9~|J#J#JABTz zmWCm<5AIcX)g_o@8@#buO1qZ%;6g+8#Lv1DSIz1P1l6>>R=2ZX6^O-f zytJrx8gixf=?|>T8D>^#-`!^<`B1Uyekeb`@!X8D6j0jj}0#VjiZW}t2t@k z%vAj*Gar(r$!>=9$OBKtQ)wNOXNHMV9KQW$z88mia!Rgzmhn27aVI{)Uh$6C*AeT~ z-SxHjG*|+}dRZ9=eJI+syEOlIa}~N%#=tCoC0_@ss3xR!WYsxs)Z{frWW{QSmLk@# z=}c*i__ssQnMljTDQk#_K>M=>k%#EtDF3gtmt@|-Yu33k6quY{A_NTg+QKT?tHl6KYt+x5|H-)T;2BuaW6I zw;sBVAHe%Zz|YSzQrljF`R>7FAs?dUhd3tBiOj8&jDxs(SOqgG%Z+~SqF^qJChCZN zjP+P__**x)ZSL=iR^kCdg@w%uD8s{C#Sk}AMc-ag{U-a*SI}gL#iNUWuj8wJS7)2q z=p?q(G~7#v#RnKa8#GD7v>nO@fP#`VKw>Z04y64*GZAKOr=3N*^{=Z``s+hqzOhL~MF$xY^gkmLsQQCM{OR4oIKo&*SJG_Y?q??cC-M;W*TF!Rm2x5@j&jh1$_e;XT8nC?>t)w;I)*?oTl!R9lb%d zub@ikrG|boUOvwN%5p*Hkfo6ZK5HkP_soN4;?18+mdABxo!yUT?liNh+65Z5nQ3UJ ze}w?c(q4Di5KT7R^Zpuv3DL|O3mElj%0_@I*YrhRrc_?pV&PnPn4mAGqUV+&{9~Qn zol=;1o_qbx`XjI~oZs4LikQNQFyf1Nd#9mZ(uU7fT?u)#b*8bBHf)P91tP(AbPK%r z64Jk~P$E1dFqbO3rE9n+*9dQG)_zeEsiPKrgh-jrG_uuc4S9Z6EP@Q%ZVnkz(n}Bm zaz;?3?M2F!R>OD$-VF--A1o+>E z+iyQ@mWe6u-4c(g4{zi~K3+ zKKuyvU{8Pc^JTxq7D)V|d^qmpb28A;II}nCXy%G*830Mg#QCSNFnxw0T3dG6RN>pB zrB$&-MW>pf=(I$0n|{jrdd~^Td~4~~$udV@hsEhvBrTlL+ zB+v|51&V*~`CQKgo1A=QS2v?RaYIIq0o1S(SvA51#mS9>7A7<5Ftjq=8;ff+kuK|h zI>3bcWa8LUP%%UppS|>a4bT3~v}Wd?w1vIVLtCel{tIOiSO9#g`G#PmP4C$^zqPu6 zhA}1+f0+!9aK{EXH-bC67y|y*n~~A`=aWRoGM?^`n>+kt`N-c(jV&q}9sHcT*S=LO zEETezFV8DoWQiz@MOu+d(S-D_Y=Lk89!}tU3meA^CV?XvP+@vEeW-3iMXDjCn{9MK z7(Boj-g|Klxmvj-BU15nu7XzQgPYN>A5OKr=h1SV{Lkk&my9euAb`mOAI*VH3@*sF z%YGu`l>S+cWkNa7>{={Y?gy`3){5rQ(3GXA4(^oRHdxRHbm=sGK)nRu(sDyr{g>|_7;3ZiR3xkqZ|;7HFYY6XdT5xSHib$#D0`@J(*a+%z^$k zl#vJ(I^l99rO^JNg)RR)$%%K;ex&!;D2Ottp)8zSYGAz0Ju_HI2b&hv9POPl+O|t8 znwFt|`*c0)_*C^u&m?gEZ6xYfzn2`tY0`bqOAK1N)lfyM1(oN4VTf`bipoIH4r9#b^v~{g@mcw(O)JUypPdxL zZi+-s%nBQs} zP^7e9ZJ4P2Jbu3+*o_DWHnGEpVMg45yqA^7$YViGmG=pYB8OBgRG4zbyqiP ziI>8-Fg+Qsp%oAqH>XjwiH(ViXhN77QR=c%hNquyF}RURVPkxgZo`!<%l4Sl*si`u zWoE;q=57dDV@2chud}-jB%`2quCRoupI|=`PI65 zL&Q9EKDeU}&$`h(4(A*M?-T2BJbxK-j1|HX)ej--s-c9pz)I-MS)}_^OIX-7Q^E%a z%cXO&3|5J1+A;*TtlBP$eHMgpyHoPi~@o00Qhvk*Xbj{D2Nw8@UxKx0ww?;3xLfA3c`C|dWF&C-pp#h?x42qyaAlk zJx(>RMAslJZ7Pa|s%Al@(uiZ9e5kRYX0+GE^h(TW%~uW!Mx=Y)dG$Sf%iA7KqyfRU zp3~SBwf)GFVzw9>%R?2NWu*23vRNB7@_x|O*ck);T8(Mcv!k{ zgYst)E9oXk%kNZJ{VjeAt`URB;YoFnxekdaCXYFoL-*XT0#108-Lk3Jv7HQzo0}U- z?*D18Cn?O*SCe5fMT8qB9zQuZrsQB=a9K38G#Rk+Jn%;Cw*AgtI$C6qV4l24D0JZd zkR~2^NbN;#Fn&zd;@l5AGVMZTa&(4Bu$0P;{48z$WNT(&lwE*nBa zbifoZRPSk4=aY(lQQ`CzR*vo{d}&UkMtY|Ua!#@}YNbacHCp$NTCwNi_DnRp>h!bp z8a!T!Oban`a)jvNDQQRPjSERiXhDC0DGip>1~#`)G|G9u@zi$q%J%tMvt9ST$1RMd zk%?_Fc2Q>4HFAG=RgU4^)s$SGizT)VX8OAlIx92OQjT3U0NL)`vbxWj`WMOguO5o|h%T-X^a~>rC{TeVc_!+%@Xyg27&;WOY{ql`g0wyeGl}$el|d#C#DaN{g!{d=P6%w!GD4vUIP;Dh(I0yR@&G5|Lci9 z19h*!%%H)C1CDHu%YP@U|AN5aPXUpQcLpdz0HF0*^juB^uR%oK&y@fv4pQLfELi-1 zA0E2++_>*~|BtD2jLs|S+I6EwO=H`(-PpDp+vd~8wr$%^@)|~gcF08HpDUF|Becs{`fh<4)>-Zhf+x#8v{_)+h-4NE&g!~gW@lCXz0$CDR==XQBm(BX1?z2lH?s_pRkWsdjv zHT17Rkmg&@;0Gfx^})zV_Zd)L8+-;jo&W`auo$=uHn`sGgzP<5xP%-c+)6$LbqX#% zAwB}1&?EnKfL93d#loIGxuoB&Q>=JnxX7TSA*X>_RY08IEo z4Hb1Xs3wc>3bXdui5%>?^&~l5R|I`zUP%E?(C)}u$F4y7Ox>8JMg@9i5e20*b;LLl zr3DOU!89j#K^8n|5dX>H_d>ZM)c7-z_Nr^}dSY5REbRU}V$yk5bryuxQd;nO^R5fM z{9HN|8N#nhJ%V1syL&kEfSnCQKo3gt4A!c#``v?4gd10noudP8-sCC8e{-4xQE45QxsLaycN 
zlFKYwiw$Yb#)NWw2PTu|ba11(+5ddcdZEVROQam*nAR?KcpjIc2<U6T14%= z@e&N}gB933jA!SGJ&r8HgYW-UNSqTQ{Jd4qkZ1_qZqdlg@0 z?L1N7CYa5j*%=$vgwje-{ncZs8yMvZ4TNBM>@@4A7lh(> z+J(@Xk!k}*%$sJ=T~&lUM)QU?{N+r+z6NfYdk5NUVl*1&+et7V!x^3lI)LogPbv)B~O-~$01 z^D+IBEzQQ!Y~a~nLCD2`;R}Fq*z-w6$kIXj^mb}r?7V?wfc_J}y`S-&P+pYhxx^iO z7tvwtB?maf=_vpVKS1sxehJ@W;+ajviAj5)f9=l{*|iX>UL}cTMyBfOxn6e5b2j$J z7L2flze>v=?}CBntI*77d!)97)^;i1{VIj{!ClbgGqRCO(O4*JlUtAXBUMhc2c@Q< zc7oe2Iy|piaXBhQKLsfPltyrC0+%lt-;w4>kI0^)!5;yh${X6n&KR!Huf$XD<8@-w zhTA>DaTPj>O?gK(*9XpC*y0=vYRzUetEWE5>%_Trgc`7?&JALKmOJ-M=R=dOS1)%7 zJ*&~52r7|MO_IWCNp(i1Y87r=OTuY$Ij$>7(r%@j%ikwe!^zULny#$6+fuVbWIJ@) zm#d=OflisPO?N%*D_nZOz7RgDN0dnuL0zx+neiFPNT$U2RZj?}Z?X-V;RnqNwtkWDXSkrZ$~;u9iA^$8SuMB5bhCXeo!tD17kc z*>r)}JL=616S{vEBkpApFr1d-r=n&g|A9|rSjH#O680+11|CYchaos_T-f(y zDV{4*u*)Ce<_ny&dHlD%kHyy<^5ElO$$L&kDKXocIzivDiZTQ#_L|!B`H)oY>GzGT zis`s!_+odTQ+qtdJ`8%cfm?-l!~?*a^2kh$-*i@^9}-5byAQAA55?Zjw=buiR$!GH z9w^VCP5e0Z0_J-z7MmZN(8k0(ZlPM`*Ed9dMCQf+YUq&qHQU8muZ{!AvENLWvNvDX`9erX1SyacPE$u8KOL@f-9ERa=}628dV1 znP1_Or;NJeN=kZ_=EelSMyNR9x}px~;J_Ss6_gcDbV^(gj=#B~xcJ}lM925@1iD&$ z84o0_-10R^DHL5^_UuI2srP0~7hH4s$R??E-D5$$yGRdZ?|mwus@ciQ{3Zs4%9gs0 zHmNGvZftR+%a>WSEff5pb!ghJxYp{mW7FBZAZme1`ha?}Tx2ZL(vViwOu^ZqDp5T% zt`sfwXdC1pblqumo;6Y;e~N%ggEjo*l=|0n<=4z>QebCBXh`d8zbY4LSp@hJp%H;Z zu&BIUz2F};vFl2=eO4sIfnNt?|B$}QV4a{iO}D#-%G(WCghFM9H`zR!h6zl6FQ}b{ z5i$LZW(6YvL5jkU%yg!`@ti}S7m+<)0$ znW#gMr7xW#@Uy30n+WnXECgn`YA=D_JHWOZcsU085N|&TUu_Y$fcp!O^9ZW_JfLi+ ze6+-KP=EaQ5YxK<2DID@Rs!yv1KMM9t>S~q_hTpQJzU^APVjVCG9;vku9j4lK1fUr zDJaS#&s(0TCm{UnUqK_cCN8*$+TK#QbJDf)+Sd>{a0$wAi==7Y3>s2AZSX35#-i6>4le12x(k_#*6swH6KYrbl34s%adPMk=%BYF;@%HyVFV`2NbRmq5DF#DF6Xj>)$Etq2JTn_Zno=pOKW`9z z3KvqY;%J*DWRZ7hfsn;K$L@Q8jy|S$;1OKUfvWVoAN)v7VC24b4tF*OS)$eo`79n6 zJ+(EP@(9Gb6xZkQX30uxEI?70>?Cz8d4+psrrMt}?(q(~Jc@hgT8(MByL2vx z?};rb(nIL?oTwHl`Q@8wy3`k5w1Y#YZIwM9kV2wRN7buQ{oBf(*TLPTI~q2Bkb_vS zIzga@*xDFXSPp#KC%;Q;LH=ET{`otaTkqzb2?F?A*sWWS-JUKA)Wf#;HWOG z8$K!9DQURyI;M#Mv3{ox33~T;9Pw9FrPf=`Jv4}%HKFvi2!H?9onLbGgK_%w&lj4l zUrhwO5@->>Te&B>a=xS}8*h~`YC)yD35p~ry`~PcwSZcsgQ3*p;svOcni5%(U&d!e zvggxqMK#-{DsR*l<+@`>us(*HI!iE75ecm}I@k`OzpHbAhnn10`Fp-+v!=uZ26LUe zR}5x*S6X%kDqc=Qjo)2}$6M_AE{6pkgHk_wx&Y8d&h6&fXXWTg>EHr3Pk`IL9U$vU z2I!F$WPASKD)K`3IK}Qq3xs@LZ43k5I-zRNZTArpdTW!dgbxs1(#=pfD;-fsnITU- ziR*G}TAyHWT@tyZRxaoa9V!laPUl!}b`WM;1v{-##|3N5lIHn~22N=YkAta>&WsD9 z@_G4f{wdHPb!*Y!5Gk(ThUaa%ILcDy{@v^*!qPfA25oZZUg)6gvH0(_)_ofuCzA1+ z?1ZTve!3xIlH8ngP2kDm^54-ei0R`<7)A;47Fm-Kx9)Sh4N06#Db)tVPx42ft^=us z2(TY#X9tc|+~?HI zG_>KgXGf`b)9G-7+f8J1H7bTRgi6&__q#gUt+Q)fKIaV@f9(5-U!3o%D zZ!M^U8SDzZ#YIc;&XvCft3=mqu{FU9yQ>V)Nm3y35|P94xQgE~t_fi>{7&gViqMl* z{-}!bs6Y`(C;yGv6kG;_+^c`!QGgJ(BnX*T<;o6SM#6XJ*5Pcc(+it)jW_Q@BoiEU z7@ITo?JOlgiT_PTlcex2}h81GdPEpElOmIOMH+<@x~!^E7l zW+fpAUGqPFm-S>>QhBrFrvpOvffg;`ee+ELi350jbizCYc;5w=-Y)U4X#v8+^^j-K z2dC56B>*b;0IWYgk1iliDhgrj*QYU)fYUnVS2~igVP9QOIEnOuOU|F3ln~dr3^SGg z%+%ylo<(Dn+gA#~ey?TF0xK6m+Tpa($ioaFuAI z=I~E-;&pL=W|Z@#$!{4AN4*JMnA1{b1FYSl<{OWZmBsD>d_RfRJlg_mx?xB{d3!@D z?1q`hu|?U~4~Utz!sTr$#~WiL@kJYP9~K|&p03<9oe0oF{D~G8_v;3;6ab~*WwNhq zre;68q^gspHXLSDb;IQDUtEPdv+p1(%j;RnHqA5>uFH%yf=6A;XGjHGMYCvtM!cU_ zaVuTeW3z)e6XXfo1V6TG4TdWaq1g@?UedMZTXhJ?OG^d>O1euU(CdkmxKK&L%!axw zB9SPA&l1CZcVul;R-Roap^}7~{L=TOyC!A|vsEjiw<-Jf72Le0VTv1Q0AAZ4igJ0} z@aeD5tWP>>dRzp_(8{p0iQEQ*1@AM=EIr?1I}1QmWiZ|HshiYBIBQh{GnvawS`YLI z8ZE!4uE^Ye-}#7ohju*u5c#z$Zvm7m38t>v8e;BTFL$AZNY?mk5n#{AwFLXJdL)Mx zK09yP-dSRng_qfmMcMmptR3m_)&D##XhonkxfUt8Ua8~^N=o4ac6Saz;4d*KkwdPL zb;SIQ@TlL$hK>0AWLIXgIUx)*W&{WszSRm%Svnn`M8P!)5^)JV@Qqq}tqUoYm2lVi 
zGG%$j2<5)={XvQEWeSlmk+qp>*r&Ct-{<@H-^%$EiCoLPAT)9_w-6eHw-5%3TyW~QNZ1RGZ;J% zG(qDS`P2P9%JBh`YUTW zvo__NBbg#t3<91bJ;&;4G*Sk1bdI|vk#S|FLYh0YnZese}w{d~9^%ntz8I zP)1lqvn6-Kr!gN{JBH)_%GqXWX}2IK<UXg3r_$o5z2@Ih2Bn0JaCVNIXoGL$Hr4#I-3BdgDe(IRKzZNX zd?!@O1s)$ijTi=nADEC}pR42JAK=GLT%z#_(?S1oB$x}NXaZqx<{zZFpY$H85uj%l z0IC77J3{ui8IV_vZ{nKmubxO5eSq=$6$h9EUN7Gef4(D1$5u3xo2%R+pB-5r4clMm zTL$?4yTf5~q$2i!_w7Gw8C574Gm>O~HGYdF=VHR1v89;{h{kTsF-Hy|BfL^v#bTO4 z>~7g24o$f$4jMztx;zjl8F1U`rdiDB&MP^xFZC6&Rp@gYB+ghy5A-DAM)Xe)X(%C1 zU2th;X4JLY!Acz?ijH@a7*^Y?#`mn*jlb`^UzuI$5&7v+oXfiHCr!$;i6%2yyZTBG zFJ3pF_=r^Ws(d#-utY3MH#I3+;0q2duEBlL?1J#3LvamPAA{I5n0c5)pD;sJ+m$qxCX_!kp0Wl&y!#C?>;Vw3AVqMNuXKr7 zx22suT2|`)Jd*`VKRC*qu0!e`rqBb1V**ox-sx11ZqL4#Ewb*1LlgLwl8i4Lwln%u zbnOy^urqXFIDPy=trGc%h*{vpF5Ce#H-n;uj6%%fw?$N8{92$9bm!qWIp4x&cehB| zU*DOX%D4L)D(~lnn_3a)?%eH9HdKq)R09yWMuq-;)n5}oV@mK?AtIHY`RCz}%s0RC z-}%8DSvCY5#;nkN{hH&`zG-DYkxLa1kgY1@DDdW+Je(X{~f(#mZaHG!arz(jT2`_c*utuI1 z58;X%UbsRB)q~TdrwMb-Q`V(2C zJn)a3D%6N>_pJ@Z;iz>*23So|F|w(zJEL1jjM4_a*vSfD-1DHkJ?P_icq1ZnTWDEjpYWubDc*@K_l~Uhx^;SLaf_$!IWd?XAKWc?+ma>Ct8UgrS^0vbyCx zegc>ZAfMvj1iKZA+PadYB@(JqYxWUsqTo8>2c&tRe>#W0{+#4c;XPJk`MO?iq!cj@ zMH*&>I}txqRM7FLKR8Vb|ceZrs}|A*muwG~y)(cxeJmzAJE8-e3oNOnJA zG2)Oly$gP~seD%TrXng|Gu9Q0?dRlDlGNw^1LwPtlcr=@NDwMY|*S8m#KCO>DL8LlzHnI4ji z9;RSI4#VDYv7Z_p>bJSEZNy9`i=ex%QSwn$UA~6ip4X&xKcjgIYU*fg)idi4xOEy6 z8`U&H#C*G0TUGZBDAu|9RTwyELiUGgd8edY@Neo&XXfjkNBZ+uLTIvoq-`-63}^et zVqc#=Q(^950I27%2L|F6^r8RhD}a9n{dpn$cQp*)R%fxao=ASF{_+C=d+!-|QP_HX zG(ejMdj4a+6MjJ)|8xw1?s|GRo?q1@z#X7;-9D#E>|Z{`;^a4>djl#^cdCP&LSqXo zuGPiKN`@mE`&-p=DIr8%wj1y)tZ*|qU>i{9>+ngR>A5p$l)ieAvm}>1Ir!Jsd?@WW zJJtxV`g$u4!QlgnF3{-r%y{W4GzTK@t%9<~@2nAftt38K_gv#i+{HDKQ)qppl%g!- z3(eWxZ^;f{!@B)G+d{E1TyP1j?aP(5y~wl>DzxQIDV@UL=KsD@2Tt65@e#GO%idLsj<%JG-bi)W;2NR4WaGZfwo8~Yfy)o z#W%hq_E=(|1Uvy^NY^zs3Zp?np3)>S{9rM;*2L2NxvcWr^fo+9gq4E(D{s(z!IkjwD%y2@0yJ@HoCe9_-2dxsuCO>$*(7&!J z296yy8DJFUrQO}?6uQ)r0qJ1m@lTN%daBi=5+zKpbS)eW?e+nT4Ls*$JBq_rV&&Jw zQ8ScQb4R^+x@jGY@_^PE=LNfKw8hiPvpE4&Vle2Ax6*T)Xo}tb$m@u#Hqf#ouV96} z|J`iQnsZ+!#tWYxD`G;RvZ)49*_xE<`6w4LZ^Q9TVV-;nX!`+t=0udjN-#>uf6vnL z+t7&jI)l-A5qlX;z3bTb>T1Zj?gEoud(iZK zx48{sKZbE#zEJ48bX*bBo;m3jgp}g0akbyKKXJ+Wz1XSaO))*T@bnGud>6vRGJnR1 zXg1T2I)vsM$8@j;3*JWoQUP<9bDw)!GL0athPeP;e?D#OD4CYgO&F`JBfkbc)gn#ao}_$-6U; zhaJDLY8_Agy@PMZc{&_tHl1g=pMgtyQgLJBjkY<*TUc?ojJg ztU$?#Uq}{B>%yr-X7L^=PaU%xYUNXq|_beMWHbS$`n2u^>IDnz98B4LZ zvKyVVy)@Z2X5RHZ;NGR$Za9DoBMjUO z0&1ALqB}QpK!ZWX^V71T3oi@*#(3_#_-&+rEQ6v)U`>0`NaP>iBm4HQY@e4QxrWWw zU3ap9c?*>_1THmC-Dt)$(T(Inf~%e9;@sMp_;CE2uPfvn938LE;G1y=FNOQ{#Fw|e zaz#Z)`7W0sp&((*x;?BKgXVZ^HGK~>Won#V#|`X^*uPm&ckLc;jB!SE+jLCGsecL> zpUbk~=_yUe6bO5dt-bZg_m90|thY!Q4zF20l#@)hW}l1mxLdO$zwM4ymLT)G@hAKO zpr1e~9v`=t?*>Fafz8eTn46FR1x1)?P~zwALz@QJxX;IZ(FOZ&J|pb)6JP?oW1@Zr zG;01MKy`mXJOd=CL3e^zKa0-GKS^!i{|#ucybAVAfxdi3J{JGyX7#Dn$qyBLi4hcS zasB`Pcb_wT`E0-QAE4W(%?Gm__L+{$370}X0YHTA&D#`w4nPlC2}r;l^#skJ-Y9KM-N8^Fm!Nh1ppt z_OyDGZ>gcJW_b-5Uv}nffA4^*7K^~>dYpudsLdB;L>3Z-B^hOy@eb4HM9Hs4$rsZh zCnnLfFZNee~%f+l^*Y__hYHZ_PEq^ zlqw3XPeD=&++TL)>RZ|uYMN#oeyFp^KYBISFQ|Ou$>-Z=xx$96h|xGQdm3)38LfR+ zmgBS1#}wYC41$wY!NB}C3>#-rrcJ1nt+{@$s#s>=ux^ogwd_)- z?@sH_sw?6c-8a!e;gss6sC^18t!!mqN+)_DNEMb<&E1MfzDbW!V#FI2-GVdSTc@cB za%TUbvGkzl@2f;-#LVY%IVTomCL2+sVzDuASKZ5M^3!5#&{DF!9j1fROrxG?rXY`Kp^MPHL_bRZp3hz;nPeAL7#yZp z$jHVC>z^$OlNR$1?hwHY{*}SSBzJ6nn=f2T$ignM{mCUj?Cn#o)cYP>%HE5lzsxiJ zif8J6g%Vve`K*>lu`l4ebAbuo?LTTI8uv8ugo?PvCsY*E{wsY(3+A5o=f;;+`T6;$ zOwC6l4BX|xU?0i_Im&VDX#T4%QDK^WCEr)PW)vEECWWf{9a0)_9~~ImyNEyX&f6Jz 
z1WKI{P?h_x4@ekIRU?SDBHQFk#E;M}w&w={GZ4>B>*yVL*@Tg06pY7$4|9a#YWM{IG~qjv6(u1SEwn6Uj4JMW0}K(r zDS6M7x)Ygd+v={ly$XWnNVColNuA<`sMuyAk1r>3cL%2r_0vO`=Xme;^nEa!@(8Tk zCHk$9UUXBt6k;_2{>Q-;ODylV9WQwaqaGM+EC{0ej4Bl^ZEIw0EiLu zli6GbMC85$Mo+#UjH_jhU1v$TZIF}Pz~`#LJO?oS!*G%44EeFlTT41P+}9q0FaOxO zWk-9~wA!KvVtQ>-=yF+s?F?W*s#9XBJ_J89_{fEx;!ljHA2YzPMbFq!O(6=WZ%l_I zB9jfJHWBz%uo4JCMPg-HfWQg(t`g>1mibK1i<*<@yDl5-8whO137@^7U9#%r97tF= z#s~v%);KpFS9(x4uL=~Xe0DA%&L^R7r3tG0yW#T$O=L0kGAvmW`&KQD?JaU@$78vfofV5Iq41p|?}(Bevq zzn{~%H)?ZLZ1X5WtzQ36bCo*w2K&~}cR5F@muuD?%iRl;NAz;EU2glTzL%$bwxO*~ zEHRiunn{;`U|?--L$L9&*)0M;(SllOj$?u3PaF1%N%#Y(5y&}wJ7fh!K>rzXAYTKh0H*jOp#RDF z26?aPJk)FWAE0m@eh6+Yvrc~ikIHv`UfAJ>{(397e-{qu2ZYB7p+`37EkfnS9B{)9 z`OH73HHD(WnS(!4ehrR7vZkzQ6YG}XLB-}z}UogYJ7Dt#e32o00| zqKf_1wg1^E>f7}0CHO4vcaUb)(aoH0669gwtJoZBXJf*NnqBkx6xYjwd@vZETFB;) zokbff@KYbWAP5VEjb1YvGsIzN=EaMpDYQm!{1IuR!y`fcBWA(UjC|57 z@3=9`>s#ap<&N;w$3+{RfK<2zD(Qp;6Lxz^QPw%v(ZOo{LUx@I8#}?}iD@D4BaGcI zZrgNQqQ`AJdJ}N=5jKMNa}M=X#U_cg@NEtvrY~u0jEI z(D69p#MbKXzS8ZNeI$?QWhy*G7X)9jncSZYAjjlc3_!l)0u7(U2h$l4!=EE)kPyAkc3GKNj|i2(HJl4RPdEG zT=yjPw7)0^2rxN1EW+l{-bsgCVfo>H8-Negm|5_#qun2u5}S3E(c?`DhiB)jFIa1( zHO5CFjmOkOPTr>e69b5Lprsd zrY0}3hjIxQ_YV%$v~~3wWbm(k>+80ZH;s+SeBUb7I_&DDqXRp%VS! zRH7x}s0qx7np;9t@jPKYUl zN52NcYhi)=`p=OWt}-0WH4kEAPQ_`~wL~m^@)O~&fU%nDnEY#{DG%>IQ=1Y%N}W-$ zc%>?vJlw&m>XOBo9e-^24d-?+=;Zv>On^2SQXLArAD&OzY?)NB#RijkOKvIw#H7d& zt(sLMrzEpUMx7588Z*_+E-wkiYJFC3FNp_lSh{b3*fz}0iLMD7aCXaa(<%M^foX}; zVy1=w`>T+hUJf~l$r{hWfxIc1D;WOlLtsH;x`)ctwlhB z$I4dzosnlgCkx0h$2H~7LgPDL&9!{Q+b9cy3p(lc$C|G0z|0OKn2@LJw8e{Y(#7v8 z-wsq%9T^G~;cc@a*`gOA`&%SOgC^U zymUCH|8!DmW=mX8!*y%zhuV@3=;z^7y&?e{!w_nuC($v=#V>Dg60$Gd%i5B)deWI# zX=NHZ6Cr_`q5FRrScB%+Z?}cDPSlviZ`2CjTK~aRp|lt(Lvx!mh^LBq<{SFYbH26> zVj-o)fjk8rF&ijyB_GvU-yv-s_)=#N|#YR5>dc)AF$suX`4#^}Jt6dPZZr)~|HvnFsoCc7e`npnJp1+Q z8_T}#d}CP$KVLQ&Oc(VXf_l1RC3kL>Jcl1(A{kQRf6Izk3ib-D{%I=|L>(+oN4gc; zlDTnR`i@Z5`0=xpt5r=}A|jgrykqAewTn(G1T{F5X1ys?!1BlE=4VZXE3!zR%akj1 z)LgF|LgHDsSF*aJR&Rh83_{R$6WnjitjtH3)wImEN&A&DNGc=v8R%-G%!8>!L2U8?%j@aX`s+E-YQ^(Pm6PvlNO{Wv1|Dqn zwa6X{EKKW68onxgv!ib!X+ECAw@6!4=Nl*`l3m?u;iW^A9@<2f4z z(R{eGUsEmt7kLC61|49M^>%+0ZPu&gVj4@zhEC2bOs&?JaZzJN3&OVw#QwW#96!dQ zSOJLqcc5mUWCxJA2i?63{-@S(K5%e!{`W{T{VM1M2o66pn!g*M9DRHeOd~>po@M~Z z1|UQ~yGdAnm%z@(D?&;a3R}t_?kV7T3hiU@1a$OFhzToj!yaR17+3#Kwz;g-a%dF+ z;1|1*6ca;fbIQ_a288ope{vPXYE8CZM?|4tlH}E zfgia7E$Y|C@*VrvjhBH@Q#C0AK4eMtHWyg89ERo(UiM+8EbQ~Yq#Tx7O|oBVIeyP{ zzhm$>?H)}wJj=^F5pSU05E)%BTz11dXIY=DAYK?-hPw=m;kf)1Yxg|nIIv0H>QcYK z&zFDHC||p`vc+|$OjFUYA&mqoX9yC1zoQXjJZOs8XhmF5lSWk8ic&?d1WbO}?aj)4 zoK-bA(wjH&3hGTJ7;f0mNyMG+8a)q&Wj@|3p7Q*=RMg=lz}Xno81(KT25^;oqt8?D zyhgUc7P3BkP?tW8*8dLrJ++uQ%5SC={VQ8Mx@h34IwUPBs z+3TE)pyPV>0c-2gStN~IMXh^M!gP=YOSD?D&MgNbr_FY=^o3#nVY6?2b!|lmn{4?< zytagYTqPkDzj4VD?0bey(~ak?UDD5jB`vUxzT&_uyz>|TS1m^eJ4mS)ML)AHhi*q~ zim2ZCA&JDOU>=_E@#PdxXcz#0dK?CNrU3AFApp_qV^a)8 zec1#uwzUfSc(;*S^>y@ai_MEza4YQhNdXt$-ztg=uD~j9^V#ZjVHiqp>6P(#jC3ZNBAN0vX65vIV79K%&vzOGbJQ+nAEC1YcK`55$kT_Pr#7AC z&yUzSY(&bOuPsfd1pMyZ9k+l@wL%Mkf=Z={XZw?4iip;u8&e|XIPaF@>P*Lw^HvtF>p>WHy;e=TUEljKSCL1!(NL_DI& z{L=??d-SVbjg23gvOJ`uMj$+O{VmP*Yfnu;+zPY%{Rt=C=h!E6#W&hC?2hJU3vE2w zQMlPm7jgC)HNRH95=UMxdVk-Ktn-M?;zZ-emLr+R%&&NE=Z1Y z1T!0B#+zU#tZU+!(4T5HFdDqdOFzWwk$IC4Q^+(^S&t;^2|w|_zkyA0PcrXj9^Sq{ z3oLRkwp~8HI_oX^Zl**ijh!YEmY_BHrY|#Qp1g#FX8!!ZyaHPH=7sc|`gaKu@oUZ( z-hWL`rOF4!7xR;pp3P%Xi}eYLKvBDc?nlMsV%(abNG3*4k>PS-6-(~z2?vd8Bm({X z>%BU^cCE9-&qqo$z*TK7J>a!T7IZQ=lh#4gKjOHxRx-~k*-w%qCth!SAK5N9i4e06 zmZCB94nbKJUZz7W$s@cNMY~%A;jfm}tlm|E(@?y8`B0y}Kb}Bwt@@ko1GQ3RF_W)W 
z3^Ds~C{={4`u;jwDS@UGH6DyuB!aDzOp4|`d@f14RO*ka2RE%f-}7U3YLCXBa+WiZ zDx5TmY(PV5opat9Ufnnq+d0P*&#W1vQtTHDI2^@-pZ3=~ee+P;@xbqp?W?|$r& z{ShZ9i*uyd^obV9;W$549a}dF_P{`KVeIv^YC6e@q4#|Xv1Z~&{71gi>AY}=OP%3(n3)y=BSigVZkb8db5n-4C8kv=XqcSu zT?;2=!n}3@KR0)y&Cm@uv&fGQdLG%sjt+m?`U~pqWhlw0s)4qZ{qE7n;_4@+$iU22 z49bV}h03gYhr)SxdlGx&V&I?VvhUEyx}^4#XQab*Nv;10GnqO;wi+ZYiu z1&wfV$JG=~BrJ#a^(~T~#!=HgD?8A~m|VQ-EwGsJ zyvo?bMwjNEujD$~*a4RY1|Zdmzc(`n_*7L)-;cK^OeNILNY@hih6Gd{cKcS?`l|QJ zd+#mrrR_G!jA)I__kHImSkW1!xq?yjEwo7@(R9Bd`ajO<{5~Fo*Pm3q2dNs9;2>u( zOt>_W{L{uAuxefFav6OA?K~51eGsnwNx(WYthxcR>@YTG+7o=&JU_7EFw5BcR5zv7iK|(Sg;%`ftx}I=k3%@q;N4web?4!P@Y4 zJoM_&xz)?kzcRY;Nm_o z_#TbUOY$3%F^KxSkHT398)T5zhG%B&y@v+v+d?#(Jt8<@p4UY|L|?*3xJp^w;3G4zpXX82C%a1Ffz%Wr)))6QMV z<`deO6F>2$z6P7TdxwKZ_d(Kp+x88aK!k|oVp9Q9r%W)XykN8Z%hNJ`jOMzL2dr@n zHLv7`ERg@ zNrv9XPqV)zCBns|{ELsOKzmE3SB(nzl$DRj@31ecL2{<71R!qp0J<>~>RE>&w|Wg? z$W2v0%3r$5-phHyeAkZJ_0T`rw1PvYgf#l6=ck4?SsH86*3@F%4PEyvkY`5QqFr%< ze|{Gb$tun~8fz9zXCd*gL+$jcCoAdvP^kPg*9f)}SFx98s7>A)(m&VmrX@|Z>G_4n zgj}RmB-_JY5NWoW#Yx_BDmLHtGs@9E_I*&hnZ!#$}s*3 z_0O6ryS$NN|G04qSTvoU9;VjoiJw;Ywc@^%pC4pGeqh>R>bmR^YkD|yb;Et2!aOb* zCo8DB@OyEP$;n+T)x6KxJj)AZOB*Q&8+f}D6>D8IynVQIp=zaB^(o80$ zuZ(Nc0k5(9jmhBdkpUEx$?WAwYu6CZO$J0Zx6M1rH~nGHe(Fdl;x`W6yU+9sV+jQx ziU1!2^LFlityj&Xd|hJb7*&7Vy}H8PUy8OV<5%2)cPvys-Vy1q%GQH2Vvg^oB8lO{ zh~Lsv`wDR3`&R1?*j7$Wm`#G}64w>>GqDO~P8DZmR+xCYVOKnVD+r#pvsAZm%Oi!I zKjO?yW!CtX6+h629+iuFo}tSP>-Ip&c}J#`L6OOBD>~&Jc&^%6CL|bth_;S0fxKm? z?K81Tw4*$yKAzW8wzm``UgdC#XL{MkYh3<~Fm-1sD;KiLduQLAVtP+eSFn~*$I6h#SwNqx%7)G;})PLER6YmPAR;$m;364Bf; zSG-wFzbTa1Sqn{n_UxYZXhd@_x7Xvbg-e`DLoWqBJrQ%OKriT3kSP~%|KH8$(*Y38 zRSlRv5Y=q`zr9BTNc#0*4q$$YMz}h*NC3(c;4`v_8+~u!PP75L8TV!D_uM^@PniQi zpMbaw12aSI!~Xdev;3_PA7c{*Js0 ztzLdHo&Fj-(v*2U>?$%g#JH%a^jlQFpIERBkHThewv?z~)ZS}CY2_dYE z4@Jtk)Y{~9l3R#LdqSq|hAQP3tDBWowl5 zHW2Gi6ZUv^stdbe*VZIa*!^Vzru1@ELh?eVM(d_&clNx-=fn_HR)2!H+sO7opEQFV z%PfNq$0ZS?=&tM>VEQpHtZD*yqmS8r9Gr9h3l@5mro^;6Okh-=LV8%Kh-Ir$^oj9; zuD8#1s$LT@ou$--z8!RHGN>9HlIE5sz{C1Rl6hJR!{Jvm9q;G#=X=KF@XL(MLQ$~> z;>&gWAg=G43SL63r>dyM3jzBIbt0HTN(FH-no~EF{@rc1>qwG?8broLyz-`casJ$e zU~ILI7+vr9flyU{IMhU^u->gil7l!7(C$4&ad+9n3q&i6hkxer?337-P?;{gf z#>sPYa*bF1_Uphx6rK2UOr`7xDSP*33{I^l8;{t7xi^UPMP_=(K!(!pYSl}{OBDm; z*ueP^USIU{g{walq*8{v3_@jx9JE56`}&ral|~zoIeP>zcA=?T(OesJ+G%{Mz|ye~3AgAHDec zbXQ*$cq@nkg}wvf%@b_Sr4(Mz`SA0@P{q(T^xGnM-NfA{uwW80FyXmvnPB;J;1`tQ z`ElwXlsA{t-Bi-&$+{{8;gVp4oI?iV@(^LAm(YjPFeFR*G)hS=g6S`eYJa4w_-|&7 z9Q1B`LMuNO4DnMuQ8v1#3U)Lo^!AiOH_i*?VS=c^!vX z^b57o$xBtZwLauMhp%(Y=5DNk?Ul!uso8f4m!9zz5WV+U?BgayR!*J`b5f7*;5{@O zxDP(9?%L$IbWMd=$I_=hp66LN(;$$dM!Ob~eDZXL+NXfv9iV;{T-FCDp`QU(1cqB* zbO2=?2qlgA5G07U?{g*l;_M~Lg zzdoh@xem!=@_th7)cmO)Ts1cMW}*B?SDK0luC9Da;<7`*q}O38XX8t1I7|UJYl2~R z|M?l5%-YDGPZ>Z_+LxNyttpYVB2?ZnHfUVeBP8 z@xSyW7)v9u7I=fDQ7FPyGXV>K@EUlgi{c{)HTPN`%M_q(GR}^pAVXVetSW>h8$*67 zN!NdS&eiBjU=A!!tVQj8WH|8Yx+0!YpQsZXTBVdwQ&BYqC5oc{hWLgav|1zFOm;^NZC>Fltq^EWGvpLu7~VtT zG1$}xBhg~@bZmfdPmfWpo0*c>k;D=rY>?O!#q!e0iW~MAIH~2k$&^L{bdcE$Hv3T^ zVKlp(g#<^tSc%}6i!IoI|Jb}F-m)|Y{|{SK`k88gH2?|%1WW`(zUP*KP`ZFf{Fo2G zlY|q(OA_1<=6^zxdk{cuRAS)3Gy@2BZj%9&GrrsJj4@n`z}@>K`2VV{|FJ$SHi7#m z5X|=b)e02$i{L-t(`r!A_VYJqQDyc2-*Fgky#>BczBvD7IGue!>_7SBc8Kvuu0+Ep zQ{+S;JRHJWnngM_EVSdQTr7J%}mz$H500yW0im4(>Fy zZk%qRrnJ}#zu+&!1!D2a-DrlqRbVsYpEVQ72j*sJPqcSq8^ufHa$LWX1*sgXqM%$N zikAIs`lGJ0{;ynmknAd>D7`uEna3uTTeh)ijF@lVM+SM%Qxd8-5NWUJ;K%v{oi-T} zsW6-5**5HD@Sm>vA^cB2K4Mi117&TB4c2c{Im*3bA7XQpV;51K4{x!_xQD|hN*5tE z*nbVGkky-w+524rbF-(-p0RvP(}{C;Q)r;q?l)+{VN&T|Y%`~ryQUM;SXF<(VS4c} zW|=nqP5-(lVFKjAP+I$*95k1k^7clYavfckxjelseYNzFu2n?tDz_D{8rqe-`F%+> 
z0?Sx$@sN6>6{R3)yQWFhzF#H5S90G9txHEpQa`E~0Y$LR#R~>|Z zi>e{ow`Vg#c&v^mlXx0kIpS+_Cq;`o8S{B6P`@@1j3mpLrX(7 zpGjxNpLt0F0*`I|zhV6#y=&b1VG%)xdoNKXp>fJ4s2SqH0?AJ}e}B_@P-+})eZAa( z<`U(5-kSEX74d=zh2LU&a>oWHhMK=?IMFtx2)lNvtOoNWRReP3tk>_T3dpe>vU7ep zKF@lE>gcP%(k-vB%PJ+$Nm~j2!@iFj;H%%os^_rzZ&XeJk0&*zWm+@bKq8)*ZhM-m zd+R5IHDN=I2CkZr$aFC@lud~OxkY>r$lrIArii&O1BZuBJ+_z0=Mm1Woz<%do8ygD zZO%`A%0^a5xMW#W%z2?pMY3+K;=1?<^=?nk7en*5b}I_Jj>)3JRO#TkKC0So-i4IL zd0S}qf#b1ZmbQ>6b0orh)j<>W7i{!(&0~!b*c(!^Sv*e&(V@0OaDDZe2HvJm=uxoI z2|2Vu`(BjriM33kMQXzgs=b%E;DUK_*dM7#XL2Z*;mq~#v^J{)mq(GSRKD4Dlm99U zzuzuvg|>WI9U;sl-`&LjU0mBB2U7LU4306p&_}_`7M2bL1eJM}(tZR_qa_+BluB%?e8Gsa4>Rb0C zPg1bYY6a(EKLkl4FOAZ}bv9OhW`?KU%)`UlDFsA`@(`iT+K!-rP68*+TFF>)Zf+ei zl_QPn545?OG`%d=e>S?}h?heWQkUxlm8w}hM?#Y+KYbQ`yr_0Oyp!WLV9DQUQ7n&= zBgKfEu7CKC)u-zvM@~f$>_+5$hZF3G^DNiJ$?s#-O4L>L1ye-L|CJ`HWH2q1i;lK?-;%1i2wg}UIa z)SLHqI3=#r2GFzILq_=Ji_2m8wJ09Bv1mZu;HUNq>G2A643S1NCkdyyaI!Sf`vvy;U9+8fAILK zQ$#j)vv(l{9k7tuyE>i;2NP^l5VcmS8u;SU56)z7GNN-MK|EkZiRA$;zlQ|m&*bIo z_4Uarb1L~7=jwXqGa78A=hv}&7d>SfWO$L9Uva|-(7&zesr>f07=Hfx2=Mj*Fie0X zYE8*#$I2?lM+nzq*CpvrHOD*F-~d|Ya(dkF`SLLyiPy7J@JFURmtXtGD_?35?#7MT zeOgExX7QlM`M}>?AADlEL(??yN_lqW@dw_5)SR5X*b{-BccWoFR9`{&A-JMfE?g7l zwK3BlC1E3QGsl&YEH;JQS@RaLd(-bh2+}X<^I)O^7_3LeIhPjgef2g?60Ea<3}5B8 zw%)VZ1WUOjf_;8yV`4>LyD#rxcfLB(@e+yI&@djkNq#$(+_5SZm#{7sH}UwMpfwtF~ZGWaA; zy)n-za;C@uZ-x6Bc$3Md?DaSrN!H<7R1;Rrp9^w+k8=r5;~c87>fgi(4*cufCtgmR zQ#Wl&Be>@*xaNCL-9JLu zuUS;+RefThL%zN=YP|Jq2SeaV4hQGz-$B*ZThS(Ia#zrw0zN%ACj>2hyESyj>3%^9 zrivZ8W-aEsLAbNm7N_^d@PCa_r>06$RhWY$wAw4v%Kjw&%&)#AT;K^#`L5}(1~&~z z0q#RFcOX%}4?#$Dr0fv>|@#lT@JdG!dN%=<;CDT?!>{it=K^OTk`1cYlS zT^Xn++b+Kl4pYm~=EvQvtXjeDe*|Az^z*u)GFWNZlf9p~OO8zU=aPS!=nLS~=x+`$ z6$^l;w=+m>n3#%uIp3sI)M9um@5zQzuq!mXu&57YX+{!)0!-&u`DAc)%DD(?GxZrc1WqsSUv$%E z53h#Lx_`LWSN^1cXX5B!^<7ZY@{g3drxD;X^Pf<;X$;#RwBVR_LRSBgHk5=^H7glv zm%bmz$75ol4qUQIyJ`J#QMwkD$KZCS@ChoyYku`3Ny4rE!ooMKUOriE>-GFrIo5b( zm$xw5@u*>t@3Oa$SQ0+e^X{@@QRi^d0{j$)4o1n#eKVn)*1ECMD(p?2z|iL{sIFR= zjDE5=kEe>=x1iu$>8MH)r_dC(6GQiQr~i7UT1aEP2*Ll?o zSCobLuRM?{0`JVxYV}3 zYdoftXD$eG+C0+M7C2?q&*YhVHm;K^Ft6BOjZ?*A+2oow*7WvGYQ06v=Bw_kby>b< zKbDHrPj0f%F&lsTsKm3Co8NlZ&_Q0e{fAxZt9-lj(UV5ib{rC!K_sKOlRX85cc?^Y zZ){T`iq=>?P?GoY|01Sw_a=7o`UA#0yH#$jKtdW>$cLKgSpuFexNL=u`?J1gMuPT2 z)NCtB(JGMGLFbL7>o}2CGKWKuqi)Ac;~J`Pjy}50EqT>(NSU$9aIz7L$V->HoHxWA ze&F?Y#;eghon3wsQpfqE<#zjp{LF<&XU>;s@Sk4BQ>rUSFvFIzQ&r|>ceGQfIhcyc zv}>}mH>SK`@r)iZ0-XX4`5koFg~4LqqV|6aCPX-28zI&{RU@n-YgX6#$xthji5^P3 zB{I7G$rT;#U-%tipJ4(QHmy_GDwPec&#d;9-BrAC=8}1!J*WPBz}bSGAO$jW(z#@I z>(HuPEV{jlxsv|nv1(bWGNhf%tL zXz!QQ|sS|N&*be z`WgU4hM&yFG{c7F|G(eXftO)G7wsAVT>p#MLZI)Z7jm~2B&lADoco*XOX>eS<8!y) zQ+IRkhz`I$fd55yA;8iH2>y4T@=VO@|IXvudC4_?^Zjzy8w6&weonCR=POYgS_M76 ztZ#S(!NTgdOyAY6U(nauuMT$?Z*9Cs5v zVtv!fl_D=ja4`#+J8MS9P2#SV^8}jIUdUld8^&Ct?gO=`uX}S zh*A8KXhMeay`bpn%rM!VqnOC&YP<{#rUpc}A_{u=K=Qw8o(^C8Q-W*i(j^YroE%cP z;Br>1bMn{M|0?~cfZZZi3__6+GXK6MfXU~L&#|xTa1A?ku+P|WIAao(qBY`}`ZB5# z<8}F!SrR|>x5E&`8+qBeO4U~*5B>leM+FHZN}OLp&f1LE{PT>fUgMbfvJzh;ibS*J~;{wkb{@dG<}=v5CUA4B>i#%(Qd(XmZibukjD|^#v4s zsU)J6@;tmhCUwT0giBBFk3MWnH9`}};Dhz<62h^FZxUWcau$4JL{o^VHbHTQyEt`O z@7M==8yU1qdfll*O7Ug+3Lb%cLS|p_?qI(u6`*ZxeHt_fefS345jg@|0N5$uixu!0 z(jS5bAF)jD)xcb0ww63Ex}VB+(1&2m1pxI7pnc*Eg+5%_r~}XS+Cx&$Tuw)sNYffh z3aLpxU$b0K$~e;$8xxMpY|L!|RFY5a<(GzDP18u$twQv8T8)BDg5Mh6L2Axuztz`` zcLEjD2SsI+(8S-g);MXYpSi)fw=NBQagai1C)UO~9!d{b1w~cTy{wno(GY_X^ym=!G95)ABy4HB=&&d9y2G&e=(p-5N-%8q=N%?_e@ieaF_z9<}ne zvI&(-{8y`Uq!R#L-ddUZ>bkZWOZ|Nlq$PJVS*0< zbGWx!QHizFmF4urA4@vFZ6a8n< z%qqtYKSk8>?GrMLDI}xBQ9f~JuR=uD-s0?l%9VpFk42Y+MeS++wPMz^&tZ9)v!9rT 
z@JCn8Dz@eIY%tbPaM0%V_K*2uOV>;nNR(G?H2sQF6S9L4oat)_~P z(Y`_L?gZ|fmRt@^5Lrz|bwqF65IBn_gjKbRl=aLzr-urZmm6@Jdl${}Q99fmu8-%4tqm~1vXeJ$ znuE!kecm}-=%I)AwDFA%xCf!_Vn)Y)uS8{l!Yptz{aQ<>2>idaRSpz`0gV0Etspy> zHPVqKtZkybqEETg`rglnPpGXZ>Tz5s{k9(I#PzJu!2aqGu*`c_OEo|qg8=&@byiA> zikRVtjH{HZbgLAcl=KojofIh&IJ^|~Z1yZU;cmig9W61jeeKd4wmtGT%GD8(+ff$x zH1Afrdk*eDQGZ=%$y-_SH=2gPAexB|Bbq1JOoN9jcgQ@{mz%-}r<{eT1+?E3qJ@J* z^_t9`=ZGU+P%?8O z$Q>VE(d9?I*GG)lp;h z)V6;t!Q(&aq6{<7OtB#7^&b4fL^vM|)9b)u#BDR>L+wKQSmZn z*0{SsT2DwPw;3s}8vQhx(T{vR^BRGqs2w7u3hD?_N|YCA3ur@)5V0YV9lcChQOA=r zmijwR;vnAcO|qG&V>~H72NdYjDDs$su#?WE<_hr|X1%sc7*v^*dT5WQB^vLLT~0Mr z;!N~}RSeEBjq^Vg2(OcHRHp-lU;(wx&a^3ETWYF!uoYqVIR#OW)hi zUAo)@ns?vtAD`Xzc05om(EPdjwTA9zwViP1newK+*=Ry;3@^SK)gbn&_gcpg)@L+j2+_OdKL^8e6xW1ai3 z*GS}RXM7~94am+v;%W%IqKo;o5WaD_>7SvjfyyPW&Q{&B(cn+K<%qQ(l1JD0dx4Uo zH4BzgQJCpoHe?g+bCnjc`O(80``x2cL7U#lX=>0FlExB0<{If(bs!=vP@^(j>O^|g z*6VebgqcH7+&l~DB8>j`vapAHGzPYI2)*idwH>bo?>t<+dPH1i^)~ri-f8Kab3(@y z^Di!v=jeBi;Y9qQMEPX&2d%0#`r9&j=G;CH#$Q-A0x7(mqnr(_L3o<@%S8BGG2f$8 z9imz41Q}T$oU2VF4zP2eb3TcB3db?=Y*~sR9%MoWAyT=fT?_Y>HLuFxUhN0h#hu3& zgj5oQIYzR6=Db;tn0d{xm&gjpilN!9q3-3!>tS6N7w$AKnW(VBcoS&}S?baGmf}w}w<2-i}&yQAmo*$h-l)5!DB}D{a z?)~V0iC1Nj4le1coeM)WFw_9j0EyNu?GJt_LG)P0h;L~z` zT3x^fHIu>ioygOZT%Nu`L9GUs6cH{?@cc^ zg!J!?lrK6EsZ{`@4gm2AXat`7;N$J96a?$~8fe`c_G{Z45a&9K`@9pd!~0TlJCHnU z6q;6Q%)`{6r6(ax5&S!UGS@%m(Su88wn5Lt_)&VtSbx3lXJ$MRj4HuV)gZ4DJT}dh;kRCt3bK~!xAB^y;T*~ z+G=yHCXr%vD!ijEyLqy_7i?$$pq73y{mx#3&Kb{f*d)Szt zcIB;bVA5+--{#=%C$A@OV2}6BeiK|P`n3+9Gn0oTs{it(FslYEIvw&Rm&bK7<>3V@ zWU`sRjd>eU`6>TkHh-xpNaDKcOb?_WF@0&G?B(iVR0eJT=0bC@Wz+>&*GA?9=Iz74Is?}Al;gN0DBhbdjl$WYC*uK=5wcm9;^-8 z?-}JUF#ZMzzTN>xV7tKo!cgSqbDZMK{vHh|Gf;Ui*}(O&fBEZ64AM6OLhlrSBQ(@c zsQc%ELLlHjUg_3=)Ut~W{WNPpvHLrBCLc;0ye;Nxjy*Z0D_fEz**jeY| z+;P`aYc}gipO6vM6~pz$EvKgEt^8=kRcS5Bt`Ht0aYCY@lH$27#sM-&rG>RQxMr?} zBECJ15N-b^QUkJSkE*zd?hJ}d9LDYiOX}^>aeq{s`5SE%+hd?lV6)h)zJ(}@)^SVQ z9_cGps&W-A)9)6#xkbbl^*%NqyZvVK^ZlcN1jNwPWX@qP^O5;z2cr8N=j9deMEfN6 zegENKK~{z7A(?KkA!SjdGGT|Cm+3`A_U9TW>va*1aW%>&K?$XN`7;0F%9J>GDwMvR zF-(2q#H>UZ-?nCji^j^Fj8;H!vMOc6q-V@tGN@x*xOk8%{WTi>7?D@%G#1`pE@DEW z*ZSCotZiSW%@>G<59jZipq|otcgMW$T>6KqlYHfo3CBUkEMdPsMTrsqffRk@$D|q-T)Qh%(Nf^(GgV`d^Y8On#f|w>-EMYGC~t zF7T!qno)eU^l@b<(skr8iOMV%=A%Y>{q8#F60tiTlBtNm2dS0 zqj;&W+P?I?C`9w<w*8BWLc1P@ueQb|H+I)FxF+A6hs^EvZsz9$Kh7nvEa=xj^o>H_M_D|fWt(Ya zrcjCm`)&HEJoh|WOdbWJ>TQKnpPQe#S5ALac41wu6-&3&>B@%?F zW~-g!e|FbG1TtPhaKGd+LP?Co_=BL}-dw49_n^WmUN5zq_@dV>VQ&=&VuQ zv2t$nXIucq`vu{(X-d>Wd>zKy;9zeMr~+WLGg#erZp0ev_!WGa?9boh!l{u>WLZ2GTCJ7UtN z`YAZ~?vpGn%mt==cbrzEo4%TZGrE0hPy7P-x2(O?eM9SbbUO`-`D`roXKgm2y^+yT zo)M~;XP$8%R$Bzd0VK<@xVShS3^&!w@^xQid9x3(zem+wqvC+ncb1wvfLk$Pb+fQ) z3ie{rkbcffS8ntTkdXHB7kY1pifZ#-GyHFC}>I36S&1}(H zjHjRpotA3 zm*p(%U+hm8eXDdhl|NlpS0g*avpYad@9rc@w~%o{{m;!ypii6mRWRelX|mr&Yt1lN z$q3QYq`TVOH{d86ioQ3IZyW?ch-VuE@V0hIq; zQwu^?MD5kmA#<*QO{^roxp`ZlY1F$EdAi$%!d`x(n?kqqt; z8cfPf%tS1g)i1fis-B<136WG0j!vY+%%-DU+52{#cA-&y_Xfii9bE)?Pru9}a{~Es zmJcP@{}kQ(cgo{ALJ}%??KoR1PvFinr}-_V_nI%xl?d-_@Yj=KtqRx1yrS>q-WuB8z=8`4cZToLZweVUKLEW+Pa}sm9Vpz6ZuRVYx9Q~X!Ku(VcdDpYjU9xy+ zwcQj|2ut^PK{K*B6sgV`;Vi(HxaFB^|747 zIcrwqPOwq*D#AJLraa$I)IxDj7?*W`dOI*DD`Mw5!@26Wfh9~sF01!11-;1|w%TZ` z(}Z``*P(-)zd&_gzKe-Au@&}x+Lm3$j6sc1xP*F+LuwNw-t#?m$2`WeCek{UAgJ3r zJ7B&yK~xhkK0QtCj&BrthA?Y#(-e^RSE|qd{pBz_qh}i!(dq4}`Y*9Pm$yO#ioD~9 z=b#YZH5_Z0JEnR|_PnT=&S+=y z#if3l7Z`Nz5Ovjhc1P64a2k5|472$uKKZ=UyRMe_^}et`DiC0bl+%+p?%a)spGz3&N+bsQ&ph=M*MjJPi~Z zxYEK`_gl^!c**c2j z=>pg3G3N`09&^?lb;y!a^VirX%W!F{J0q^bn?FvK346^uXMq)j<=-j`>xmD$6WYyf z^hkcO4-@XXM#FW3pA6i-j#oOMG^9=*Y$*_|I(%8luG;K46VDx6t6fNRf&l-x!koA4 
z-^8xugI9xE`dhMr;lF`(hqNixjdG&p5wr`hgTNLC5zlQh20k@qmSHlD*lb8dFb8`Z z1Akq#>a?1fsz`F;<5pY@i8-6a`Voa{DT>@dK$zb6?Z7+MIz4U$%G9ID8#Mkqx^RAr zy$%OE?xa%G_QiS$9-90$lbL>g;yFuG;aA5(vO}!TX&>M^~(k4~FPDi79G@%vnQtA57)D^?j7F@L8G%M>ut_->+V_ zfd0>y4Lm=x{T;p~eLm_P7B|MQ#0wUFrIAy*V=lAgn=e{$XCI;DVP&ecO1N$RuOPiW+O_l|d8X^h1F1Zyl;3_c8Xs3 z3zdc7_FCe#ZgG<&vq&#N0&F!mPYPrk|g z<_s|iXB!(BRp;+wztT=r_t!y4Kf>KlQG00QCI3Wr3NI+;i`86NQ%vy9=YmH*eH;q} zBQr4tJ3omxY<4T^c1|Z7XXK5g^yRrF^+cpqRi_1?Uv9)%%?j$#N#dugIq8m6s>QPS zY3IRx(aEUD@2N)2N)qX_C`pq!yr36-*N7%xwGC&7lC9t`5!P^aea-9IkWvx}3RfRy z2TCL{CX6K76^fUPl+&dJ+f>H~bip~<`^2PH-wx$$T1%=BrR`@_q2gm~v`=U1PJ?|M z_pL&Fe_=Q;V>cNK5F<Pbb{GfFeT!@drER9CrJ8LuzFa<$j6k;b^(2FV@T{O7)&yQ| zJuvT#UA#Klp06aUk#`kK+K*Me7O2=53!I#*oTfgUiPs0duT1W+lCBW>!n*oBhm28| z8rK5it8+QQXX|AOdaDq3C@z#8> z@Y#3(q=+xk5Cn(+cpQzC8b@T6XTCkHo0UnW)Ackt4Xh?wa|w8!T3=zdGL$*g9PNjzT|4T-F= z@uR+r#W{>sbw2hHK>qE=TqKn_xZ2#;DX@{4PreNgqN%P7Tvkb2j@XAKbuDt0HjP!@3C=wIUUxFrVGbYpkT} zln1ohlUg(U{G8;1>OXo@`>hYY!-V`Of9r6Ig_B<%rb$jhBj}bw>1vDqGHb^+sH*h4 zZ{k>;_d6M*^slCh*|XCQSgU$l**-($HC_6HOaqH%NBorP7|=)9tHAa|o#9V#Ct-Rh zGog(I3}%b8G{7`~)w!F{5G@nUU}BJ%#-DoVI^q)ZtbuXwT4D!2B|*mgh57+3q{lHZ z%T-hZZCdn(X|$Fjc>S1mfIJw+LXq$nA~KhQ+<{=1H8P_B4UuM{|S5T;T- zL6O~zn?awD7N-Tz5QT#J&pU)yl!i{tbb|51O0Y8U)XekI=^VG8MDlLVS|`>T3#CjX1}rY!EW z;fbJfzgxe1&``xcxk&_*rT`CX&C{V!8n$f1pb6_c-$V(CRrsPKT(3JYg1x3**VDCi zDpN@_eO?D;I%5=^>5&IfQzcBsi>K z&4w{36N9F9FZdTloniJC&3#;+j#;_|C`u5v#g{FYCvR(-lMXBbG9Ny%1K+KK zW;B_EsOj0J=q|cQ_1J-TKNt$Rlhtq_Jb8rpGrkUXgW75rZEPCp3FOms`#<2Jii0NE9tM${D69VUvJM4sjp!+^5{-$#_Q7sC^{k z>6LdBR&#W|fDhXeQk@V;GaNnmNG~SqFKuYK^B=r^ChiTe9_iR#`@zWLeZyLcewx#1 zS!wkAP>=L3v#nNKwR06zanM6O`AYo8kUg!VK;QF{Hgw{xXb95Yb0q5b$aS79PR($%RYLp*ZGIh^_uFC3#9r)p}q{5VJ(+GMfQaq_^wvq<5mWWZaDW)cgK* z=(hI##GSB-mg{IBD_Vv~PCAD*mA<-jU-{x1r5^9hWZpF+>8Huy+T&Q zbY*r%BE^#ZbQ4;0iXkCirr*&`9+cYi?nWftgGS5^IJSn4PghaomiC}{Mz&+0t5NoI z9ov?%zU0)aB(u)8$4cL}a`U0HHsq+iLnnlVFhn3;s&VSPu zZ~ zID=)EZuPJ{BhkQ@@E-P)k7!o!ofxxGH9{AJi{pz&z)*K`Szko}f5)(~L%b|@azZTZ z@AhB6B_5Bs-1)+P*Qz^z_%-RX5MWY6uegY?_pBP>HV@y^I7Kd##R<~=8cWc`ed+S^ zc$GKdN7bRV8i&T}!g$F71y(NZIiwF#5f!{N+fRddcSD^x53-tSa7o#J3Xk04T)AfV zwi{uoNt>-g#CcAT_AnZGBsigjAyXD}e>I=Sgz~6+$0kY%nB0vv2Sx6Gqt(Ua)Vz{- z>|3bC3_T&@|Jz8}HGH=u*5Lneu0I-ezP>wqgDQ;7^-RtmXKPBiCt(=odebp|dD-Sp zuRP&uV~C9~N%?N7VJ3Kh`y-gNl!asn8tQO>>6=?{&1@;B#?{B7%*o{CsB${;N7kkZ za+5+|H0{`^Tlvr)4z*D^+Q089%_6B2!PS8A8G!u^3T!Kn3kzau0e$vEa6y6^A4F?_ zC-=JspixxM^zy&r!{C9a62Sfm#J@KJ!WfkRK$p7npBd|K5a9T%0Pq@5%K$CpKSAI> zKZZ{Ms6$`hO<&C6o=<~>|4xhrAsF2O<2!;MFmpaP@BdfM0I@T_1Da0Jgtg z8dH}maGSE+Nub8Sco{a`n5P8c#A4Qkdo^^Ed&zox(gErU*uKcwDVAAD*A}q$+R_n- zxne{Onuv`=ALQxpyM#K@yZlcH!$xJwekBC1Hd8&PuD@ixDHYpsjWN9F zlV&UN*HC>=Bv3Argcqw_%8TO&T|3R@xDcKnEVz#QiDF4mLWrl$X)u3_Aut~dyM_V;Z;-x zZLzuSfxOEK79$*T#b>W$QKbdi>JMLjP^d}NUboE&WgPyR6xBf41u0Md6H=yp$H#HztT1ArCF6C%v#4!e? 
ztJf3hW!6R2Wk~a<;x~!V1fig?V5qIRKJ-l*j#9WC;Et?Wh#aqV; zjPXIoApMGH+FsE^sm(<$1PTQ-fkV98skB8s!nz&155vMK-Y@!8wqobI_019&9*838 z6K29TTvLTS8?h2NsDO!zb9!P^m+|?auhZk(b9IR@xYC7Jk6-p?8WQ_PH z_s6EBW0p#V^y&!slYu zM;%8w=~ol!D<$W)kr&7dW=KzJASR53^`X$!B!i;q^OqFv^aoDlx zxY;=pI2%7V$Fw%2VysO6)6^cTQ!!@gTzg3-L86lsN9eXuWF^6nT{(U;dCUpYAPop| zgLLk(8rhc(!K*yljG)*G=qmYAxU>fEwFs_2n@0lCzDTuMs-3>tB*)}kb zcpDR)iIR^fMl(x-qfHR~M9+b07BAE#B*0s+ zB2Vk}&J!P<;I+jxR1}^>RP8R~nirVK_@^I^r~BCICAvuFfHg!UrHtVF%nW6eGz_%m zM|)qmr6jnOW&q{m`faDpt4y?vG4dnzs>ka+)5`VQJXlc03NrZX;w%^zT^0It%xCJt z&KBDzh6S7IV2A(r%_<0D_d0e7pRMnjEJIS;RHD(*=5Hy%J!!Zcxrl%AvlmnkyHp(h%a)9#arBz9^<&E?CK2at%&@*0LK$g zmrsL#I|(JOlbEJt2Vs}U{Sd}sjiLNQ*8NF*9rHAz!R@u^c+ke%-cz8F2W%-w-W%w7 ziyC+Q(2mG0*S=op*f1g%E=GZzvC0)4jatR41A?2JuTjH{}HIjL1BL~T|Zfe{QaWf@v z?FJI(ou$#ie7*k=SbhgkPJ4}jTp(!jQxc{#`lkOWW<3Qo{`S837Ao`a+igLP>u;m1 z0mVnYVD)#twLx;SV$W$*%EX!Rh<`IsP0yPk2z6)B3Y!?jMT6R)k^j{emopZus^qY!l(9npP~TYTXIaYY`E z0vtLiKJI#+2kcL-sj?BabI;pmH6koZl05m01C>r@X3!X9q5GeWuXkDTuRqPkFY4+U zBRG3sB!)9=J=#{3@3!wbI*(eaJ3Q>|NMq18h8QuWdm=?z&_)kpayjozUfVgnaLJ{$ z6zF8&<93MEhB}yxH5%=U)T7xnFn0f)MFw`Y%E8UD!Zlbx><|Wm%Vln zynEoS=3>>PUqZmCg`M1+17^CCpd945zc2lz!C@4YK!V2i6()Ba*7QFdj52_8e(>>J zhFRXN$LY6hLsZi!LeTZ;%-*4uMWv2Xr%@3quRZ!|ZBFTYXjQyh=b%+my&X*!IF<`R zGjej@rv~sDKmh^8pm3n>6ELk?v-`oQ1`ND@$|bg*ec3mE9{*p^ECFHw_nz{r;3B9t zsKba60?ez6K6%l(mnVYAVZq7AFX%>eFp}u8wQR{pqnM+ZgiI#>)M9rcA(nOP3^%Io zAC1BXTQbQ_Op8ZmQEjOu+&xzgtp(H9pKc1(yBF?C;rYH0GhPBk3=p%B;f zjN@wDcIe#~K*ExPFV;|4Z^(*s)#iBmA((OOIKU7IjR$tsYU#*!%)MKM*e-IGZRkUn zHePQwZe6cCQ*#@sF0hT;O7X4bI-hS{GS}{C#zt;pZ5+oP+EwDQTZ!Ytx=?^6viU!7 zpd3#M+Av>8xC#Xf$K&2vWFu`MfiYGkxV`djA>F;Kz?j>Mn|5VOb4E^tDO+2nLb=5j zomMX(!S+}EEtvBn#-CSR`Q0JlWsAaZ&vHnOEI5pkDq`$8Nq0uX0JCzqbBB{2H^xvP zB&8&%@njkwJEd@{d`l8|+d&g_cvzZGAQmH5kiNs$pH#DX9w~FEUO8>`LYcUWBleXP z6EGX-h$=H+HVm#Ekmlk86X;L)oTg*>e<)$AN4T$4EVI2F z$no(j|B8Nt$(J#VPfER*M<^Ev$+s94io_6@X1Kl82zm=Rh`K|sHwvCZ#)X^`d+pPc zlIE{lK3AH?IC#Cbn=kny%a<*A+*#A#Q(fdKymIHEQ7i5k;7o#tJ5o>prL&4{hc6k;=~N+Gwh6;fYjF=x5Dj-*CN>WRynHUPW!WTd_&;drdIcZ zQL#54iR2K8bl%+dd;vqXE>(OE-fdkiKuvsKE~y8>tgtjnJBBF}f7a`rgyCUB65*g+ z>y6>?S8&DssgZL3|6}SMU@QH;fZeHW+s4$!bZT=twc9DCwmY>kwQbwh)V6I~=e_;^ zzVGJV{zLt(l9U+;_w7_>yK>pt@pCXE8aon|An(W91>m+e%H=(pI=Yikt6# zBz`=^IPb~LeM?M1^h<^t+&}_eZmy0;SF}!F z#iB0;L&b1Zm(;}2G5imAV-wxb8(K13X@c=Gfze|;%ovI$JZftgGNF5JMJKICjAr*-(yXqSw53O?=yLGs%E zY%&-fi><09%SmER%fV^XRh$jw9oO#q#n&b?ey&H6SsjRufL@p$p`ULU*r|i*{u7hU zjG>%uOg5&UNY!r=kM-oFaFDcBC=wb;$>cmCuUY2R4%^McMv#=E!6mzajcVM0s^b7Q zs%0BP#}r>E_W1~euIbEjq%&7| zv8EMrQS_(VI3uM(jOocsd>S!8-@ z*v7kr*6)ranF2@1z2ocZZ#8`E&vjIe5&^_QK&|-R=2-_xWPi=H(I zCPZFgz3yRbjnG@fM_G1XUG+~?Fjjg8lmD}}+YbFn-*4xoAsa{BhH$@kUooLs?`f*b zjCEqNBQZ$Pufup!)3tNZ#cwTep}|bd?l^aA2m7%)>zh*L7m_d<^+VnZ@6t1s&WoA~ zQjSk>X(8JYZgokBq29)iC~v^0FSY0Ka~O`?9i@`4vRQ#dQ_89636<4y=RHXxDeB}B zkHoHcr-qG&0-W{(y^&Fr$$BgGcV^{Ym!uDYeN5XI=(Y%77JXUFnk|8PEONO#_8Yl` zI&URbyScFkL@wI^2I(_*qu~tJD%2}}=StLZ#?>3o9aOCR)*k}AhBDvAsGlQ~!f`zRwsC87mBfvC&k4rWf6(^Ui zbX8YVb@r#X>DZ6m+}tzM984l=iG)dqZj~6xxF&Hkt!E{}yS=#v*N+Q%{mI`E%(uMbXucPBMey_lF*X%a4W;?R^h6;!S1`hc<-txj42`S@}46y;VRTqWgm06F@C}zXJ5u&=CD~C(<~iIX+Gt1Nqh)z% z-;9%c1U=XZlBHbPZ7!4uU)%Ke?Fi!D;EHu=C9(UotwJ5;VNeS{k-kJ;Pp+cxDg5@$ zk*8?~OjehENx#Icp3FXl7c$2hRkhm??49wk$EMs4r$>D|i~cz{PTB5lCgPyi;tyCZ zT=du6@5LdgPH0BIBYXFLyK%AvD{!zM+%Lu`=GC*c5%cQGx9Gn__#G)zT&kx%`@j^g zZF1C8MLDqZTW#E4?A)u(?loklu2LqljOoOlOFOe!IXK9E&>ZRBdT3xv_|#wlv5yO=&aw~rZ5kxK8c~5vP&WRz;GVeru?B9BS~3#c zD1`38QA-90N;U9@%=lwTEm9_#CsQJky&f}-O0*HdF+Qp%f1Zxyt-o^}fgNZ1*Y~c5 z^wi#q8Z6u;uXPIYBXb)}<@PZccr6?v5g52LX+bE1743I4ZnrA^$>`xIl&o4#Mlb`$jK+iczsU 
z1jsZr*UN9)qNC?5Fig(Q?W}`bS`310F}tk?xX(jzQX~PtoZYyM>=yZ^1HZkzff12r zYf~|cM^5Bz!Q3u{w?*s(-=nykr2HFirizR(`jq2TwCBBdaVdoONx_qYgZH6|{+m9| z`8*WUKP?q7;OB%iL0SllxS{Jmo6GTccB9e>M4WxaZ^zUle0T>vsb{ryq_f25neDAV zyU%%8i2F>-B%XYeAVW|X~MBjlLdkHd@q=~1) zGQNO}S{0&;NdA7fHCi%b_`-KEFP|~lZj+Qtfvg$G=M#vH#VVv@d#bK1#3}3Xj zM7TK)CL}-dSpKJg*j^}1mS5tS&HS<&l`+j?z*>>*D$mm!V@MiYmcNrZwWVAZJGGpo z@|aHWHsz5faJu!0ZepBlig4m<$c|&RH(RXiX~xo2sE%*H|H zRh_OfiM;zR%@q&$fgLMVGoo`J;w_nwS*U}`9-$%!IlP0S8d?=1Ay;GdP8P(La-XZT z##9`y|6H(`sZLb~tpcqj+*OVx$9xHEg60^*Um5w~0@rEI_oKok!E{^!P|@R2Zz%ID;_4LRUj z2&_webfbah0P(D?fMz1NzuMbrR{*FDKv@AC*=Z}H%iTRua!{Y`w+U!5IpuRCP^6q! z?U;RXmaFTla$R;%N)|0f*IFtc5qP8I{wRWDJX4Auzv)tL zREV{(<_@d3s!6fF{zaF%OP(uZs?idmGNi(CTx%!l)L#Pat+;XFn%R!-Q zvkjKeSjo$i);VW%GUxxpG&~7a2I5Yl>bB(TEHOWD^3gM$_~5T-MSm7#Nr?I!I(9C$ z4d6YfqRVMLcXxnp8k)?4-Qv|~yU8sLMhCDb6 zHrcM<;Q7V7#amKuz(V@L$|0bg>e4R$S}_$4V$j0AaIdh$ia(u>wmengcA*#9T@w0s z#=Ci2Uf$n_f|hq@fsN5Qh&GCN}A=hAM*kGSvNT~SKIHVyg6vIg&sOSX& zH>xOG5iv{po|*=3s$wmt-n` zCj_tT0w z``nSdA*3r@buiRw$Z&OF8h1sgaC!UZmm1NakqxwGy?Xwop!AUUm#1l*^xJ)X3Up^( zlx|)Nowi(!LOjalnp+sC->N8)iNZljHIl66NTSnSED9#Ctp1Vv<@0hiEWv3AUuuYJ zdZql#6r@Ub67R^POJxR~DmBHCljzh3VxiBA(c2paU^UaUq0L=7`@;f zlCF3=|NPsgOtXm!4hs22mZ0v$$4r&1EBKAnPU<}EQ_!>pgxmw%-x=Rl-H?Bilky8G~C6CwMA*%}ZO$K1yZIf2nmcQ6Fpw2rjzN^q(GneD=T= z2^71ZzfERWGsA7!KKN-`bCS*sW1VVaK|P$h#UXU|?x96KJ)7LO7{*~}vE%n^2L4ZX zD*E7B$G-%hn4;?$is@5A26MC{i@NYS$@bX*P1WDR%kHrEMHU7=BwGjKJckz1m!i2jSPr&|XLTkaME?sDj+TaOtm z_*hL$@lThCWe(@&mi(-^{Vjr0YD9;QGV&Ks#o3{+lP~a?AP)gzgyMjeLa0F*jV>Eq z{#Bj~Or@^m@2PuLXu^9W*&x#v7-62erP$g(G6OColO9TR?vUgY;VR^STnToyI7}u* zGBG+Y9{%!3OE^j0kQp~=if)@?jkpo#mb5b~l8ezBl>nVVg8A*j929Jjz@Vj!b?X>a zT2F)NQsl95%&vGu{1z`P#n!Gumh>rWF6Vkk-s#3*aQrjh10CA(GI+On-+WCBMKLS8 zXx8Cq?F_+P)_^>J8TJnbrzKjN5LCl#FSfXA z2qMR31u+LdJNbf?4Y7ZK8psoaROcDGCTYN##pG_sof#xFD^z20PHgP zdy1FBm0Up7Q>!%_T_Dz0$f*kHsZ6PG$<%{e+h&2OK9y$*TkuOj3s0S$6m}g3G2}eC1if3X*UVCuPqdEC*mYE6(xvKt zvglcdN;!{T7Vd89DB~Oh^g|+SIog)l^8&8UPS5!7lTr!9a+=!%7E~--imJ-osA=yQ z=Z-!a^0&%45LZ#!zCwSU)#Mde*QEkH__j6Jf@pV@(bz?Jv0Z)T1)jl~09K;m| zN_9#aOr_&XkslhNIgz~t`>&mW5wC(l#FWaKv>tZCGd8SjqO;P$t4dO_f(xCLJ{JS{ z%5X2c9K?H`uhTm=WIfTXb5H$mGz1)TTIHmIHp72UA)j43MEPsdMPh6mZV-nWCaKi= zi58p<^<&*};lJCm=orLkT%VcWr({Sx(1ponf_L{mnRR+3z!}J$cy{&R&|OKIzZzB4 zW|VUqmV|h+8)3*YmPk$4f16!fdJ=3)`erkc6oLpIgmbY|F3wwfr~8jzXqWkBK!3FA{%7SgpSW`GYwU?A5KMRNhG38LgKgOXeSCV{;o4o#jl4a1Og6){ zB%GUUEKn_4(tKDkRNwfvOxj$@=Vonesom+IFWXK}di=39B8IjWVMM&ulU;5xgC4G# zT!N!}fi904+o|fMWun#0i1zH2=4HA1_7tW>>bCd2UBv@KSC9p~EFXz29F38f!Ca-% z<%>-`{8S_6(_3RV;(b*SaXat6BuY_Id9h&3bzeKJCx4RN^7kOnwhvk=sh2uldm`;g zLZ9(@q_C>tdn-2h>L0S#05Ff_{egh|h?RfJNvT8&3g4!HAku+7Atw+1VN6XxbP=eU z0X|NEZP7>Iy?tE_6!*~Wn*yBr?uQpI{+DHbDFr?g&!N|;>^uF>-vBdr9MC=E%Z#c5 z*aiakzF%Tr?wTk>91wJ>fup^b`XT)dfb0#hg|yCf9ZP7^kvUw|X3_9O^t;Ppc~yLeQod6YF* zEpvdp-l?gD2zoJYtrr+LVX;re#B}{ou++2M$xe z6nC9wXxVt`D?te5Kh1g08diO?3?_1N!E3uxD(zk&L}OAS0pdmtX2yxLPH$ZDZ*ncr z8=;t|YGrI3T5}vzM`Nmn&4}Y+GU#34urkoXd80sW1s${TiiySdl zv!JKovhhNzQ&_|!BK%y4A}&)}(}34&uDGirtQ&043Vzj*`CVcS`t^$1N)aAB5pu?W zdBNx}H>#BVc_$ z`&zRW4LQQ(H}6{j(%*YSxfFgN@hSLZ;e5s+AOY~k-TB%Ce}))lGt2(}39-9(+v#jh z(Bc1J2tlY*b3S`6XWlbhTbA#aTXhcDQj zF;XPMp^@5{s?OX+>L!72ZxwVZLvE1V$us>Mh{n=cCsJBjh1vtHsg6DNEYT$_?d2ct zRbQaxaw`m#v+>h-U=ME#4(b;$cVwd)V%Fl5_Kv7Bi(yYFbqI}EE-J3ZW7PsQP?F=- zD*9q@7-XgvQLrI#!ioRL60Xeug{PGWkAkr(P*)FXVPGHx1Y%*&&ilkw3Uc zH%0CTvjJ;@ZB08`)w2oD4E&=f6X*t$t*bcV@0BUmCcjii^nbhcK!s)EY7PFIkJj9w&N+Z>qmQ3hH+^%N1uvgj zP;;KRRI)l@k|+neOc*@wFY+{AiFufN@EHhJ;JW5;CE4*lqC_&(&5uPZTL*2wn5*hJ zaHBj2(_>P<3qMZ!GmMs4^@lb%h2WAxP>^kuI=MD~en{zHr2=R-Hb|qKxM7jWNMR%W 
z{Pv%2UqIlCMh2Bnt)QOzrB%nnt|y^i+3b^fp^(Ux6Qyz|uqo>LAIjpK>^z^}XzJQdT(|A@^(a)QR{Su8uV&eV#(hzyBt4?83D?N%)>W7*O5a=GjONSNnMHXViS34Prd+f9p;wfI2WUly$1dB7lO=dmDv{E!ou5#O*dO~OR(edb+z}4+&jMd0eAf9O`&llgE@w zd2Q)qeJiXmVVHqUd@-{ia>;b<-!NBxFv*T5KF=$`(?I(-68w`BEvZaIKD3Fsh+$xo z1#*zavX>rNT{u`Bg~?Y;_`(`>o58wAU^_kvJ?ZPuf`QQtGOYHeXv7Z`SwA=`CmDlr@NPn326vu>Hqp_Hw0UB zro`wLFKV}cjMoO;JwbDDOlh7Mybp(lHubcgpK+BLKDL@Q!S=chEDf_iD=StJcyYy} z7hetYnTeS;jJjH+D;6&uYwG4xUlVPJoARkZf_IB*=lkHGcIYQWjbCBJ0=LcL}scczSc3EA^AS#u1M=*4v zy|*tP_I`PX+evD=pJzO`Kj9*N^;C+^A3G`iJ4KilYELX{(SGV{x-NH!!iy7`>iy{L zhGsRVoo1Bz=1H#)?w<1Bk-k#|;wu)3H@f=&P%E7qirp2gkL1PSEG>I#!NwJ(0^^`L z*mK_PR2_^nDZNt*a2vfs{Vq=LHbN3taO2h?P(IAC?B3r5NQFh+L@wc9Y{Z{*5<2JW ztTn#t#nb(*y7>?t`sk!YtUj0Ks!q(Xdl6euuS7GBU;L*#^I~>xn?E713&rMJ|Z9e*pI255NW#_Di?=$0t;~ zH`w<9cq=rB`CRq3vHt_W5o7>nAHW@nA81R{;4ffxEX)ExP=HRl+41 z<+}j49{|{u@4Bu*9ALEo^niqaKNkbla+&xxpNuZ;?cgIIrK-z5^Fx@r2k3`Xx`#L; z2WpT)3$U7KI8Pz>N)uqf#eQfOV~ixF)bD{YeE%gKfHiP3E+;b@^B5GYHd57_j5S}8 z@S!AZ54NCZX0294Ell7!&tX~0Sx|=bQ_=h%isB3eyE#V^eW9EPVCUl;NPL9KH-W<% z@(6MLMgQ16emvVJ{_@i|xgI&lA{{y-o&lxkc{o{j;npS|y8@muEfETm3G#PQXFr)E z6BjuZ=3n32*IoW9bL_kQ*`i4L=G~+)upz*Z5lUwxzxG&zX`}(MDe-%OC1g+GY>DEh z2S0Xe^~^UoR{6*(K75Hl&mV`y;bKx8}kJ1@HXe0L!fo1j%vB5-fJ*_bWKHBvZb}!Pf`{>wMc_-0QwKpb!%Ze|G52PF^)f7MRedGu<*3xybj0w>SAp5>C*zb z2wSd{Jrx){0k)-|!TIx(DfVU8ZToT3$&xQuAC2623x@S*OgvbdoUr%}11tMm(~Sf4!`{mImS0?_uE9ad`_9KRq37%9-sC7w&zkQnmrx8o-eg z7(^ZiHa-LAQ+lyw2zv2Fts7na|6&UX1UTwm05xE*zQ6v6gQ^<=nB)I~3j~hz@L2)$ zF#U%#O;@H$?n4G(g~6#ENZUAkycVX26kU*|5n zGIGoq=0661+0+MezWbbetyf#AgNZ!q2vpu^*zj&;BOy}79TRu{`2DBoZ^^L2^(=btUmC&R7;Ql6LuY7m1Ze|arU-#{YPoYwcxFi9_=jo*Y8Lp>=T z5lAQlQ~2|#T3%Moy-_ByRCotxSveA_ig9-?eRMk+_@#}7z{l?tp`;O|whr@BH?Lqe z{DUVBeoe(s*Y{RZJ=?5G^);hG6NQJf!FDP*F;hU&aZmPS8Si_v_Kz_p$xXK-yQ%-GY;91iB5m)!Gj}@tQTD*bIQx-pEnTn?c9y~^ zxuxvF?e>*AL7(R8YvBk#(Y_7GTkRYTuea#@gBBZEsEO$(%*96^H_sT<5|LkSJPVfX zn92}}Wzpmp?_Y8IPxE4KOujhg(FjETWYOYA(xoGtqmk@z`o@UYAle&>nXCdxLnW}i zBNVfq3J5RJgWa17_6TbW+YsGbhHuy*Gqo!1Y076QgI-!Y>Ro=y1SlELiY;+9xpxDG z;(;&U!b5ay~nuv#ihcx0(6z z^`KyJ`$G%$ydP9_|=kY9^^7hOf)k!!L9o|4rdpvvC4)A zrRlF+mNU<*#gklbVyp(L(uQnUVI`;jYfw?-t8$92oB;U(cEI6N+YT7*1?_n z5{j|P{1a6X@G3O>yT82l+fp%+G{kTvu6pSahNpx1`bCr$CElDboW&2(HPwJ<2HkKZ z-$GqfI7(xE4KG}R@Klak9++RXv4Xe_hW<3D8nO2yqm4tq=$FtsiuqUilpU2d*M&7Y zg*}u0zVLp8B&MIGXe_4QCno0et#s*;{YShTAK6Cx~6!Q&}`V*_y#Wy!7^DL|3NOJT|*NGO}$uteAO%BmNPH0jT`kIf=ZK>W6bG?LyIcUk8|%mbV^2s&Kb(BCGu=b znC;jlc2exKo7pVV8m0V4(3qT|vMsy`XgmlqOSKI#^1gHX$3?FW)qzQFUOo@*>lSwg z&}N?H=|Nmin|%Wg7qkd>!vd#(^8( z-=B3_cY6;xuyg}N68eGq1ARf+x)Dup|L^MKsOJrF4Y(#ZK?u%;J?FtcF9@?Aet?-E zW#U6#Yp2e$W(J<=1n1;-^;!3 z*taid@~`Ji2&%r`S{%F=+WDa+sQEC)^ec;KN928468Ll9fjT=_BaH5SDoNHgNpu{+s&`d}!-lA7W64 zERj!JrUh37#}ZcB6nO1aY`L~l`;YI~;-1pK^p>FY(pBRKR4E{$BJ`qPwc)8mi7<0C z83Knd_h4VKTLc)_E>t_j6FVQL(vx=BYX^m*0w-gG8Hbs01K`0osES(Ec9zP-2Q2kk zhCS)>k=+xG-G)+%%_p5Wf(y>Sg<~0jd`HUh8$i+yFjeMd8<|feCW35@(Pg= z0roDbC)J1K?RvA47N34wrMGyW6{T^x_l&~TAsSXrdD%N>`v-$En7nnz`~B+mwjVRh zYy{uKj%89U#BW9fahWlGeT6B&wkQ;TZz6|gZ`b+)pgaSg$#oYWFaeeEf1~E~6V``9 z0P+Np5b}Dj#eG=^_X9z^`UYGHv*|wg`rQGpI2!wa)`#%+JIVjD>tJ5T@Bbx0c5`RI zDZ$bS0Cp!_lgs8ky*UIE?r$78b<#jM!m?k(ZyMs8)c&IL8hAhM@Ip58GZq?5nYCpT z#NR7Sj^0xBxqfXn0>{;VQ$nLb0r*v!$+) zs)adcPw42FeKSz)MqN`%tyrT};EObJ%miwuK(?56&Q;x2=HXrEA7)b0*54f~I2gtD z7qZ_(@8}l466MeKIWC5zO*`-iUreUt_@K)1Pkl=)3%XXv8R?eNVcX^5&PXQ|LP$s7 z9B)eh^ENf?NMmtAm>Sxxh6N07YWj+=xOftZiBUp|wV9ZUQ56*8rGbHoeCI1{*Dh!m zHKL^p%GHjA0Xxa+4Y`a>!aX32IU07YbY)sJx(<1}LmAhznsWAi+FWXU5>kTuAgylM zi$Rl{U?H{8>SphR|N8+Ih129Vk!Q(Q@8gob(oR_VLZ(Q?YU<}tUo@t!%8Ux5PLW0+ 
zxcKO!W9Zm-R@tDP_D07XDkf#7rhsB$cJ?<&T;Yeb|ZUvQj3Y&BbB2(%s}{K!iq;Znz+ zy9L}|b{svt%@r6gR%^7}XAW3yNj@eBhd*o)PiTs=OX@iTz!k7uc>1ZpB#~+9ChC!7ubb?>nltSZikw zLPp8OBkp>e<*J-52W6rbvyUH2{unb-Jmv)?S3-2cw|5h6vk2B0EUoMQ1IplveH>wb zwy^;7Q2N$kts3=I_BRaTu+%&G3Q5-SBlX9$Ad2?UE0`&-dRV@1D`1@2+_di&qi@cN zsM4zX3sgSoBl4^UhEX~dY|2;hP^f`V%EfF2Kf%UWOqMiujwVvaA%(Vc(5;FTiD}Cwnv+d<1t02RNXp0d*fBu&!>;l*YqcKvc^dI zRc^9vld1M4S9-kO$1ddSV)l@=D$7sv(q7Av?=XzbqLUUcKgd&~Bsp+LX^X7?8c%T@ z`!1&({J2Xce3N2Dr!K*f$i8yN$Hh-BsM;s)(Y7NQ8;l-A4Kt?>=IX$c63a&&rhGHf z&f2dPH5l?3wv5zKk`cm-nw_QOSAdp%HoI=%eD7dF}|PYDd+l~C1{XR4YrlzbK^oYU-jz&i4zH; z`tR3Ttt1=&YwFD0TrJleM^nsTa1wtTg9!%)nc0ASi^bDLqt%N$&UIsGF5w=I=xqTGR=$CS*u?RBkLFJ5YFteThMV9 z1#mlF@}u-wlW)e*UpzCQ!a_Uj7@Wxi%-el>pf`#mLZ zXHTr{Wsy-}wtt)7iFT60H~PB|@9G;y)nbt^EwwBmufl2t&S{FyJ?qVTI;aT^2kyr z!Xz=P6pizj4g<|DOd(6QR%@)SaTxo)l470p#FMCW$M?u0p5#&s)iGT~0*j zXMpV$<V`7KD2A{pif;1CnPyJ6oUkJQf}o>b?5^UF@#Axcg36;Tr&GeLmXQ z|GHQ(;_oxQltbOYehLqdd^enWvgT-2E^TVgs%I)H|W8d z0$#c)-t{K|-*W(L*LOZPvw29MtLZx&5>piy*$k4{V`AiyhoNMPI=sT{@}U;LkX5nU z@vMC7j`hVpWu8lH>a%!9E?CgprESO-val+%WzVb>u&w~AU zdV}%}6wtwYqlZdXJf(%jpPa&;)$yu2{i~Hv`Bi`5y>2)Pub|wq!hU_#r%KSxiWDIp zhePAE@lOFtzZp-EbOE-_ecKosKe{tr*1%aR%F4)h;-|Es`Wn#w8cA@$j78TJhsK9gpvjw zV&KltI*V2)=fXCb#X`7U_>Y<+20nqzzCjNpms?lDP=3!qHPk1IQ0*fIX4*I5ng+DAa6Z!UznJ|~ zp7nX&nVT5{fVdM@4p><<37zww^{VfQW8tOcP31j4zs~iOom~p;?9UF#29MX0~Nxgwu{M=*9?9%!Fos6fCu5iZ^%5 zf$YAcu-HIo6>^?x<1n66ZiLKYr7~k{tG4DC^X?FL=Ng#cY+@_MhqT?Q$ljA~9)B&r z<38P!8~-n$`QN>NwZuBvJ9_0ErID9()bd8N*tpuEoAK%63r5J1*eVH%M~&aP5IS=# z3?&pJwUjdupY3MDX&}S`=p}4|3Vprs<=9I8vCD2FKJZ#_n<4iL&Q`1etjX`pwCyVj ziknxxl#-1SdB(EH1SR=*gc}gIo|VaG+@Bns`Xrae7{kHgUD@E2>?#$n-kcy2wrKZc|rw)Rj0Lx3>%)^A}RGkj(YO6qnlH z!Hh9H=Y)d^#YCthO zHjKw{x0z(UyOmx+@5g5KoaNbf1L0={H_LF!u78F-R|kka#e+v-fL*2izpUihXHY9fgARl<OD|+a#CMMo8?V|P3Pw}Pl>f|J7^uq+1xk~+))4gflBys&l6@{t==Cg&@ zJ?d?1ax2SmAMa9=B8YjHlr0e(R6WkgnsLl`kgl6$SVBv@nctz({~kDHc#g2%Hkp0B z^19tz*aYr$J%Fz?c0dTa%D{y?Ml)x5(8QSaB*sA29*^*C(}+i|5;8h0O)n2uC}zM} zoRlO}U*M!T53Jc{IX`;kX_U0RJ|(!%NR+fQS6j11T_IEEOV6qPID>;!1*wO|z2 z9x{zumK9a;g4qX9!5;9iaaA-;D`#hZp@#91UoRv>)wmMFDxDJs2U0FbV_nROnw2&Z znq{cFT;3`FvXlm{9{4Q<9WR&a?FKKaP+H7eNS_~lAZ7V}`^V#YDh-#<`9`Jr3-<8B z|L@QZc{I*uv$cfZJIOzwni=GI>j_{8*8pc%pDPf|Ezq4J{Nn3cHGTVk7|Qk_-kk_(5wje({Kz1bXqGMNq$qa%>1v8cwF&G%dJF9#`pPYDZt$9?i_+-? 
zmlZK@s+bp<;e^Eb*nze~-O8xV7==3K^D>lS-3_^A6bl^dS0p+TF3iN}D?#e73T9WF z()yG*iK80_bUB>Rf?<(A^EhtXf)=$oXwxc*I|SI_Y~)WWC5JD%OEL5Zrzr3u>_G{> z?Z3};UJqB*+2qvN2Bhkk*A5hTh80HxunIudl2x{u?+Pr;Di5?$?GIb2n(8x&C)Qn&7z)X1GRmG2<|DD;G4 zdHEfB#URZ}T>*WyJ&18C%erl6q*GmC|DG`1cY8qy|Bm<%E7x2~`u@N<1Gg3bWRj}U+}~0{D;~^5$H=&Agx19 zx}+;YGSE>;eRRfSiN_G^`u-)C?k46-Wnu{^OM9tLkv0fE|G?-P6^#~|J?8AH_G|~6 zL=2*MO#?X+25~Bdv|9dCu~*|`JLAFWCBC|kd*=aj?j48!?a_qZZRzFKOtt-90B_Lq z1BtLgaKNo|FK8U=a8T}TPDvc1?*s|e*O-dDo3H%>K?0YSmFKSV|OO8FMmXS-W&goBQoox!QgJ2 zy|rdMhq^P}L-#hD8UB-LtbMAj*lA=O9U*Y}4rBsA&T3SWq#MyBtVdcrZ zfW>J`8?hhHpUV~#O-Hdor?vvX!#lX1WFyxlgE+#pWYLlQff)Q8a2v&UMf9mm`R&&vtul^GFUA%?t zpXsPFSGD2gTE$2%Z#8ta+$jon+*@C&53&{7VPY8UZ@q`68x{xv?q{4BW1LS&O+Q>) zD6z%2+KV7(SngEcDP$s@HHdvp$f*2kPMfSUpXi98P|?kTlnxOLZdRN&qk?2Q#W3y~ zIVY|4kgqJi7{p|~BhixWr{;z3GkT%+&z~9hU={RWI+HgNG{W3GQMIJkqOgn95Q;K) z_^s4vmcFSKiKghiswB-4UoZY}x2TOwY`|uGJ=N&uT@2gvL-paaAZ;HEIQTn2zN3o1 zRsMGH4G!K`*-B>fj`ODSQc9gn4)!nWnoHD+;h9d;&TvWb6UM{=k+Q>*@F!)!wR}2f zIvt<=IpGYO4rM_vgpa*V+^;S#?AvKiw;TXacq)LlIzYmAH1|_*;D3DPgi*MUnE_XZ z@o$(>R<401p$7P%h(hj*tTuk>W*#Owr$(CZCf4NxUN6X z{eC)B9E5i;fVnBng8$#Y{NGMg&V-pz$mtoqdk8(@s9 zr0SXv*y+-!=x(nZ5l$2Y_TNHO(m7ykuM3Y=&rvm3Lhb}=6=~f zP8#KyQ)+l#MD=gBlc>@C^$7cuoxD@lEXQd`IrDxq;J5aRHugvpHvnn6gXKn&tH)zM(p$g!Xk7)xiUlVxjn-k@7==~a=w@gpVJ5fQe&O15XWQgl5aTR zjRUxf`$75jzs=+q7NhhF1*G#IH+(5b_?iuU)7&!Ci%7TROL@xG9rjk-CzOXu^eLm;5F>kYi4$ z4_ZfUYCbCx^#pwed%}|vCI}&G-A?*`!^G-oriMN$dIjtwCxAdQB5D`$gPP^_SYcc< z4jUX456HjJ=n4$(RhHRl`B*Hj>ZO79MrN)tm=i;1GV&PBxUHN}#pUe|^8oOIkwqZY zv(y{Wp@+8zJUX9>O4L#pWos_RQ!&`twMkKkoo8~P^Jd1R zJQ%U;2eT1xY`PlVH!aT2emc|_v-S`FM%EP2?u=kM3Wj$u=I^K9c~ToPen|v8&gf$f zShztk2GhHB35Y()Y7@_EuLTSfGM&xep1@jrz4bfVF3r|9De%zBYw@)@@K#$o+T0iz z_bTEj+EGt~Ii)y9YPHbH4zi&S;-Fb8eU8b3tsDeN6?=R&+wE4nuv73Toq}+wd!~1p zTMGMIQ+n{&QcUq7maHe)5u>Ip{&Umx&=b5gkOYMg)b}3N5ibvWdhy5q^P|@m7gB9f za|bM|6O`TSMTqzV4mun$i3PL?2ed~JYbH-|7t!^DD1pk|$t_@9MvIt=Cd1Uf1fbY# z+F4YdS>EPb55zD4VmMxgjtTzW0r~=gzE88`(?7(AJ#j!^5mtfp;lST9yZG*akH%bI zBH+X5>%&(Xvg;j??UfhU*!ysQ{V@v+|6G0}*zv6g`3!%ZecG%9MgYCM{#$b~3e3It z|RE7x51Z-TE_{ebRUY$FkN&xBKn&wb?|P})9GS8y?izE&Tfjd4{kW3niUK0Dh4 z3G2J)mM7Qv>ny=wAQ&X$5MJPXFmaHi<#mE3Qnz&Ee{t<=(MV-6I?%*a!8GVbHCYqV zgM;;xQbeB|{8)IUc8oVLC2D5NCak5##)@=SpQ4lfV~dd-oJ9CNAZh>ul#vNIo+ts& zEx>xt;4i-TDVaUNrO)&lhR6i+#}W#5`I&yKrrOlZd00i4r_hHe{F7Brao2_`FR75%*X^AzksOGbF`Ktwj>;x33Qfb(;(pRpB!11#5(Ki*?iG~{Ay-Bkn`L&zdF5Q5s;$sbUuc_`ZMYg}2 z36%#rmObN>ih;Hq`r3%?{D%!Q5wUnenY){qm`SK5UpCS2gX%hO|N9^@;936zdL_Bv zdBejaP~3}RyZV^Otb_e0es=@jpzceRj6SZqXUITe(;M2NLNe2F@}QZqF*QDx#Q$N6kS?97g;|K#QuQrBP zJ4)1&DE%!GgD);kf={9NByEY2z}g`X!(9kqTy`ERCi{A-rH4t!7<+=F{eLknu-h{k z*T+MIi}@1aXNkG5PxD@}V6|`te%jzvHNT*W$LNu0An(pwI+?0SM!C|MuXbFCUMYem zpFqx?egrTHTMC!TtO>o`F`>U{Q*WHshXfCYNb~h?_V3Q^b)D0Hw!vzqxb@0y}# z-(Z9i6N|ra@MSR&{SD*c3|g&~51$WQP$X+15#OzGMzF;Uqhdkk{rPI3iny;5|J0)M z!_U-PCZkY2-29p5Ot)jA`S`8VvsVqs!>h}%X6Y-=#Tiu&dtQH zo0E@D<%cQkj`U}PXUtfd0;n?`?je<1x9U%j&=v>`@6kqCg))4RFNfIir~6b)nMJwy zM=T}uA*E+K7)VU5x$DGKD)}eZ=v3;key{Iu|AZqJxt!hlW&HtK5=Z1SO4y<7((_u| z=%}}yF>7HlWX7Z6 zPCpLgE6=fV2qN4vlooPzkKBY3&=F_xRxgU^=57Cr^injs)L2ZfzUZRh0#z+S=4g-# zAzsGj&Sz$imb>VmC!>EIOOun31tV#9JfABckNIw#mD$81eM}KI^7J0bGb(F~ts#{7J*u<+n_}-W z1L-YNJKoc!ESrT>HbTE`{G_-Tq539T-k8KFz^5Jv4FB-;Jq3PGLu#*0_@Hxa=er7P zj~c?|GlZlFxJX&)cS;Ephm*D{a|sYtw#ul#w@)>UD+iPH##Ry-l2~;$Rm#F`$ioWA zMS1b*f2T1#Ow;qvm@QMb1u)#8x-M8yr^Yc5W*ZKgB<=IWcw5X5n^wKg7)s$uEvP*) zo9@NG}5k}s9q4zz~p3F>%xzwrR}wm(H9;Od5hFN zwW&OLnk%XCe`cy9>SqsqV=$m?}-LV8~U#dWg4dz}M zx6i9*| zH{IhOe4~y&eTF}8g==ooHJL44t}U;cpLagZ4>HXonAnHypVvXp68C!tmq2~pm%@4{ 
zV9!fM$gY&V7f8LyiST*y`Ll>Ts>unN1;t%w3OnX^g|@}JvSOz11vW!JL@!rb&1mDV z$4T|EuSRkkc}4KPq!Z`?&`Kq&?l;_Us=fWpdklLRSG>WF3h!zsUC@bX_M8$UHFtO= zY(v}aZ)JZidjEu}@O=fMU9R}(0F5m0(6I>7n0awk!-;vnJn%g%CPd zL&aSS7vG9usc%ZPmld3=BH9(T@Os#?y(2{k*~fc1z_qx^$DD!0mZAh-23D8E?%Hy? z(>tGO^7z^zb#S?!^x&q$sVb1H;?&w~z)Wftt}>MOi>4uOuTYzl{|N1;0;>-99fovw z({@@hyp)#NPQ=qMW~?`Yc^b$EIT<6!BGdqoUwSMlt#=z+wJ<1+c{h|6q$F=^E-XxaMVLlm!fR4ihC z`z9yV(DVL`42Dt=^XnfO4M{9}h7Bm}2OL*JDNBNb>Ryj(TWrJ_M<4$@^VZM}98aT(Vx?&qW1Ry#j(Hmw8Cq*M6UwO7{d!x2qqW zxX>PtxzEwT@xMGAtuZih#J z+TC(@L~gVHWJG@=Jv0R$!9rG<3^VS3Q@q-JxtGX2b+V369WY&T+VqJTo!GYg;qB8& zHIvo~Z2RDel;l=Q?JCc`g(tMX2_XKK#`@VZ9V%H#rz7eSne@-Kr449Uz~^jZRJlE< zldjvg?$c{FFp*L#4hHkdlC($8ez!y>M1A1l1yK1rx)DYA2l4Z5w>PzM3YL6jMc7m zn@TFTuCWcNycP)GanZtJEeST}&=koRH2-_nr=Kx*?2RB3EVv2At~y|LQUb`^dCmLu z2y>8^u(D*kuwLwlyVeZWBK@kh8yoTvbfe zFGkMMW&3eks4l#)^D|(^6qXz({KdBmm|O5YL0|a30(N&NHIINIfY=*gHk|J#zwpS= z`~USF2Ox0h!T0q8I2*F6CZ>#VQyNXU95WV6n_sUQ&_jSfgxuwuGjO~_puqU>VHvd1 zHf!xe+(WE}-hw!w`-}JEL(pMl9u~j-gF0X+Gty}~b=NUDFA;7N9i-Jn6frS#@R_ZA z_F^PMI5eX0X=!MV;*RObAu0rG5_Uc261R~$Py6U!vDE0tmkE!Y7+5aFRSH*6J4wAb zOiQ@Copy~;leix2eAG{;R&QrY`z4y(JNC}6VzMrh3X>zP=T2V3L3Z}x#;g^U9)tC3 zHM$xDP8sTAc7mN5%r)s(z&>;QCYUnoNV&9Qb-I{&Kp$0n`D?0wWI>7VcV<^_Ea zW~=3892$+g1J#LLYBH8O-k1s+FvT%+YNvXL%1ul8*%8aO6`XcfP-d_S>@HwM^vOT# zPG6FFlxJYa;51T{)al3Dblbp5Y>o7a%;z)TYFaFFZ9b&cJ$oo|0AlKKzlC3fsibGT z5Rf?*aRu2%TrV>hL{~$Omp#4gey5pqXuX_7SV)>mEQW5u5rI+_2a0|OV8Og5L=pFH zi+RY{3!xR1v|NS3+yLEx{KIbuX^bMMK){Rs)OX#00}zI1fmuktpZcTnOh^A$I1Rwu zy{}#1ldsp8FWK)8lzR1(vk?3^0`n-Z{J8C4S3kC9f||@g~EtzaU$a*xHmmHueRaA^gbq;*S!2$(iGzc?2tgf$B+ zq9LQtUwfdp;I1@?J0?`9%o)pu_|>_A<;AG#M7D7LBO)kZROcxdQ!u`J$U#0qD#R{? zx39)#7b$MuhQC&B1S$50R!6L$J&^ggHH%&&Bk79<{uq_Z2;!-a5EyL2wjnNyU8AaS z%jQ*c6~UX!0{-gpVoA?w$627jN)PY<3i9757GKl`w(w9x%C_xF5_CXq`y}2v+ z{7G|W;Ks1_B=hmbYLa^o)o5}Xa@2~~7PXC$I!!Wim#P4pI>7}xmjH!L)#{5qdeHz# zZo~-YDt-Di`=mHX!{Qq;OsNNSyas-2CFwN&C%mqg2w;kLEPbJj`!U_00Q!s<^fTAV z5>r8`MA2yF=tg5ztsu~aIMaUhXwq7{;FGKOgxa=wlXv^0yCH8w#hX?|VP0$?e#6(> zuLk_Ts^JBqe7$^zlK6Z_n2*1o^n8=wcY!W!pLf1SO7jcu;sO6~IkunZ@A)zObBBO; z|A4bU4-v34u1{v`nfM@H>__hN3cI{TO=;{|znb{cMbr0wy~Ye?33*yXywjvI#iB+NKEZKL1k;5&IyeX=u%(8}w0=W^4cf}7p}3)S zv+^Fbq9MV=)3s)~Vy!?nu|vJ}i@LMNtj(#Y(fRQl%;0iWdOnH)=}<{@a%Tm}Q6rt1 zhv7Qh@gZeIX6F(%jE<3O2&35|_8eT`D~7VnL?$J}qbaOQ?{4O5XuL%*+Q^Tt+AskU z8>oIdp-|8}L`SS4t7qGmX**ga2-OZyYz%a|bVVceh)G)!wTq#G(5balCp(7-rr>?s zD;dF3gGdV?kEh$t?1FNky*D|9eL?)uRfyu5_mLOp;O^spw#Y%4SFYg*qCjVRW%7edIm;`5X<}W?{F-#3e0E0q6E$jnv3g^P?Ql%01)-53R z1*p9BP1>vl_7B|wfU6GZZ~WoF!+*ZBVf+As6X{Neo%lUYNICbf+|N&XpRZhKUH7F% z3w~2XHStJDZpq$P8xBK34F(c;f0@v!bs+gRR%0WwEC<4C?V-MqcOpLtK*c2dsz6YL{`Ytf!rCmqX zSB>y7@IHeT{%;XMp161KipYMB+zV%I2%EK#_4WZc@HQ=Ni@^dzm`4#KK zf8vxrv29Z%Sn5l(2V#HV1%jwvp!(#&`O|fKJsVtF<$4)z2QG0c$#0ZJ z)0(`j;*_&6io2IKIJ2tJk*&F@r z!%*4go?OaSOsgmA*vIGKm-i;pU)ZQUi+4%#b)`wnmXJTf%UW)tG-LRCUj#S6Qa51X zYwyD@Vi)k+vn>jc-vhqB0B0h;s|G4thU-S+HVdru5=hHi0Lh_FoJ_geT)on`-X7`vOm-736z zLO*QwCrF~$rSn!hS=7Do>!i>c`6^k>V8R0lybm1K_)AFW!DuLGbxb-Jo1H&$QooXLr|n&y+J=S?dTFFWK-v zaj>Q1ut*KjSFP~%W~*zrY+oqo1ELQ&tY7~zXIWizYaDN^gLY2(nWK2^+r?oT^cdr7}?jjp5w=-JsXR>#Fydu+8^0eD(6F?GZ)4|E_LZ}Dr zf6Uos9VJ!>y!Go5t905)`C59SjNsl=_$EzM+uq5u!k}3zwE^kC&1-g5{`Z@$URC5jPIwD;VALPas8G#vG z^@vz1bDu;*oq|>7nSDt=1NAlaY-JqaD#CHjrq?SepRAp|{p|%3r^92i=K64ircdJCQTr9kM-Prul z%ceEIk^D(zzWJnF0`{>6kM(I zsx1orsOTEBD_VEHg1odh?15QSfxN5Bqe!N;ULT;Wq0tg;c|lakYhU)<&X>|+&i7Z0 ztVZ;oM!#m}myjvasVJ6{5dD_QEsnUrH7_XD426VNU!-H4B0u?3r$w39?|H(jUAW z&ggi+Uebvp$ijm4DS@m!nu*7Mp%8&;{=?@fjc#rfL?Rh(=7a!hlgpaS%rhG0@l(L1 
zlN}8Px#JQ-k5hqGqyOXIQN|yPX_fs6@C?>G5aUGb^dx#CmKz|r9bigt06t+=Ks|lz z-ReKrfBzVC{r|hf&N<8dGFDL-3^dh?)i%yy)uGW`?;kv*m~p&ehxa5p*sM%-p{;#6 zt6tWCSGSq`J`5Le^n@1%E!*3veYJ{(b{R*y!UtpR(|&3SwZabLN~fAM^nh_JhN5`Q?52fc;|zvzl1ZzOl(FyN)fy8j=@<_r zZMqrZ*~!DvI%`9Fp2`m+XQ-ar3x`>9K0D6E9?JIdXxM_9lFDxnWdSM!RUJu9*@xNk zzN{6lc8bjb@4`rifFb8IK&rufqw*Nb?L4dgSpTA>8bU&xH=ht14V1mMCXdA-jSEsi zxOBG2y4XZr*za$IFzvl1lyiz~y=fcd4jmCdnw3Fs`0nv!X}WWV{6 zWc|6*b+PMOjVg^vpsivEc_i#^rsVi}s&4F%;?kwaHyL+gBqcew8EJ2HckOn;4t8{& zxnzkDT|V0Abi9jM(o8tiaG7e^&)bwmMW2CzbX(ldu#Fvmj)7&cSSH(!U)H z>82V8_kq^`(E9)G=D%?Lng7BDt^faB?j!%VI{$;d|9LJnortq5@CnT%BKII%6L)_0LNpmt^V64_~eEG_Q+_`9FQ|n6w ze6HehR}?4*HjD%{?NC^yyFdkoI80}rr$PSmNJ$$)L(rrZ<#&p0e8l38jzj2uhG7qG zMU{=rzb;y>RZj8GlAZpUCSal4AW8MZQI`)(OC~Z>nWFZ9vRqeDcN<3giV@k-aYr(D zR8!8koAy0I4+Zh0N?KN#2t8RUli{FX8CT5nrB5TT+rPc9k#-fTUn=T(oQo z%!em8izUB8`CD?yM(M-s#45H}(QVINg-qCf$&9t0H@R_jgN8qiPnFb**!aXv>70m$ z!Gz*N{Xf+I-(CObAOFV+_5c4c_mTfw`G2VX-w*$L|078_@S!7t@&G{fAFBU%c>nXy z{tp*a|NmX?BmcM3f9U+r`{A$tvvG;GKXfEe9)P>;|GEAA4|eGMpL?Nqpl_aT+uw*| zNf6)JF0pUlfhSEuPE#ozOXa1Yst!Px^R+(8vsI%*q3uGDrOfAuF9$(|j-s0KsE<@!T5xb~#{H|@wXp?Jv5whjw<%VHE{;rtxvJlE!R#&P z1ba)i4>fT>uQqc6Wm$!C_J?0JXHu`to12w1ge&DCj3+AZq)@2AfC==3bK7G^BZ`=l<@1<#K_x_K)^L%QmefoGPLZpNuy?3PdjsnsW>4G$+MruGnFh~m+ ziijXck)|NMg${vR=|~4dlO`xgZz8=2@*8j8J~PjZ_W-`g=N~xN%$_~p-M#kg`j9=a zuZ|0PMQ_qQDjnfv!T&0o&U4H@n#Dyxd)vM{xh~5*u@i{=QHxrAy5#SPDzNx*m1yp8 zVt6y-#g82oNG_SSHy{g$BUDP6>9w5Inr1)Gai-Jk-jA%OjiX6XrdTm$Q^v z$J;U3)9nUB@t(XFWst?3?fu_>*#C{~|2Y@TNLD13t>w3?k2(2%MqiQ%;17cmCd)j6 zjt0fy>D7|e6xLW}1+@u*(rR%RmP_=g6)Th8&S#-=l)=)>)nSW0Vz{wGTHLB%;v%_{ z@<^3qqKtuLmsDjKhmDB>ry#3^*&Z*cuV$$5K}XPbmQGe{$rKfdij77Rp7E((ry$?m zk@vK^mu%=r7!sh@k6@gyS%U7iTT88i9-__N)hd{ZbEEFJ7pL5|U38zN2uU!efjCY_)gs;0p8NaLm5J5IQcs{^aVD6T9=Tvl~tgrthy%#NC#X9W8Bf^=xZyU6M4mt zhh!9FCL%_ZE;WG-wvLf zr+U;=_>I$eHyhH1jx+5P@kz_#c||!`2!$>s0pDI02(sL^@E)T7a_xk%!K=|O2f!sz zIQ~uX#!J(Gc8q*FH!>J7bf?D_K)SC(r>%$c`a>mk>efmPZMQKNO!HHT*V+}QBZ6Wu zh|-n~beQPv@&4RVQaXPOYy7HUpbJ?U+ocH-7uc+RHXu~LP!(TVNg0RP#1nSCiiDu9 zE-iw;une@tYeCS0@Of4{Mw>tftNxJ}L3kPhciY z{HOE_C%;+)+^}I&VgPz3MLaN{d1ft-HM?y4?Jlj6-v}Z5Ltm~!wi|>MJThOg7`k-5>wJUMOG}39AeU77 zzvBav0{BiDLv3x z3r~D^0&?JVuATU7Wf$mHQ++vS zvI5CX3JzC(2F#D{9}g(<(&U<8{6g&VU?2AOyW`H?_57oQ=(@&-b%e_+TwP7UjOAgk z+rcWV_`x5JVCd655yQ3i&=W`H%eWipB%JvO>B$bF^ zQ76+=R~5<6QH)WdNY*UK=5($SbbBd+5Rs#M4G!QE*;cOseApix=ogjiT28SNzGcx; zHc)`5Z4$Rv{*1%hMq+MU?eSc?kz6NID5&_=t{}DFb(kDp!003s%o^M)AnPV%9`RTb z#3N~I-1{unVs_^3ZMn)#_Qj|#6PeEA+^*J3jl+4tdw?3wjEyi4L)yw$$YXIL9O#C3 z?4GBrfjSow?&Wf?JUOLJd)87;<;&I0EA9cAbbMKLc#O@ZX z`i+)!B_FuJ5S{Zhk)QAvG{+QR^o#LQlQGk3gnUKFY0^Z!zw3Ae$ttq^~uc#9_=%YGjMPT zZ;@S(~BAG3@@&znJr|{~zW5*!f@Q!|(jxwR!uj zzW!MLX3qBipZ^g5WA}fY3uBP7+43Xfc4d-&cAM(J>7y56Vl^5n(hkHD?wPIW#E(cxN3U@71Y{l-Bo(&*YQ^3W&V~xpwuPj9Wda{e+>YW=GEInX0`b z>ncxOy9R&`dZ`gYgLC%ZtXfEi#kTW7CcV;i^t3LhpL0_q;2( z{CuWT0A`Zz;wBN>BP<|1-%J19v2`^GpCvZg62rLJ4gUNr8a3Ir5HtuhTwiq!=F@MJ z$Re&G=p|}Zz*rF@v%0LAm6S&HNQkD2*dH&n@hPde^YaL@E9|+1=J0K0!TPJ-K3LWZsqe>qo=M}0DgcLrnP@iAC6zR#wN-1L79K(5#I*) zoxos%W3J#T2kib2Z2W(Q@!zlg4`SH;AAd3DVgEmh|FQaiKKzdV>+Y*_UGzz?TmWa= z|NH0hza%#QKNl{;To#Xz;UlxjnpsP`3-Zztnf7LVIH)C%VoP2$9;Ys&$MK6TF_N!e zkolr<26D}G^bJ}>l|_5h&e4=bL?O!(5A??y8s3{`%EDQieI3MWCQKYYWfie($P$$2 zt$y>tBQHzi?=yBi>s_^*ttclyS>NlLq3m%}Kp*56 zYJ>*=NM>n9IpmE*2r5)>39xYI^dI_C?SzPqveM$7onMm?A<@ACWqG}a{)_m! 
z7&j4%>Y)x>12>!NrpYD3da;#EJGGJYz zu1cG5;^=(c#EITD)sWE51~>kJz3dA4du{wmRFwd=Lw&mNAME}>il+`sZ83$dcMR9A zD6u`-zk9R#Yd&3QhH2|zCfVi3c6~ARak6*G!|&HDsN z)iTLYOoJrnF~(4R?r*3xU@A;p3HQ_Gn_cy>jAbO>V^>Y^u5@g8O4Bd;J?Z)rkX8oO zsQtX}BAo&Ct#sOrpn=+!kF`C98e5CLfp=5w*6$)alU8_?D%O<3I|`USPLYq;rTf=< zN`K0)m&oOOYX9=4=m!LWefa%d|I6IePju!j{j$1=DyqOqEgf7K?V2#-fooPh;U1px zd?#iuC0yX#3HMGwlDE$m5h{r;`T&&1#`IYaQVy+HPprT4h&*YM`R&o3Sb6o@>lwSB zW7R?Z&J~5`lLbvE+#^ArVin4Pbyo)G=1jW`UA7b))ot*t;!@;6Xyd;j+ra~}5pll)&C8~>jVzw`f{=odv7eG)7e z09OC8`hRxyUx{D!U+n&$znJr|{~y)=vHkz&!*BhMI*CcW=#yZ%0I>Rx)&H~m{!9Ow z|0s^t|G$^>u>T+FKX(7$`S4r+D-8*!FZv`{E�lWA*>+_P>e$+W(5}|NDzM5BvX7 z{U1C3?|k^J|CUDcOBa0-EEm8T-~avljQ_{~|L^~Qu>OB8C|j4+sutGusUXt&3<{2N z*75ypL|pkcOW8A0wTB*Ys^`VC;av%d4Zhq~)B+iw2nIftK1>ozKO;n;Gx_5HD z1AE+>pI9ji?&osO!0Qj&5^DD~DnnDdLtc$6@Dvh_d-jJEfF3pliV0%Wc2d+QASwR< z7c)JNeX@AzaO5hv*=8!i={Tca9a?g77QNQaBQW$o)#~p9q4UE;GG*qNe_Q;nK?pJQ!Q~Jc-C90LWj}l z&-RT}5sC51FuJ9L;eF>~ZI1Fk3``ho6XMIFKDgHamhyer{ci<@;X#%H>z zIVk$Za^3u(89NW^Sj=j>Y+#~o<5PqMpDFf?~CU%^PHMzV&=!MhEq zpRFsuvNi8*@ZK$6G}%endjen--wC#U0e@X4`|@o2|D^wY$NtFwvHkz&!h&$bz56=& z%Nn0_Tcb5}lX;vQRQz_dPoI#|9=|6PYN|_GQTwSqqM?}SiV|j3b+hyw!qj?Hth~F% zMsnkgPf6i!VC@6TcE{I0mcyDbc=^tQs`VOpMN()goecW#T2FC$F-}=0h$P#_>tK$j z++}^*9RJ*q;P-zj9;n4}6LK>^S^xHk7r?6E~Sj{8m`R0JI@< z`*g#l#Z)yYRac-`2N#YP>#-gQZ~r(tvVjCgg@2IQh?ES>x+z3^tm*1cBqQ`{26Tz# z{un8SwD5oIo%LJOZ5zhPA)qk2L+Mb!0n$i^Od13g7&1~yL{hp_6r@8sq@)Bv8i(|d z?$Hgx7&UpH_z!#!@cCh%f8f54 zpKhY{A3CW-RUN4gSX;R0)UKS|Fn1sRxUqm8m>i!tx)AK0?aveX49j4Gi3PjaK8Y~f zvE;<*_1IMbgo_D`k=Ph_tVT)*wWgKO*`S$sG3Cv*m`Nn@FGQa zQYTfM_YzO;Eg`eTJ~^GZpm}GH+_Ji#VV_@ba|ySIS2aI;Kt0xDn=Zg}tamMr8-GEk zZ~40yqSi=Xsk@tkZkc!Wl(Cie6h2CqZ`;~4R%p+L=V9_?M=%B{M8zOpZ8{Rc5al;n}1IOvNE?4Z=DC|i?me#tI@8kW;$F#XFM;-$6Wg|BD+C}k({ zZ&NJv#ivrXmwW%WDCnPO?6=;3%>3UgVJK2x2nw&REkpw3@n^kcQuuPeX9WI&_*Z zGD$g7Tu+m@A-hE`V%5m;%JP{5Uf?HcO9%d3yc-1P*Dpf*W6Vg%)qC-L#ats0lL;=5 z-1qeCErR*;@~U@Jca3dDf>t63mG+Tqq?3LI}$`C<5V6zugqzhcqz*OfEh*?=O9hD#@6~0PUNLdkn#dr`l82iUj zj`g_=o!@x);FE{$E;~%5<)0l&dy(P;S!9_#%`GDF?~?_OUUWcroecuZ2xmQ?GRExD z78hzATfel!r}naUgZ9Z+s~MJVqx!I85O)>yb7>d^26dQ77CgToW`u*9w0Ny=@UJCo z!h37(HFA{y)J}o*Z&g| z?ymgPJHhY=xZL>v_x-=SnEJmffu;m++MCj;MZ`r}Zu;;Drh^+@Jn@9#nRufkUzuD4 z^p+g?lITbG8=;i#owN-x{&As?eEG)A2?C1<6F5BKIj~73?7^ynY`O@&X)y<-TnLi| zXJ=J5u=!-qlYGO=yYg+)(~3DIKI zuC~l*Xyg&EaUx;|acw0koa0G0&I3LhFEOzIm0G?ZeS|<-a6Z)y^>%v* zx3h&oTG#m&^nI$HO_N`4{_p?r|1kdll>j|Q2m##cSLw)%y<+bTaLP+WRhZ-o_VbzA z5lF+x@K#HR8O%OyFKzOO1ihwb4L@eKEL6c)M9s?ih($)1SSxxmU!=?X9ENTM_KySh ze3#E>I*lA>gOOpC-T1|D>c`yPM$tr)1anqa_iv*qlHBJjbjxg_b1h{W&z_0WacIW^ ztLR}iyOOK*!LJL1!497GjI;ga~@ znl|jDhAyTF8$K%vKV1qax(=@_)*79qR~PR)JIy)8+nnWrBkLLm}d0V zw-`&@@Fk0%52ptoR(2sY3o!9NCjP&?`0wxj4?#@-&wrS!IRD>?|1tBwu84o*|5CcU zoqu{K82$j4T>tma8ow3)WA6XD5}q%AA}~rf>rK~wh2X?Ty9(cPS=PuBxj9b=a~K=w z*uPMS0^Yg5lM_U`;@z}pASJE(?t`yqNeo-NUSJ1L7RXEmb-dG_Al)`aO+w)o6a#4# zn$(_7ivhkq<2b(qEByIH6=0=7VBJ659@6XCS)4G+F?`+x|1te)aDQVdU62VNy9obd6F$<7TPFrc$R5(t@ zA6S0^Lp&kq4*kJ~{Qx}h%#t(F>)Dhuq3b+BY|K*DS5^S|ESLX#>}I@vSpdoPonFqf zDO9=I(1IK`T4m1TT3efeKhaWi>fxg|Jf@F=56GqQ#0!QHC5}D}d?Mpdqp;|ZSu{O{ zha2-|ZxaeSN?&*9WSfhPEs`}&F7C%Cz#UqJ*3HABL{uWbOW5BX(?KCyd&2!qQ&Uz3HS4Aj6|nuC^`Pgu z_Qo7A!w=NE$+X_mJ(Z2H>&RdQJAKTwM^#7?xb3j#6vj2~StjNSmPE<$5G_Jk&e*vn zEbrEffotN0LyiPyek8fVK2Fd)dcUK1+#r$o=n&v z?dzSZu0lZR44*UoxI&>Z0-r1lXJQjG-u+V1mAcSPC26oTw$NYuB4FU{ZX`HmJ3k5x z*_7My?^d>_hW^4Vv+hVsxa;BSu zsftN6AVpO%#I{VFA4I#bno5NDxTvVI;oW>@ZFy?4tr|G@XNa8}N?S(By^N~06o=m7 zf-AQ{f!evnCAvb;T?FV(T>{S2P`)UuFQj)17Bv{Bvb#x^umnI2+RW`}36`v*z6NdM@W11K$_Czfy-Y=Y z0*j85UpLWPtPc@*CGJuG!Hj)O!(H-J^jjwZOcb}9sMSKeH1fxMW9}v~`3XOC 
z-9Gx(otH4%MK6z;|B2E6m)QR${FnX{6~yTO|1MW?{=en_W9t8|hJW>c$I|lYpWX?E zKfvYcfAN3T_$~d9ssFnYX7$Ql)I^?EYx_)4h+U+25*g2k+n9YkSrAYWUk zbZ0sc52$CLdy)>ob(O<{-~wF^wi%j-l$#^>PO)G}qt;b(=*t+s5@&N7XDv2IUK18~U8FAKHnUx@Se<=Abnz`buVVckdTU z?qzgHyyJsCOTS79n4EJ&CKzs?c6OnR&6~KN70*5Z%C1oBvKvHNG!iFp=pBw~x22*k ztTh58*PQN7ooVdfDy+<%Ye7BW@5hNrOOu?}>8B-k;5v$6Zb9`phb9* zyL_^}lQ>gsA3GK8JsUnn8j;K%N@cCk-jLu<-|KA~cmE`R-CY4ET>i)`taUV#5KiIn zAYQ+iDsTh-)`_F@*G!`muxsdNDOupbD=YwoXqTZbcq5r2{Z!oqXzc7SjrP`9V}2-Y zF8x6P7JBeN_Hyfg|BwDZO#R=L0A8*ttyzoIhvVsb{fe5-Blg=yKa9hzy@@)LyX$K$RFXTp&&q5db1k=VwoA%2noan|btXW&((qxTepZ@f(|F=zN=h@7P$4 zfIxjmaF-Ds0*7raQj{3KLuXx2yi?`YM#zUDIWXWwPZLkeuAA)5u#lIr>UGw)SPBOB za+%VeUqmaX^b?GL#8$S0^7}J=061p@M92O>=_*ix-|dWba+z)ibp@xj>-sCHyJSr- zdt1LN8z>l+pWGB=$XInTTT+YfYqhHQaI5N#$sMc{MkjF~GJmH#;;p9Pr-_O#*T5;+ z8uppTwG7wmxU?ZQe6FumGw16HmPOE4vv4TueZw@#^UD- z-UzJ0d&gjnmg{_ttlwHV&oQSmcoJB4pqBhrHBLfXvGRfcEDU?&u*<$qYV(c+2@P?WwH;TLNMqki+M?Oj zyCs$K+aVQ-&0KgaT|`=!TmN(KpJxo?|NVEd9vwW%FJ%@~G%PxPHjdR0S zm=gFwMN|0&^5F}9&;;a8KB z;0Om)v)%~N@^I$hbmIj5_9(-8qIa2|sOaPW*gMatCbw-3gLFii6bl{cH6f5tf@r8x zq*p;Ah=2r;-Xww)1*L;D73m-dh|)xm4o0d}0g>K2n_iA??znA_JMK7~O>m6!J-@zx z-yE5F*PMCh%329icc^vnpS2mdTg0aIMO1n9dGh*={h&7~_2>1U%hY=&-p(f}=**fC zZB=4G_R|V!!nI7R+u9yn(6E7R2Tkm)`NfRWryZ975;@bJ2}Sy z1?XtNt~zm-H-2i|n7yx#f14~c-p6qNyuto1`$xLr({(r~!rn=L%y`2Ddu1%#AfM4X z+oWlF>FNQgk)1sO81ZSpcdt8JK(96hDwa<1c!lH`+zC>rswSJL_7L{pc2G-6xe^)6Hl9v}P0 z=YNoY{+{_khhS1YLGpC;mUY#ut(&*;RC2tOejvWFVy>CI>#vQF?}~Dhd@(2Of#4GB zrB~pO{K&!y_fqo zd~tsaT8Vd>>SET;#P?uWNOn;FHPeV>AGqZ+w(oDTc()xMw2TIt2LA^bB-)MHu~O@349lugBh>0 z1a-m4%xZA9mvJ1|W!A52*a|gKJ=P$dUo-tm=7>rrV)fth8JV&&6l$L#_a?Zb6Cf43 zK5OR^yCj_@Uq<+uEsZx+OS?2rJd$h1scSRG2q#tSmEc&(&F+|V@PUf2E%;RkS43QL z=%vU!YJZ-Vj*86NR;i~pwvq2MuZk#k_i-wFiP-mqS~Ps}Pkl1|mB~pWUs5^K%p-xh zogW;|0LeWQOX04j5`WQ?qC8XvwVm~TaF=1@^t$EnJ(~RR)lQ?^7Q0+-$^(?P z;r%UXsXN`pr10oRZ=MG`jH`0MJkyCA3LF+!k!aXgq3hp6d(EzH;IbD^Uzx^#e9k%{ zuL1ACmQ5wfQ&_PC{C4tg2j}LMY35_mzo_WXN0I;B_xVrM|NqdxC=?3p|HBT4_5VVg zIQ?pVNqZJ_}hYw{Qq}{tE;3A1fTz z|8hP?)sv0{-~>2k{r|ZU=>O6G|Jwimr~ZY(!2Ay@Ao3qp8&ZDv0bvFVDz4Q8*1q+Z z#296BXwI98;7n4T+ifSl#&=;6)N$d*h@klS(&55sr(xd!`b4d-jtxnl1~*AWH3w&1 z;5&G0R>K^M8i8dJr1gau`=qGEw@gm`rktG#~Hd=4e&Cm*U5s66*B(j>cMC7G+VSKUpbTBP8`rPHc-^zQgC z$~p=@L2zQ$6kn5m!)NMt!Rd1i&3S7w$gFEV_u1@xK!el6*pyY%VylB0auAB?4VCe& z@_Wm{f_#}f%E~0hZml*Ew{=(~SPL@mq}ycZizwPkq{_Q-nEJp{{Z*TVrA`S*a#mlL0&Qdm2kAU8i@)m{{OcH8~Hy< z|1S*u{~tRX_W!D%YZp#B5`Yunc=Zp3{CdZc`hVc}KUUbDSpC7tUa=Nnyj%*e^pEkd ztqxiw9TKwVB{wz~sFU?*m&{}uW%t&Ud0Yan;RUxqsQsgR5 zSC}sRk${cUd&jB#t~+iiS>)55d+H$q_UpCn%qd9Svd1e{k3=)11?(?VCg!c$8d-YP z@iC1Ja$mSh4Yla0(69VJI$xapCM-|MX6EA$3RB*L6mX%8_p2*L!7UEbv{zo=ukg}+ zE{)S+tmwSCySt&=aM^~-!%xL>ymWv6+5y3kG;&rXzdCIj)op#3Q@#Xo*7A1hgpx~Z z9Nvz~!F!cGz7=Qd=n3X9y_GWK)j}=&{cDfm6w&7bmf6)qEDI)DrK_LvV8B%KpIn5O zeLZV^?Ud4MOByF}v|z?E)1MbmGBNA*zu_o5D!J zw~XTCJ?t-28#>3X^RaV{7VbYQYHg5?GLxzY=Vg*~?6g62I_yRY*Q@C%R~#VP=`;kl z@`tyzx5&vTuim=h_EE)IeA#b1wn0hl`#dW-EVp5?{%O_Y&f4|}YKC!&Eo7>I2?jGM z1V&b=$JO}_N!f0G_J-WEXf=)86{DZqgdrS?FCMS{A+TTHu_NgpIR67HG^VIXmxR+o z)+7V$jAcVO?z(HromZhz@WSohQEU`=7#iCXVx_1|2Aa-Je7kD#Swy_peJI|s=zPgOey0H<@QE|!t{8SDBvyLx#+!go3OZdGjOjExUIX5z0$pRf>yDPGiavE zS+jATD0XzmJ!pQDa-cm=T%l^#>+&Gs8wgB3;S}0!yE(S+`s{mW_Tid^0?9%8JsMZu z+X8o9ng)R+9dfqor#qxyk#sOx(K(}hs&pq-`n#9Hn?z_Hn7P#n5RlnCSU#%?J>OLy zv+!|m_GS@12(uAr&QOI68jq&~N8h)|wHq)bYmh(N+p#nO^$%0DRhr1+*^S*M;$8PPT1} z_Ddx`Ye@QCWl@}7MJjnwLFerCFW~IYYH+0q!x@fws+<`qe2HlugmCjL)#)T_GG}d% zN}p= zKRt{ygqJqqA)<6IvN$D?msZAu;vVAKqFHvlNJ~kAWTZ?xSkxzTmZXz-l+em~3Y z<#{4f>}XOd*ynImVQUO(A->H*&^Iss^PAcyvs**QzDejzlU=R8}QK|V(ktBnAD_!Vx+ 
zC$BFOwD1&E>lDTWpUFwYF<#oNM2lXSHQIaG!@aHiolXfQbZxd?TaN&>Z_VY9K=dQp zwXK3}O!2&(+6!~9SOh*0Mvh={vV_NBlc_l0|x5_rKrA>sm-0RPyf@QIH7NYljJ4?@?!xQP-{Q-&vq{Aka}e2$IG&31TM zx?}|IbPL*xH;O;#3?HHivsM|R;^&MH9^Ca9voz0pNt9Uv?;iP-Klj})I4mvf_~`BL z>Ew2$#@TH<7iQfNX}WN|WSTeUBkzd(-g*9B{~ik0G>OHgdr zR}Way(^vbGMAf^-@YUcVKi4HfcKdLerO(d+H!WVlqwP;q&kd%3IhdPs7{}I;5YPt< zh~^}Y)=3;|n61GtCz|f&p-NZn5ndT-eC1h^#x)g76dPIyQlkTwA09(vBgS>z*D9UR zZs$>RC0Ta?%z#FH^_uEfyz+g<29tclT@G#mU#W-RG#&TRTl-+fJOD%;v+_YSJ_7`L zVSe;5YK#JkZ=mfr!ZZ72paX_?o9F*mD_`s15;FY&KmJ!L7lS`m7u5|Kyn|k~_HX_t z#u@?O2I_^=NCyG+z4eJtlUwl*hqwxO-mEJgLaC(Op8=5EC*_o?3A0 z$PgJkj<_$7QzAndM^|iFW!>7bMvW|1-2_|gp-g4DMN2)sDsrk!V&qyNWMr+l!1Fud zhADZ-TqXxgbNUZtb)=1;V#3DwIXVw<6BQR8O-T(r7K-z$a%qrWh%2K*DHLg!UNPPX zd(BPlV~2vSqf*GmLH1>mi&e=(Id0nF)~x}KxhD>AAI-;~c?})?=X_~SN*`;j*#xui z(Rn{Bydx*9b~t%;CM)Ow{a~i&MalNGRhyJn&zES7DDJn#kT)cTLMf zn^crW>co>2_3dcVcla$8l88`jYxRb$H^ExvXrVEDQZ_@o@&(mqAzI zU)KuX-{ZAlFs36)9oX*YSmM604JAsOGk1Q;Nc=!sQRh?Y%tZOtK~54;ywzm)sXeAq z4%+gQmfw(bfjm(thnW2a%hhC~SaV|MOzFp=6ocJ}iXMbF=c5?L0)ucYG~Ry{K42qEiH@d(07&2o7SqFw?8=TumywcFeNS`csfPpuO{K!7chICl&Fpcku1Y4t{Ay zyRdM=rk?i8_YW+16*5%%xXGCW0g_quopmL!i9l#Q6XX#M$WrXms(&zlf?$R*j= zze0z@a;Yi8`;T-or%?y$x*UdeAhGvRO6644qU+$<>T8dm_@f3aQo824wP%?_)+^sW zc^^?fJ(D`3Uk=# zfo~Y$&*xs}0U;r?h;du6@g#b1<^0|qX2w6-_D0p-5n@4l_|=m8f&HlJ2sn#BYSz z(O4*N>r+oS%;bz=@3`iF!XrfN!H-Dfu5`Y5w~i78PniGKI!7^v5s~yPI;a-;V{NrN zxfrO9=s8gLMr{(cv;##G&q&}W7HPv7k~Ofk?~7Lae7y6|TXfmK06%0fK>Sn+-V^<3 zM$onF09Fe?*lkbCz}5@l0>r$-(g!yZ3iTy+2FQE`18s0(aVYh`>(&ALn=k#lcoxus z5B}T3QS)nES36+)v%oP(2(YUIUd;ep!VfA}&$!JK!2J^23hbpkRRUu6ICz?KJ($vB}tMr&gring&OOKY! zy&%UWG>u}(yPsc-kf|4PhPpwyp2hls!!&7&sD&UL0ArFh8o2%S&JVnEzzc|7G$qYkhFhLs(PnpfA__{u5LCc1}>z^UVX(x6~M4~ceZ zci$_hI)IhSxB`N9{T7(5`&qs5+i$&g3d+9e*@sGEis{p6V7&9#Km?0ah4)7CEGX)A zg`!buTg>~7fk${n^&6Y|Pn2<)-k$(#FbUG~lT`2;%>rb{VdS3%5dyUgQQtDIdT!Z^ zl85QyaALCV2r&JZxtedLNHAOiJVg{Tu%R>t7CzV;I=m|?->}8joIyPrX}jo1$fD?o z6p9LvKHt0Qk{&6jgf%~{I;7`^O{Qr+7fY@?xQmSL@80dXtzYNJVMy_$EQm9e(1yPp zC2Cl;#Zqn@C&{~T>j~d$S`JGIr~fW^SA!)`qfu`NUUU(AOr`hbzMS86oB2*2CqCI0 z5fmBw0$zBcsW+nXp?=%}>&Q?Id1#Q4?rpDRVJh7ms<-8e1hKer%dtDAb=p}UFu%)2 zer8p6X@6p~`&dBqpE7-U2i6R#2C{o?&;<54f|>e4=srfIN>_BxT1=wPP!zbdmo8nK zhvoXqbqrA}N}PsXQO+^2+l*qOZXvn0Z0Z>;zAc+i=O|RipTHWWjC?rwEk5V{o}TAu zCm9X1wh9@<<*5x8{|RlCEZpwpdlIMD>2!&(fbw+mTur~$MwG6B5jB=RozZg);fQ13 zzgDvGA_c#8i0qDT#M3N;m>yg0=+tokE;Dx38WNj7FlQ`U&r;4d{h+(?a2Br z!%FNmN&*LKbXa#?Es4t0KYb%fw0Bfi-@l*Z(G-~5WUj&;eM*ms*&1mTcUu%8(0arC z4_SKmBjjf*tSE-b@e<0oh+)U{VYN?vfm34N1B5f$;&X{_ z`HKQ|)8i}$U*>Lq`+A%T$TH!;t&3x7pq!G1DfbTk$$+m{XyHe+j*Ev12~`zxoA{(< zZCdKR7MYj3oS19pfYluF3@`C19W))YVqpx2nN3_{^TW8~h^m{XAmbZaGZfr9bDM&D z(!s-n<1;4fWJ_vxYxpxU8xG8437V|%3C}7l3-?zp!ZmAcKgG+Ge;m+_)I~E$^z%mK z4a4I%|5C7INLrE(R7{8Lt7CiF9V8^~899xOA)30#!Jh-Th`=vz;B}9L{TxGFpTexO7#_Vm=)7Z z?B?7l^-sz$)`7%1a*f#mAArds1QDv4)X;u^*E+JY9L!+}I}#Tdjb=U$LL`fA%`7=~ zL$My*!NxOyl_r?#OVM57l{)$@jBHAYBFFX4L@iJ7y-!~$G)%K|aRIa#Rdi61Z?Nzv zT+SNz#5PxKE^5Z8Lk?kjfc6y-bL}x{vv|~odX+k{8(k(a?>RTN$+Z)hCEf4LnNv=d z`5yD?$T!#iUez~P=uH_T@;3if?y=FG#qpG!AC`&feT+#3S=?t|8{w1CZBAE%lwQ77 zRuQA4Z+fAB{)k^Ub3C;{4;)KMRFnykxvENq?lS5Awi!Ra-rgdKPnCprrfm@VXk$P? 
z&i7tTpIvl8ni17Yr`^=!-HXw-9M8g?c8)n2iu?cnhmA)kX?Cw{H;3*0RW->{U5*TkrB7hK%lW zPx8OquYArKim`KZ(myOoqE>K2^QH9Kw@shJ(^H z|CH>tb`W2~O-?v{mWIuH(Yf>VwcY29bn2sCSq&ms+XTBb6Cl*+>8B3-xOL2UsE>Lq zR>J;kuhX(x?hT6`MAx6xoR#+Aln;Z8J{#mVhbDwHVfZ{U7&cu=W{h;?uyrS^>v zC)x%N3QT5fLjGmTvRiWNeopADb_jQXcz~f;zk*P#m9yq#if?(YTSLr`~D~2DLc{Avn=7MqFR0`OqI*Xc$8Crd3=L08dP5vu|u*y z$9j&gdOs)U6uMO3FdqdHk#0xu=EuCByvC1aviK7 zx~_gXk!VK%biVHKS#5bId-+DFm~Rw=wwI_v>#awPYz|eN{^kdWmiA+`peg<(WJVvi zo?tk?KWHRF&Evq{CK)wZ4KF6rtFFa-4R3HaG@n8BPGc*#2|7JRNLZ4sm3@jJ_;IGC zqiX3@oIW_=$U=|fE3aCE=bu?iir?f}rZulr+pGR(pZ`52`!ZiU!4~$F3h{4Fi2z`q zI?xs?Bxm-`G5YJrP#%t^*%NBK;ko>L>9tpc~Uv6z%(MypB(c_b=4T z<@#AaZDxDju~xCW&n!JzV7Q;(-(FtXDq(znB8FhJ2*a01w-!jo*2L2-9DhqZK{HM8 z|3VJcF3=CbB^nbRzk6Kvw@lm}Tc(cT%X=dOR?fLCP30N0(U*r_$#=?gqpj%gV;JM@ zoC6uG?!)$df*hw$e;A7onQAmK5y}L7<6i{7ju{)obk5|~7$Dj(3Q?hWZ)0-7DoEL1 z)a9zjKb43S1%${&*_F?tBMWj$4sQG44@M4k4eR60ZzQ(xWf9^QWgLa^>S3X&DY*%b zLvy18Ki2UIDnBnDOu#$sMMGajB+naS4197GHT}s)bY}jmsf~wvT(?(zJyZPkSra~# zuvx;IRB%z-qq3_ur8D32VY*P>_3g?^F-+~G%JC~h8JUjm{+%(dmgK3Bj%-NWQ9cti z%p{_DN#`CN>pxblm5#+dTOGZ;edXdK^b!%>rq!xBjfix-PEPUhC|-rU7nM!kfMLVK zi@D?nEFp)`)x{zIf^KY0G9PhhGy&U}W*{SxRJH7$@5Ds#JO)m~&M}#|qYfn-y70IE z*HJ<9dgCb={6Gzl9$Gqb399>7zVuR_^Jx(!b1Lg-_N)cv$qM$HEm&UdGJn%ssI|m7 z<5%q-NXxVSN-jFDd-f=K_}Yo%^U?Aq7GO{84zLUDxdp*)Eh+s^$yxFUytxlG9r}iWf$BVvKGyKH7|&DJ;!x~L^iY$y z?QBJb0NL-csA=w6-LT_>cH~^C3^=`?FVgbCq4MYr%U*M+obZw~Q?lxmY!!UpvuU`& zka7m0NGt=_99DINoLckG?m=va#){)8Vuosi!{ceJF<$*IAu<%{jY2M~qpyS=C>rR} z^2hH#0}vp}$c`(E6imzxm8RQae^7b$IzZBwUgy!HG_=G}ysOrx(0>UIrp=m+V*Mg5 z#C~CBXAY8RWoArS@le#+wwcIl?qibngG<2xwK&^;ryKaQ3zI`xyP}m}!}=BEdE~)q zc`x6JuDp(CvLJpw8Izi}PjFjWD6)lV!C@>F5r{-jf`e8mAAZwrr4{xZDpex>&sON} z5XT(%@UT_zItg z)>vY-YI`da#9uA?8h&CXip}s1wS79q(jqB$xMa4>>@gNEO#Qtd^IrQUyXmq_x<5~^ z6~JD0NxBJKWlzaa|1tjhA!;Qs6yYei+DJqbIssFHuwQ@b$Q5QH?UJc>hlY1qMr329 z@5Sf-)-ulZG)FU&s)kOVBpdb7u+KU zo(?<)Ha>qWqXR)LZFV$6_u>1^_y&PL8a=6?qnjL|JSq5mgBwpncM;hj0Z?~D4w<`AQ! 
zJBkagiwt;pGu>Ah-_>DVDTN|PN(RnkD_knYu*xWvZ>YQ0WQ$a~IqFPgu_21yD$-?S zIq?hiN(1XRq!qs5Kef|b(_B+bFFUUXmE~T!tMiIDJ?W|A}QILGhCKG2Jp$iTaO}WNv zLVFBJ6KbNzBB!nP$h_BPPk=DoEWT;=z^ft@fR*zyn1j!b%*4m}bMRK*T*N|+F_9#l zh@}-@da(?Rc9{F%tsNP`r}Kke;LNW#5VgJF%^Y%Te~5j%lijprofWIa2(Tj zpIlXz!jSUS_2<&?-kr{G8J1QuxSjMW&e2rG;Hq;d(f-f2sKjdU&*`s}ZvgQ0?E3=@ z(i7N3*edAJ0wzM|Ur)v}tqAm06)BvyW{@d#v0yu4mWVTQPdGvw))ZmT^Bqu9M%sSd zxqAj`Cw~9!p2T^lB$cA>CQZaQJ7%^%!iEEW{#|`u4mE`2@6_cid|2#`X0ktC$F#GS{-G6`0(WM@87*S6Vmc-rZD5Ks}Pigh2ie9 zA3ogWe>X6u-pXO#JP*qQEO&*a;8Yo=sop*Qc~Qfhwa3a3&U_a0&qSIx?s0g8E$LG{ z9dluR-G+NcqpRe9pB%HkLngDL7_m5YWG+9Ujk+{HFGPWxs??L(8>b+{{+J9Nn|tVF z_k~^f0S6(`sVX~Kadb=(Y5){6dpphluK#H@kNANE(KYe^arqFrX9@f3XB_dv5&P z+PJ(m7B$FlKs1oKj5^FgBMrm0S1FtN5It>*hP27!r8AlNF)hMDNHjy;?ab(O;0;0r zpsoSZ9Z?m=pi^^{zJh+zk*`__6P~Q16weJ zU{K6Q+ZnhT)PHx)e|Yj(P5CDal9CU2J&xMj2mGE1p9Jrlx(~%;L{O@8P%1!{4#LPY zb4Av2PNhlU-$4ho98E!kq}bLnv&8jY4_iB?-oGm%BsrAAlaA>rdE1fZDL3dJcl@`f z;$<_+QRFKoJ5(=;cLLa6fB%jD^9zHvST+q$2>2%}9TSgM{d&r~mfUVrnp^6s&zs9r zWMz8Yzebg2FX+?0?h9GInefk~9Q_o-WFIn(Zy*^$X*5edM${aUlmu~bd{RN?tQZSp zkg-)0;`qRkde@}i8`j`Ck*1e2v4yM-IUO)L@@KLxG65BX{E1{qy{X(g@$~X}ZGIOP zPgga*gJB<6n_;yLts%%YXQBz$+sZW3kCX_L2^{v(A=Zo`(C{LZrtVAT2}67 zSiXEUju~?E%v*^j5F4Tes9jRtg=mxp*T^^8RxyQhbuI);-*^a~KR6sB!8f-nr#Fn5 zb|=wG;n6GtdNG`8zTP%2`J)^xK^FGkJuCMXd##5#PY6O{va3%Gk z`2Cq(@g5Qg8MJMjOIPT-+Sg~w_)U+gGT4N;(X7iA6V^z2Amrw~j33Z{a>BcrOx$DxkxO&f1jU@5}`tNH% zj75%m)R@@m-1j-@`}3B(6U919rZ}-zZgLsBh`?EG)T-9348BwH>wy4k!B01r);$`OYzay>u>Y}ORk%zJP@aZ_^0Q0hML8Y4qCD!0cR2R z7oZ4xc?WldUwW<%4A0}$@A9WP%?cILOFg_S&jnQ_B{)CrIT*0m@_1*vZAtgp$d5kx z5?zalvba<7|09{A8!uI9vx=F~je{zGPvN;AN`F4E^Yqk6PQ_SyGWk<8`L=vO7i%=D zzz!xIr%{X?sefRZ5V-i}b9nR%hIqejut9tlV>8{ohz~hD+oI8m_)0b0MTTj8ji}H$G)m}vVZ+Ya|xUfx>xcmVz*B$9Zu_O@?S4CDT z(rI}@24>^pX8*lmLtq}{WESH`im&qxTO0WLsp1nulU*QnaHP~%D zcpj;XyAVCq@LUV0FrLB{T2Mt!t}^3hS;P*^B+-7%akBStO7>XA=_qZ=0Nw23KkHEt zkChwSwQBLjDamWRSBqs?D&wB*Gpr@x3^TueXTP**wbra5iKTt= z$WOvk3X*d;dnhetQ0}Teu zkh@iSJNu&HxjUU92mc&8b$b(86Why;=B@y6#HR zJP3=gi}g#cg>cE5IY*B!hn^^AK?u2w1$v71W=|U<*^zdh_TB5aGkhEXb>mC<1SWh3 zFd)DHxMO<&7_I;yZh@E9G(&G={6|PcuLAJSeK8;(eE;XXxsPZ0SVHh$fPu~$_+QEF2!waAM$Dr>3q5Q{b{3A^%|Le3ffU^QXfVc;Izj{3ZoG*-U`yaVc|2>zG=8Vfm`X4#} z#7)4#c?f6&fLEXg&K|h#;~OTK^<-qm!}R_Ewd#a7S2>uGw^^3^DWbB|WZ^nvc1P<1 z`imLeJN~*1D;E#TX$uxMZt2T+z7A{J#g-2JdL1v@-h=HduV#FgFXa%g7&CvrG(DB( zD5GSUL_0d8@H+(uL|fwj+LyE{%hBi7RUz2Lrg|0+di=Y_Xv!BQ1TMfGGia$6mHswm zzUjSXpjjCZ5bxM!+SP5C*LE>7%6TGy9z72?Dc9v;p|(i`n$|Iz>LZva90fsKFg0WT z6}58}>Y4A^PwoDhbC{|{CvQty-5Qe>)4rwSm`u30C}T&3Pt$RodOO3Ir1g{=UFJ%9 zLi77fn}>du&vk_Gu>4aXft9ZhO{?^aK!z$_ft=}5S?nIet5AkXJwXs{Rn>3m0niY6~P- zFCvW`sp;!LSDN}drccqTu3F(*DM!na7G#QsAva&GW)>1pBfBd@jrw7H0A zXj&vI63Xy()x2mosy=kM&^OHTk9ka99w=si3{i_T4bHV+OehA(rlbSVCZw8;$aU-9{B#LyZh_tIFuTK#$b@(4&~QWF;9icfKHL;j(epSMwTx9Yi{_=l@ml9b?A)wxz68 ze&*WaC8vT%m_l7&&D3egZ1hN=fSNadRJWmO-*8sU;7@=T0n;*R!-`|`Ny)U8Mq&z! 
z=epac*P$KqX3?RRp;7)5qJXsB>-$YnMM-*`DV4?g@5?YP?jjSU3d0)2v3P1cOnAGd zKZm@b9a`AV3vA$3Rg5)z@t zM}ZO%Y)9JApoor#9Gt^J`-5&U-BZoK4fpW=&z1I3197$eY72hX9|p@xn%dXpq!(5a zZ6{N@!_2~*!KPXss#wT6g}g9kIeUw%jIXxV_QH&-tgr7fq}A;^N$ET*w}N%~CJ zALvigR(@|!i0ST72n{iLguOtUa_+6;%GFQdp2u0i@lzP>#(jfBFRwGTl|5`tXoI%L zt772vPet`V6Zw=++AHiDgfC>NzhJ{YDj)!Q50qc{B###c0Gj{m*KeQ~40^lb0H=QE zI^+FUN+AAYxAuV<7H;KV_1+GvEJzdiZbAeoH9o_xwLayTct} z8L$)H^Rnejh|K?gg~`)-sQ$0702qbwR};oA*g5Q0C2f+|{v||nO7bPz2(-`O;6PAO zkws9~5$cILX@v2UV#2%qz#=t!Nj1djGl~Pt2EFCEzJ0CjrGj3)*lgtwBz^LvHE$bU zp0~7)l~P`sYSC7L$7`FIyk8rBN&WLh3DHZv>L6q}UCTBD7gYj^%D)JI!|4doHIvxmQ@O$~ukP`Pv?%@r`lJ+R+sPUvBA_HAk>=^@ znO%g{{TrQ%>JplsA?|0^8$r(Jw!jk4VzC}nm)CodZyZj#jh~Bqbx>J!$VA1ZV_Dw} z^7Ky0vs!rdi-I)D$-lkBsgl8oU6H`4D`8t%RocCAqR{?uH_Y)Ai~J0AVL5As-&poz z-LGc#n#2r4_t#v>-BDmjhjAn%ewJudz?-oqb%$gMxs;C0%R9aUolbABhqp zNq&-_UqWaMl!PQ6cX5)Kk^*X+Fvr{m5%C;3xkQVzs|4wWOPpt$7u4b|-eUXhD`ex+ zD$6ZzqZ#nX&m>@2YYAe#14C_cj$`Ec5+m0`JicTz7nYu>#6f3@7cqP)8H_oHgIN`B z9W3szu2`9v)hqiMPvE$-(y2X*KrmaH?!^}?uQMRV@&f6>JgNwz`GHv92JGy?w?Q-E*=KqRcW z+iGWFz?hByhv+|IaZevu{sQm@8XKTqe69XH@BUvP?an|l_!Z}K1<42K2>L(J5y#WJ zt2;OliW6}9_p=c1J->BMeD6^9*hR!4{r>syq6+&8VtfSEeN+mzj8`K-)7`(u-j7d$ z<^nI^3rHaZ{^8&S9zQ%Bn*LzlRs~Jb(|!tHl;+xyK3|Wx&S12?rXSpeL{4^Z?`zJ7 zeh?F6bHQG@h~9f+;(30Bw-c?)olPVmWvR^a>x<)sSH2RC%#BJ7)d|Ci%DleE?sz{( zFyw4eXX{J3e(BqAC~~E-C29l+^?w&S;*2QxWuIaOiJR}(p38C6t-lt3J>%VJ+!vPI z(DXQ4qKFcCFUF^w9WQgFB5EO=T2H*@AKPNsB1`G1ixHS@TJFrO_><8y&0?fvlAVDi z!DZ(qcKUp#ijEJfk~R62SLl9G-@$8Cn3hoS=XV(#_jy<;Gqs==*s6%jmb@L#H(_5+ zEX(KOwUUFKGAyPVHt3Di!0AePkE-^~f%H~8B{S?bu9sE{mBf)2gToDW{q;gE366I$ zZ*{KKf)e{vMp*Y%BMW2=oqX0>UH6w%u0*zYsNg91d)0wCGHMjJHd5-35now+lSp~P zc{HxJE9Up(F^@TMEYp$&jzF?wQ@T+|YHF<9u6&1g)5blz795gVnT-%;GL5ncscVV2 z;;#0;IU4bqeCFjDtahTMP9|Mq(Tt-UMMsKhFWySRno2}7 zL2<#!EIx+*a-=`NXx_A*xcYV23_(F^>#W~SRnV&;uAgOXNf2GorCsQ0amKk{kx#~U zlJQ8~-xG55iAG@rYw}L*}pq#HI(9iujG5LOVZO>KHy+#w@o{ z7F@-6?a&rd&^XY#5`pT?Y>{x8bBx6rWQJC3?C8^YtTO(hcS}_{$q9#ayIj*OXn_Qx zf@3U(e|Pon%y~!a?xH|i;aT;^BaODE$;DP4vQc@Kx_wIenJtc zSsyUGL7wLPW}iHpBl}D0n6pfdz-fuVc&H@wIK{$Wp|822qmMlU{`-g&b?K3BRXUOL z=s5Vyp515)XCd4#7eqEQ8zg!C5kyDoJX+7M!zInA5uLr(Rr-{kkhTO7k2&qz{*1pa z^S~0mWmhXb%ErHz@UH+rphpmJZ3E(wmjUc}&eF#X=)K1tLVhV&<%@eF4{3PWYY#Me z?Kg)a>W^C57E&CeB1Q8puJP$^gHERA1iKmH_ZZ&8$7^=T<>@&EAK^62=g%$z_w=_Z z{14@uJqN3pOgtV5-9BDCUuW%RV+xH=azKsfPYi7xw5V*Cy!($iL+mL$`N$yznUY&k z$o-czg`}XKi-fbQ$INMS)A+CG;ocR=*sJ!o&1{x`zpjwz zHD}N(FTk+`)@%p#n2@dW_sMJocmKlZqieD{-iSpHm zi+Ob4c<2{5LFPk?pfnY9&mrN`L%U=8i4y8UBg%_`a4F&7T7n!a4;s~SshXFsiC|wx zz=I)K=x6z4QX^(s^aJXgfL@5i_>Wc5VyAj**Yjo3Fm`a3$XOeSIhP`*n`R^2oA8cu z2mQ&?oYm_^1vcc06!i}!6}E|vyn_>_vbPWVX1A}km0>-@Yb0fW!u;T*71t+bO%_5)`U`=61G>UAglp`v?2 zo_`JVWycoi%}gt}Wh^D}l5?Xc9ueWXos+_y0&-wwO342kV4A=V^k3hWuWtK*);;J4 z_^Vgz4bTJQgF*UlfY}ec`qo|ontn%w1oK4CAD(da)J$H*CzW?~ES0-O+N1h^xIE0- zh9zpmOl7a)Qs|091$PM}xW+ss_sYw@XM@)E+Oerk`K63zNs63@&|*zHTO$=mqvPoysv|es=x(_WcVUg( zTQH3&oeN<*Z%#V#<&ir^!t&;aNgm0B9%a?;8c?O#IXb8yJQz+B!hq%;%QekyRg{u_ z$NZA$M|GX5sevi>kol90mSUfEUAiX1-d_6S*Uemf zGq`W+7sFk0U1=H%(>yUk&LDMCw}${wo`8?KOaJ1d?0WFs(A$px*OKY~?0dB{2Kcth z%Y+fD0Y83?}B;UX_!7!4wxSqU9jPx{SjO4`96&m5}^c`*JXGqRfXm(?I zEnC+&8?^V^4mG5PfIfG$)X>PE&BB=b_q*u;tB4K+p_D}!8;ZPkNFr%aSN;)5_%OkD z_R>NMAJyI0nAPe>(=|6*-GZS;!vln}{Yc$Oj+I7^Id(}dsgV`B?WDN|M}U-34xLlQZp_Bhd~ZmMx=@Fxsq- zgcZ0M>RZKm91=|XlnAkI0}z03Ao$V|3Cw6{ytQTdg&p0M;wyn!|0Zk-I($1)Dy=}9greh*POR(^80Vuf~4f)^D|L2 zcth%ITYZ|hS+x%imqupf&A8|Ni+s*p0SqHsAfx1mOGirX=w7( z)x+8PE+tF_^lD>++WsUJBsXO=VIC5%K6WG)#X3q_s;dH#P;4c8t!Y#_`lJyte4!ie z+}|+GdX(vLltGrUO-8huSF=mA_EWyfgosxWTVunwEG(4pVO549U#XK~d5rgdvCba` 
zrc{3Bbpf5ve{$xoxtfU>K-zj3JD8%8@f-0Xc-Q9l#+nThwrqY9+u7xa-vFhU{N0Nr zdf^qXxw8MMhpa@bKxnGlX&6h|WwY+wCKB%NjY`=D`;&bE<=XS`dxns&C56^)pN#rI z029-GviPi?O!VaRsM(1Dsg`Qjg<@z*M!V;N7s^AFVOb9iZKj<-AbGc^_uFe*t9UXB z5Xk*MVRO61a0IG@y8XCw*^dG)e*po2@!u?^F`0||zpjIC;Er`*2=)x<=;|f?*8}lW z0NCkI28&}9F#UJSvmf}_8TJl*T-gu_RWKcfD)gSUrE^c`5}_G_c$~*mys;EY4XfwW zH>)VG|KyOx%OP9+f-f2hTT;Y3=%78|sljQibX2?;nfS4Mld%NOP445>UFDGuxW$|* zB4Ii!x8a63$-_Ik8nsp38uDpRwBon!jM2ap!e4MmV%@m zUbXOL^DfPjWKR(>{0GFa^Mi3)B-5%VmN~&}DlBUfRd-T!Z@s`|Oj&%(PKCkBsyu~; z;-4ZYL`~Xfcxy8peKPRb0jYEd2bIFK4Q&j8uQKF7 z+e`S*Qib{EsHJfaUkqXIS(^_`O^u3x=pL6S6riWM^LcDk63l?I2M|6?wNsC=BkVa>{|c&;9#A4tswB#0*w~=nKueIKlj4x5 zj&OFBl7MgRol{Eq7$SO!x%GiE69AUCzPe3KxKCxBwp|=$bquPk31XEb=LfRUl1eJV zrG#n+z0g0lq6hgs>%$4^82Z-RBWv8F^^kYms|uw@w&=;M>_p3QUh~MK*i&Gon%^A5 zz2lH}BhcElQouJSCQ=5hEbY(~t#D5>ik0^VLBBYAm!2WRyjL$ITNC;otiK|C{&koI z`%GBpvT%El#51zVaJ=rXqW2Qd9k=!AuaQ1B{tLpot8S=#3p3dx8U1hA%UlIzTD0X# zG7+=EGsMyc4d3aKrLE-zD=u_jWdq}iTGF>fX@amC<`2kK(I@H&Y4&F?HLxuP3Qll_ zX4+>Xvzvy5*qD^Iv(#7Q$CFa(exyi)$C6aTVaIxdt?7u~LR;_M zJQgS{`9BWv?##?uA|21$IdtUgVR*JO9oQH2CEK4f)FI)poXBU^?GZhD>MFi$TpE<% z2CrU(Lqf_uzUFe3zeYCC%4k#$$YED*%I-KpVFHFcJ4jbsHTU0@q)e zBexRaX31qg9dV!@Ke7KI^V)oNi#TB`CILh4Y#=XD#TfC2q2<9%TG$vIv<8_t6sIzv^G+6{>8 zZZtFikOk}Rwsh;%1g_&F*(WjzMgBI?7RN#2W3gCBVYXe%~!9LSYHXYSOU*Qy3Ef5ZCAdgpVANb1~tf# zJb_ktA-)}OS2dS*;cya~Qe2x{0gi1!rE+-mJGz5p+0aD&oT$2065~WW29X~FdiU27u(a{?@7>H+;**H(*QDCT3VkDL^Msb5F%|w$K{8Mdu7JCqW zA#!bXZqBp*bvdzbE7~I5NFv^U<|is-sCh*Nb?UJG$*ooJ8~UjgVx>t-p~rZ+(Q}fe zUVyRg@R8L#+bA;Czib-)Lh!3Mz|-^37yy{Hg}(vuKj1>72csgg7Fo(6OUrsXB9wSAzS>IoX*BEwf ze5>mi0*v&W*UV_hmAKHjJv)Nj88zTW+*KyBqNq3FH}dOr#tgzJ4_J6ys*M$LY4Ow7 zFLjg9GjEGAZZ`(H>1luVc3s)%4RK%(mSKg1;&Q0F2_i%delVO?MXS5VnC%%s*(mLf zm0#8t2r|H&+L_A}kYK75I7PyyBNp#YB>YUAyubg4R7|+&QA<=w0v1bo96}A>u#H#) zY_5N+B;bsU&nkZlRp_;LZ;DUFYqCI%P~J_m4$fLIk11j3s#>Lg*H|r#Pd^~(U$RQb zlgA#n^a>;1CroRA};G;k3$D@0LIKsB|bszWdolejlJH>@#2`nqzTV2sK{h=Oky+qrZ<-eASL^-A(0qOvWk_8otr@Z?r1dYj`&dk)e*}Kq zx+O$S0{@@j0*hw@8%V&ZuW<_5$yN;XZ~g6zQ())P>@6NFJ_&5VeY%^3tGN4yL#!r@ zW`Dy^wIA)F7j)hi>!_aHL5^!w|G|pZ^DFjPg7GC(?Zm*JsV4WD$$`GC3q`di8LXO@ zZie-26lu(iD=Zia3?Kd7m0YY-$k!gyPGnFeOiVI2wHR1aT{a7%>tA?culc8CI;bs- zrSw+*JpQn~aQi?;^WF>P=RtW`Huq_9gH6fveYqzKa@N5YA6VR>|MuZB_cb{3mxmlEg z0bILz799O^htahIdR$xWAOOqa0Z&5dx1J^Xm^$)8i(|QR4SA(w{Zo>F9^DKN*}Ame zx5gT8etsx)=keMW+2=c2gKEt$8@DU0Uxo3+FxUJY){KxQ@HB06qdK*e4b#64*f+!? z4}SgxA3zjBX*U{BzR{I_VVj-vu!aL^v>YN+v?c1opfy5 zR>yY7wr$(lC*S*=|MS}WPp!4atQvI>=7F=WC3(VpXSHH9DK979KX*}N0!vh)Uvs~E zkHG4|V{rex9mz+^xT(*dU)`RoIEpxJoSK&8si9lgh?1PaTjxRzJbZA`y={NG&3r)Z z4TmvtMHNdxz7^Xl*T^+2n{ISWfaIQUs&lnOywxbJbOTOac%CVq{0UqECo!|}q&c8i zUnLl9E`liIbvVmk>(V-;pk`mnBme%y<&=96O5ifRJcWt|(7bWnk{e73YbdvWN8cZU zgN;>hu^z5L>B2bdpEshm?HA=-@5|dCNJoCCi?0C3wZi)gV7>mz<>Q`In4SjZoy&fh zrU_lHb$i;A-Eg(g`37PLu5lqxaRw!c75cN}Xf1l1*)ecFbMWoL9NicN>jG#@hwuY- zX+hYSNAZ26r#Nn2Gcz(#WJuvR({v8UJGNq}@G8RAkOQ4Mu^{PuyBdo{Sk#FEH&lhg zz~fyGuOT+BqcSWnrZlvP1F<-t!N()Tk)RfORWK2fQoVE$U=)(34O(bxgq{heq& zd4sHh@tKXb4gHJ%X1iv7f=*reH^)Z69P>3WJK4XR#{r%2ct3~o$1s;qG!s;6xM@H3 za{RybHp~TWVZreV%-@?@7DJTq*HtM#x~F>e?5TuFOp8w8B=O%GOJi-H&dWCGElEU|q$B2Tou1(t05#W7cYEiM@uEu9yM3emQQ9opf6 zrGHbKb;F*woGYd0MeKydo)1oIxLK0lT~$2VBU$e0YNL@k(RM>GYW#ZyZUE))y-3Gp z>F{^tnbaof?w~!l+c~+6fO=!1C$5kdM);&|GHxf~`%T$wF|zjOjTK=}y(i;8uO5~W z24inn0>q}@CBnMQCB`-yr0)%#VRLvIUmz_dcP^Rnv4xoDkU<2DoJst(p)-~^(cbKe zw#Vt`aJSx~n4=+Dc1VBZ_x+(%j+!oR>uGF#L~||je457RoPjQx=fI@NkKqm7db1XB zI+%S@h=~+(aP;0qC0Sg=45ZWsnH~xSO)TKY2o>x! 
zf#O}^ldiHPg{DznBN&LIl z!pD$$!Q1PY*H&tct7O0vFFM>?q`2+f+%PProe z1D^1ol_zZ}KeG0G=XM`#-U>ah5vo-~vg&{=4qMx)W7}A@$cVSxDMAayWW$|c?{)~k z)Y*b3IC={l5A)c-hhDCUA2EKZ=FWmY6|?JuS9jT6cnfCPZR_J2a;24ILi-TwWb5PACm%sdJF2wm~`wZ));Mi=*3gwwJ7 zprWo(9TxI(B>CSeU`M1noyT-6jF!k z&7k!iqdGlSc!=0anhGM>U!h#fXF+P73rm%bao2vNGQBtpuizVu-0(Z5##ly0>B)O? zrz9hBQzb+u&QGB#ntgYCsUn*USR5-u-M`?}Jd6&v5iDrLvU0Huw-z*9Cq%t8Zhil) zSp}vG2(Y_CNQ9e!tL3`;tk&OoNc8w`GIuag;F)+ECCjo!@M&*~e?Rb%qu zQX}M(P5K=wWCdlpK=`WA&VP9RLi%0(aOUlTg`J4mX$o9L{)*3H#6ep5JXd|0tTKGn zxVC3FvlJX5{^w!@1zcHnR%UB5(h0l2Y!irTAtJ~d92D#m=edX#i8}8ifMT!}Tn7Z6 z{(&^TsW0#DOW31wX!%f3p{)K>V2t<`!U{4eHib@p!z@D+6%2#7BrQP_ z)$Y*gkWt1iE=CF^DwgOF>JSPO7cEW@$A$*3Alea_uF8Sw?;Z1LJ*YX$K!Px`F$TQe` zitO8y{3Xkq!c1!qFHFMB_=J7GgvQxtZltffCGdyr;{k+GHoy1;63(&947NyGeiMDn zq*NQ(WQd_xP?g)|Y||g$cXP%}MB({P5oVgXThNsjMd`~wTppnnau zE!5dfK^S?}jfFgf*F&R`iq_GpiTZ_8%+pva!N|A_>^#p;IBeS4pQDAuk<_d``Q(x1 zo8f{RSB80yuA)90R_d zV-4e~uYUjFc~C60pAVM$YER=_3Ne5wb>+5nD|nxI(a`=>J@2#-kBp33q{asWcSt^P zRq;Y~XBmL7Y2_4f`;+CRT~_%pZf>E)4?|BqZRtuO|xP8Gh2 zzBK#63Y{-X@54CFU-Q1FTuA}*O!!U=riTJmycESpJ``ox?IuJ!^>ky)gy1@=DTJbt zHRPl|yU7_5kyZ}SiiVQAF%wPOVw z5;KWnO}xre5bGX@DzS9zauYByP@4#zqicXjb5iOn>z;#sxblp3hRokC-J9=ce0|iw z`9_d25Q5~x4Kw3=&TGy?Ljd29#U6zH-ll&XO}yZ^LcxsnUgB}UTx)I*L)8!2ly4pb zYi>E|Y{lqymR``&3to$Q+ff;MW%}_NcbRo$Uf8rE4uczbTkxk?xUPZ4I ze7u=)>OQj4Q*FXKvQ%Z2n$HR;!48J;i^q*3_sXFJnNflS)SsmF`y%&sRz#x4-BZJh zXtOhYJ04ACM#-o@E_ow(Pe0Z-8$VmBwDIV(MGnrVMlfYo4SK%Fs>UrlLsA4xFEgcY z3gV9B7+u{fMM4z8xa<;*KhCsWH-j@Rv{fPl&Pxn~kt!q%{Ci-*{_~%God9M4j`u*{ zfIM*gFUWgELkkUX8@Qdvx4_O4OKl1O^aQXgyni#WTyXN7s5$m6DyJ~k zl(71iDrbxP1$MBbS1)qIapHCNTvEZdIi{jn=LDrt5bmXRiD(cq0H4VIBx7?V4L$8| zSz55hcjPL{!oAzbsznDC18p&(d!=xXv}t=heat#JeNH(3ddfvgX6~LH%DO%!kSIhu zjIMGO%d*`g$*3utd+>3)Pj@`Cq;?nd)4i92l`|b!{#1Xq{ zRUD;p3!B4Vy5^@;{=yI^cveE#Yzu|&h62`YF|wUz{n>}zRw3snEH(%DwOuF>{bz3U`2Mi^AvHh#Y}oyG?7#mEZt>yYaw7w3?Su0N zIDdTr0zUkItTw^b&2um4M#n?d1d7?aDHaPUW@h7u%ww&LENYVwm zDVESI;V!>?OV2NzbDBsD6n#p$+Q0rt{O-b)ih0&%m2!oI6s1;$8TJN=`n1n*JWsD0L1TZRAsu`d8jlZg#d8zIzcQ{mV>3 z&@3J7LmLAmbeQO z2-}#cwmp4K>}wWYWTFA|@LP5sg?q7&P64!N1=*BYY#YY?XFN;70SnfmI@cGg=A$~B zl1DED-&b9^5slU-{$^P3P{rJ6o~_`Bj0k~B%s3tXIefH z%6qWfS|vUH%wGBY3C-^?syoQFu(m)TXXml+DIZOS_&7LCIW+Y99otlv%{sgRv9f%U z_94kRjp2wrFD|6Rr#&FeUwn#E{2N9vgP9YN;u#VtXP&-O^AfgH&)bNZC$Zq4jg(MB z;k;BR_=B)|_PecfC-;$*qS)Le2iLt@@hE-6&}&dRPTnU{B8Kr{0ehm&<@3KYQ&