diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index fe15cf5dec3..819df996d8a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -470,6 +470,9 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10404. Some accesses to DomainSocketWatcher#closed are not
     protected by the lock (cmccabe)
 
+    HADOOP-11161. Expose close method in KeyProvider to give clients of
+    Provider implementations a hook to release resources. (Arun Suresh via atm)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HADOOP-10734. Implement high-performance secure random number sources.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index a8b941444e8..9dd1d47367c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -533,6 +533,14 @@ public abstract class KeyProvider {
                                        byte[] material
                                        ) throws IOException;
 
+  /**
+   * Can be used by implementing classes to close any resources
+   * that require closing
+   */
+  public void close() throws IOException {
+    // NOP
+  }
+
   /**
    * Roll a new version of the given key generating the material for it.
    *
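A minimal usage sketch (not part of the patch): a caller loads a provider through KeyProviderFactory and releases it through the new close() hook. The KMS URI and key name below are illustrative only.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class KeyProviderCloseExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative KMS URI; in practice this comes from core-site.xml.
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
        "kms://http@kms.example.com:16000/kms");
    List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
    KeyProvider provider = providers.get(0);
    try {
      KeyProvider.Metadata meta = provider.getMetadata("mykey");
      System.out.println("cipher = " + meta.getCipher());
    } finally {
      // New hook added by this patch: lets implementations such as
      // KMSClientProvider release background resources.
      provider.close();
    }
  }
}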

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
index f800689fdf1..73c98855be5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
@@ -410,4 +410,11 @@ public class KeyProviderCryptoExtension extends
     return new KeyProviderCryptoExtension(keyProvider, cryptoExtension);
   }
 
+  @Override
+  public void close() throws IOException {
+    if (getKeyProvider() != null) {
+      getKeyProvider().close();
+    }
+  }
+
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 5b7f109c921..c4c7e0c3db1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -791,4 +791,15 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     return tokens;
   }
 
+  /**
+   * Shutdown valueQueue executor threads
+   */
+  @Override
+  public void close() throws IOException {
+    try {
+      encKeyVersionQueue.shutdown();
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
index ee10483185d..aa0e62458de 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
@@ -75,6 +75,8 @@ public class ValueQueue {
   private final int numValues;
   private final float lowWatermark;
 
+  private volatile boolean executorThreadsStarted = false;
+
   /**
    * A Runnable which takes a string name.
    */
@@ -187,9 +189,6 @@ public class ValueQueue {
         TimeUnit.MILLISECONDS, queue, new ThreadFactoryBuilder()
             .setDaemon(true)
             .setNameFormat(REFILL_THREAD).build());
-    // To ensure all requests are first queued, make coreThreads = maxThreads
-    // and pre-start all the Core Threads.
-    executor.prestartAllCoreThreads();
   }
 
   public ValueQueue(final int numValues, final float lowWaterMark, long expiry,
@@ -297,6 +296,15 @@ public class ValueQueue {
 
   private void submitRefillTask(final String keyName,
       final Queue<E> keyQueue) throws InterruptedException {
+    if (!executorThreadsStarted) {
+      synchronized (this) {
+        // To ensure all requests are first queued, make coreThreads =
+        // maxThreads
+        // and pre-start all the Core Threads.
+        executor.prestartAllCoreThreads();
+        executorThreadsStarted = true;
+      }
+    }
     // The submit/execute method of the ThreadPoolExecutor is bypassed and
     // the Runnable is directly put in the backing BlockingQueue so that we
     // can control exactly how the runnable is inserted into the queue.
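For illustration (not part of the patch), the lazy thread-start pattern that ValueQueue adopts above can be reduced to the following sketch; the class and field names are made up. ThreadPoolExecutor.prestartAllCoreThreads() is idempotent, so even if two callers race past the volatile check the second call is a harmless no-op; the flag only spares the lock on subsequent submissions.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class LazyPrestartPool {
  private final ThreadPoolExecutor executor =
      new ThreadPoolExecutor(4, 4, 60, TimeUnit.SECONDS,
          new LinkedBlockingQueue<Runnable>());
  private volatile boolean threadsStarted = false;

  public void submit(Runnable task) {
    if (!threadsStarted) {
      synchronized (this) {
        if (!threadsStarted) {
          // Core threads are created only on first use, so a pool that is
          // constructed and then shut down without being used never starts them.
          executor.prestartAllCoreThreads();
          threadsStarted = true;
        }
      }
    }
    // Enqueue directly on the backing queue, mirroring how ValueQueue
    // bypasses submit()/execute() to control insertion into the queue.
    executor.getQueue().add(task);
  }

  public void shutdown() {
    executor.shutdownNow();
  }
}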
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 362c62de92c..94a68e239d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -935,12 +935,18 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   @Override
   public synchronized void close() throws IOException {
-    if(clientRunning) {
-      closeAllFilesBeingWritten(false);
-      clientRunning = false;
-      getLeaseRenewer().closeClient(this);
-      // close connections to the namenode
-      closeConnectionToNamenode();
+    try {
+      if(clientRunning) {
+        closeAllFilesBeingWritten(false);
+        clientRunning = false;
+        getLeaseRenewer().closeClient(this);
+        // close connections to the namenode
+        closeConnectionToNamenode();
+      }
+    } finally {
+      if (provider != null) {
+        provider.close();
+      }
     }
   }
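Taken together, closing the file system now also tears down the client's KeyProvider, so a KMS-backed HDFS client no longer leaves ValueQueue refill threads running. A hedged usage sketch (the namenode URI and path are illustrative only):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsCloseExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative namenode URI.
    FileSystem fs = FileSystem.get(
        URI.create("hdfs://namenode.example.com:8020"), conf);
    try {
      System.out.println(fs.exists(new Path("/tmp")));
    } finally {
      // DFSClient.close() now invokes provider.close() in a finally block,
      // shutting down the KMS client's encrypted-key ValueQueue threads.
      fs.close();
    }
  }
}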