Merge remote-tracking branch 'apache-commit/trunk' into HDFS-6581
commit 31bbeaf383
@@ -507,6 +507,8 @@ Release 2.6.0 - UNRELEASED

    HADOOP-11060. Create a CryptoCodec test that verifies interoperability
    between the JCE and OpenSSL implementations. (hitliuyi via tucu)

    HADOOP-11070. Create MiniKMS for testing. (tucu)

  OPTIMIZATIONS

    HADOOP-10838. Byte array native checksumming. (James Thomas via todd)

@@ -763,6 +765,9 @@ Release 2.6.0 - UNRELEASED

    HADOOP-11067. warning message 'ssl.client.truststore.location has not
    been set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal)

    HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to
    determine if in proxyuser mode or not. (tucu)

Release 2.5.1 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -385,9 +385,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
    // if current UGI is different from UGI at constructor time, behave as
    // proxyuser
    UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
    final String doAsUser =
        (loginUgi.getShortUserName().equals(currentUgi.getShortUserName()))
        ? null : currentUgi.getShortUserName();
    final String doAsUser = (currentUgi.getAuthenticationMethod() ==
        UserGroupInformation.AuthenticationMethod.PROXY)
        ? currentUgi.getShortUserName() : null;

    // creating the HTTP connection using the current UGI at constructor time
    conn = loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
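The hunk above (HADOOP-11069) switches the proxy-user test from a short-user-name comparison to the UGI's authentication method. The following is a minimal, hypothetical sketch of the difference, not part of this commit; it uses only public UserGroupInformation APIs, and the user names "client" and "foo" are made up for illustration:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserCheckSketch {
  public static void main(String[] args) throws Exception {
    // A real user impersonating "foo", as a KMS client running in proxy-user
    // mode would (hypothetical names, not from the commit).
    UserGroupInformation realUser = UserGroupInformation.createRemoteUser("client");
    UserGroupInformation proxyUgi =
        UserGroupInformation.createProxyUser("foo", realUser);
    proxyUgi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        UserGroupInformation current = UserGroupInformation.getCurrentUser();
        // The new check: a UGI created via createProxyUser() reports PROXY,
        // regardless of how its short user name compares to the login user.
        boolean proxyMode = current.getAuthenticationMethod()
            == UserGroupInformation.AuthenticationMethod.PROXY;
        String doAsUser = proxyMode ? current.getShortUserName() : null; // "foo"
        System.out.println("doAs=" + doAsUser);
        return null;
      }
    });
  }
}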
@@ -222,9 +222,9 @@
            </goals>
            <configuration>
              <target>
                <mkdir dir="${project.build.directory}/test-classes/webapp"/>
                <mkdir dir="${project.build.directory}/test-classes/kms-webapp"/>

                <copy todir="${project.build.directory}/test-classes/webapp">
                <copy todir="${project.build.directory}/test-classes/kms-webapp">
                  <fileset dir="${basedir}/src/main/webapp"/>
                </copy>
              </target>
@@ -0,0 +1,197 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.crypto.key.kms.server;

import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.webapp.WebAppContext;

import java.io.File;
import java.io.FileWriter;
import java.io.Writer;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;

public class MiniKMS {

  private static Server createJettyServer(String keyStore, String password) {
    try {
      boolean ssl = keyStore != null;
      InetAddress localhost = InetAddress.getByName("localhost");
      String host = "localhost";
      ServerSocket ss = new ServerSocket(0, 50, localhost);
      int port = ss.getLocalPort();
      ss.close();
      Server server = new Server(0);
      if (!ssl) {
        server.getConnectors()[0].setHost(host);
        server.getConnectors()[0].setPort(port);
      } else {
        SslSocketConnector c = new SslSocketConnector();
        c.setHost(host);
        c.setPort(port);
        c.setNeedClientAuth(false);
        c.setKeystore(keyStore);
        c.setKeystoreType("jks");
        c.setKeyPassword(password);
        server.setConnectors(new Connector[]{c});
      }
      return server;
    } catch (Exception ex) {
      throw new RuntimeException("Could not start embedded servlet container, "
          + ex.getMessage(), ex);
    }
  }

  private static URL getJettyURL(Server server) {
    boolean ssl = server.getConnectors()[0].getClass()
        == SslSocketConnector.class;
    try {
      String scheme = (ssl) ? "https" : "http";
      return new URL(scheme + "://" +
          server.getConnectors()[0].getHost() + ":" +
          server.getConnectors()[0].getPort());
    } catch (MalformedURLException ex) {
      throw new RuntimeException("It should never happen, " + ex.getMessage(),
          ex);
    }
  }

  public static class Builder {
    private File kmsConfDir;
    private String log4jConfFile;
    private File keyStoreFile;
    private String keyStorePassword;

    public Builder() {
      kmsConfDir = new File("target/test-classes").getAbsoluteFile();
      log4jConfFile = "kms-log4j.properties";
    }

    public Builder setKmsConfDir(File confDir) {
      Preconditions.checkNotNull(confDir, "KMS conf dir is NULL");
      Preconditions.checkArgument(confDir.exists(),
          "KMS conf dir does not exist");
      kmsConfDir = confDir;
      return this;
    }

    public Builder setLog4jConfFile(String log4jConfFile) {
      Preconditions.checkNotNull(log4jConfFile, "log4jconf file is NULL");
      this.log4jConfFile = log4jConfFile;
      return this;
    }

    public Builder setSslConf(File keyStoreFile, String keyStorePassword) {
      Preconditions.checkNotNull(keyStoreFile, "keystore file is NULL");
      Preconditions.checkNotNull(keyStorePassword, "keystore password is NULL");
      Preconditions.checkArgument(keyStoreFile.exists(),
          "keystore file does not exist");
      this.keyStoreFile = keyStoreFile;
      this.keyStorePassword = keyStorePassword;
      return this;
    }

    public MiniKMS build() {
      Preconditions.checkArgument(kmsConfDir.exists(),
          "KMS conf dir does not exist");
      return new MiniKMS(kmsConfDir.getAbsolutePath(), log4jConfFile,
          (keyStoreFile != null) ? keyStoreFile.getAbsolutePath() : null,
          keyStorePassword);
    }
  }

  private String kmsConfDir;
  private String log4jConfFile;
  private String keyStore;
  private String keyStorePassword;
  private Server jetty;
  private URL kmsURL;

  public MiniKMS(String kmsConfDir, String log4ConfFile, String keyStore,
      String password) {
    this.kmsConfDir = kmsConfDir;
    this.log4jConfFile = log4ConfFile;
    this.keyStore = keyStore;
    this.keyStorePassword = password;
  }

  public void start() throws Exception {
    System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, kmsConfDir);
    File aclsFile = new File(kmsConfDir, "kms-acls.xml");
    if (!aclsFile.exists()) {
      Configuration acls = new Configuration(false);
      Writer writer = new FileWriter(aclsFile);
      acls.writeXml(writer);
      writer.close();
    }
    File coreFile = new File(kmsConfDir, "core-site.xml");
    if (!coreFile.exists()) {
      Configuration core = new Configuration();
      Writer writer = new FileWriter(coreFile);
      core.writeXml(writer);
      writer.close();
    }
    File kmsFile = new File(kmsConfDir, "kms-site.xml");
    if (!kmsFile.exists()) {
      Configuration kms = new Configuration(false);
      kms.set("hadoop.security.key.provider.path",
          "jceks://file@" + kmsConfDir + "/kms.keystore");
      kms.set("hadoop.kms.authentication.type", "simple");
      Writer writer = new FileWriter(kmsFile);
      kms.writeXml(writer);
      writer.close();
    }
    System.setProperty("log4j.configuration", log4jConfFile);
    jetty = createJettyServer(keyStore, keyStorePassword);
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    URL url = cl.getResource("kms-webapp");
    if (url == null) {
      throw new RuntimeException(
          "Could not find kms-webapp/ dir in test classpath");
    }
    WebAppContext context = new WebAppContext(url.getPath(), "/kms");
    jetty.addHandler(context);
    jetty.start();
    kmsURL = new URL(getJettyURL(jetty), "kms");
  }

  public URL getKMSUrl() {
    return kmsURL;
  }

  public void stop() {
    if (jetty != null && jetty.isRunning()) {
      try {
        jetty.stop();
        jetty = null;
      } catch (Exception ex) {
        throw new RuntimeException("Could not stop MiniKMS embedded Jetty, " +
            ex.getMessage(), ex);
      }
    }
  }

}
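For orientation, a minimal usage sketch of the new MiniKMS (HADOOP-11070). It mirrors how the TestKMS change below consumes it; the conf dir and log4j file shown are simply the Builder defaults restated, and the wrapper class name is made up:

import java.io.File;
import java.net.URL;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;

public class MiniKMSUsageSketch {
  public static void main(String[] args) throws Exception {
    // The conf dir must exist; MiniKMS.start() writes default kms-acls.xml,
    // core-site.xml and kms-site.xml into it when they are missing.
    File confDir = new File("target/test-classes").getAbsoluteFile();
    MiniKMS miniKMS = new MiniKMS.Builder()
        .setKmsConfDir(confDir)
        .setLog4jConfFile("kms-log4j.properties")
        .build();
    miniKMS.start();
    try {
      URL kmsUrl = miniKMS.getKMSUrl(); // e.g. http://localhost:<random-port>/kms
      System.out.println("MiniKMS running at " + kmsUrl);
      // point a KMSClientProvider / kms:// KeyProvider URI at kmsUrl here
    } finally {
      miniKMS.stop();
    }
  }
}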
@@ -36,10 +36,6 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.webapp.WebAppContext;

import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;

@@ -52,7 +48,6 @@ import java.io.IOException;
import java.io.Writer;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.net.URI;

@@ -91,49 +86,6 @@ public class TestKMS {
    return file;
  }

  public static Server createJettyServer(String keyStore, String password) {
    try {
      boolean ssl = keyStore != null;
      InetAddress localhost = InetAddress.getByName("localhost");
      String host = "localhost";
      ServerSocket ss = new ServerSocket(0, 50, localhost);
      int port = ss.getLocalPort();
      ss.close();
      Server server = new Server(0);
      if (!ssl) {
        server.getConnectors()[0].setHost(host);
        server.getConnectors()[0].setPort(port);
      } else {
        SslSocketConnector c = new SslSocketConnector();
        c.setHost(host);
        c.setPort(port);
        c.setNeedClientAuth(false);
        c.setKeystore(keyStore);
        c.setKeystoreType("jks");
        c.setKeyPassword(password);
        server.setConnectors(new Connector[]{c});
      }
      return server;
    } catch (Exception ex) {
      throw new RuntimeException("Could not start embedded servlet container, "
          + ex.getMessage(), ex);
    }
  }

  public static URL getJettyURL(Server server) {
    boolean ssl = server.getConnectors()[0].getClass()
        == SslSocketConnector.class;
    try {
      String scheme = (ssl) ? "https" : "http";
      return new URL(scheme + "://" +
          server.getConnectors()[0].getHost() + ":" +
          server.getConnectors()[0].getPort());
    } catch (MalformedURLException ex) {
      throw new RuntimeException("It should never happen, " + ex.getMessage(),
          ex);
    }
  }

  public static abstract class KMSCallable implements Callable<Void> {
    private URL kmsUrl;

@@ -144,33 +96,19 @@ public class TestKMS {

  protected void runServer(String keystore, String password, File confDir,
      KMSCallable callable) throws Exception {
    System.setProperty(KMSConfiguration.KMS_CONFIG_DIR,
        confDir.getAbsolutePath());
    System.setProperty("log4j.configuration", "log4j.properties");
    Server jetty = createJettyServer(keystore, password);
    MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder().setKmsConfDir(confDir)
        .setLog4jConfFile("log4j.properties");
    if (keystore != null) {
      miniKMSBuilder.setSslConf(new File(keystore), password);
    }
    MiniKMS miniKMS = miniKMSBuilder.build();
    miniKMS.start();
    try {
      ClassLoader cl = Thread.currentThread().getContextClassLoader();
      URL url = cl.getResource("webapp");
      if (url == null) {
        throw new RuntimeException(
            "Could not find webapp/ dir in test classpath");
      }
      WebAppContext context = new WebAppContext(url.getPath(), "/kms");
      jetty.addHandler(context);
      jetty.start();
      url = new URL(getJettyURL(jetty), "kms");
      System.out.println("Test KMS running at: " + url);
      callable.kmsUrl = url;
      System.out.println("Test KMS running at: " + miniKMS.getKMSUrl());
      callable.kmsUrl = miniKMS.getKMSUrl();
      callable.call();
    } finally {
      if (jetty != null && jetty.isRunning()) {
        try {
          jetty.stop();
        } catch (Exception ex) {
          throw new RuntimeException("Could not stop embedded Jetty, " +
              ex.getMessage(), ex);
        }
      }
      miniKMS.stop();
    }
  }

@@ -1219,7 +1157,7 @@ public class TestKMS {
        final URI uri = createKMSUri(getKMSUrl());

        // proxyuser client using kerberos credentials
        UserGroupInformation clientUgi = UserGroupInformation.
        final UserGroupInformation clientUgi = UserGroupInformation.
            loginUserFromKeytabAndReturnUGI("client", keytab.getAbsolutePath());
        clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override

@@ -1229,7 +1167,7 @@ public class TestKMS {

            // authorized proxyuser
            UserGroupInformation fooUgi =
                UserGroupInformation.createRemoteUser("foo");
                UserGroupInformation.createProxyUser("foo", clientUgi);
            fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
              @Override
              public Void run() throws Exception {

@@ -1241,7 +1179,7 @@ public class TestKMS {

            // unauthorized proxyuser
            UserGroupInformation foo1Ugi =
                UserGroupInformation.createRemoteUser("foo1");
                UserGroupInformation.createProxyUser("foo1", clientUgi);
            foo1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
              @Override
              public Void run() throws Exception {
@@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED

    HDFS-6376. Distcp data between two HA clusters requires another configuration.
    (Dave Marion and Haohui Mai via jing9)

    HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)

  OPTIMIZATIONS

    HDFS-6690. Deduplicate xattr names in memory. (wang)

@@ -711,6 +713,9 @@ Release 2.6.0 - UNRELEASED

    HDFS-6714. TestBlocksScheduledCounter#testBlocksScheduledCounter should
    shutdown cluster (vinayakumarb)

    HDFS-6986. DistributedFileSystem must get delegation tokens from configured
    KeyProvider. (zhz via tucu)

Release 2.5.1 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -3084,4 +3084,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
  DFSHedgedReadMetrics getHedgedReadMetrics() {
    return HEDGED_READ_METRIC;
  }

  public KeyProviderCryptoExtension getKeyProvider() {
    return provider;
  }
}
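A short sketch of what the new DFSClient accessor is for; the MiniDFSCluster setup and the wrapper class name are assumptions, mirroring how the tests below reach the provider:

import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class KeyProviderAccessSketch {
  // "cluster" is assumed to be a running MiniDFSCluster, as in the tests below.
  static KeyProviderCryptoExtension keyProviderOf(MiniDFSCluster cluster)
      throws Exception {
    DistributedFileSystem dfs = cluster.getFileSystem();
    // New accessor from this change; returns null when no KeyProvider is
    // configured (the tests below configure one via
    // KeyProviderFactory.KEY_PROVIDER_PATH).
    return dfs.getClient().getKeyProvider();
  }
}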
@@ -84,8 +84,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

@@ -1946,6 +1948,28 @@ public class DistributedFileSystem extends FileSystem {
    }.resolve(this, absF);
  }

  @Override
  public Token<?>[] addDelegationTokens(
      final String renewer, Credentials credentials) throws IOException {
    Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
    if (dfs.getKeyProvider() != null) {
      KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension =
          KeyProviderDelegationTokenExtension.
              createKeyProviderDelegationTokenExtension(dfs.getKeyProvider());
      Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
          addDelegationTokens(renewer, credentials);
      if (tokens != null && kpTokens != null) {
        Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
        System.arraycopy(tokens, 0, all, 0, tokens.length);
        System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
        tokens = all;
      } else {
        tokens = (tokens != null) ? tokens : kpTokens;
      }
    }
    return tokens;
  }

  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
    return dfs.getInotifyEventStream();
  }
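Sketch of the caller-visible effect of the override above (HDFS-6986): one addDelegationTokens() call now returns the HDFS token plus whatever the configured KeyProvider issues. The class and method names here are illustrative only, and the configuration is assumed to point at a cluster with a KMS-backed KeyProvider:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class CombinedTokenSketch {
  // "conf" is assumed to name an HDFS cluster with a KeyProvider configured;
  // "JobTracker" is just an example renewer, as in the tests in this commit.
  static void collectTokens(Configuration conf) throws Exception {
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Credentials creds = new Credentials();
    Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", creds);
    // With the override above, tokens holds the HDFS delegation token followed
    // by the KeyProvider token(s); testDelegationToken below asserts exactly this.
    for (Token<?> t : tokens) {
      System.out.println(t.getKind() + " for " + t.getService());
    }
  }
}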
@@ -164,7 +164,7 @@ public class BlockManager {
  final BlocksMap blocksMap;

  /** Replication thread. */
  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
  Daemon replicationThread;

  /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
  final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();

@@ -263,6 +263,7 @@ public class BlockManager {
    this.namesystem = namesystem;
    datanodeManager = new DatanodeManager(this, namesystem, conf);
    heartbeatManager = datanodeManager.getHeartbeatManager();
    setReplicationMonitor(new ReplicationMonitor());

    final long pendingPeriod = conf.getLong(
        DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,

@@ -394,7 +395,23 @@ public class BlockManager {
        lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
    }
  }

  public long getReplicationRecheckInterval() {
    return replicationRecheckInterval;
  }

  public AtomicLong excessBlocksCount() {
    return excessBlocksCount;
  }

  public void clearInvalidateBlocks() {
    invalidateBlocks.clear();
  }

  void setReplicationMonitor(Runnable replicationMonitor) {
    replicationThread = new Daemon(replicationMonitor);
  }

  public void setBlockPoolId(String blockPoolId) {
    if (isBlockTokenEnabled()) {
      blockTokenSecretManager.setBlockPoolId(blockPoolId);

@@ -1616,7 +1633,7 @@ public class BlockManager {
   * If there were any replication requests that timed out, reap them
   * and put them back into the neededReplication queue
   */
  private void processPendingReplications() {
  void processPendingReplications() {
    Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
    if (timedOutItems != null) {
      namesystem.writeLock();
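The BlockManager hunks above are part of the HDFS-6940 refactoring: replication state gains accessors and the replication monitor becomes pluggable. A hypothetical illustration (not part of this commit) of what that enables for a ConsensusNode-style NameNode or a test; the class name is made up, and because setReplicationMonitor() and processPendingReplications() are package-private the sketch assumes it lives in the blockmanagement package:

package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

public class BlockManagerHookSketch {
  static void hook(FSNamesystem namesystem) {
    final BlockManager bm = namesystem.getBlockManager();
    long recheckMs = bm.getReplicationRecheckInterval(); // new public accessor
    AtomicLong excess = bm.excessBlocksCount();          // new public accessor
    bm.clearInvalidateBlocks();                          // new public accessor
    // Plug in a custom monitor; a NameNode variant could drive replication
    // on its own schedule instead of the built-in ReplicationMonitor loop.
    bm.setReplicationMonitor(new Runnable() {
      @Override
      public void run() {
        bm.processPendingReplications(); // now package-visible (hunk above)
      }
    });
  }
}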
@@ -1053,7 +1053,7 @@ public class DatanodeManager {
   * 3. Added to exclude --> start decommission.
   * 4. Removed from exclude --> stop decommission.
   */
  private void refreshDatanodes() {
  void refreshDatanodes() {
    for(DatanodeDescriptor node : datanodeMap.values()) {
      // Check if not include.
      if (!hostFileManager.isIncluded(node)) {

@@ -1586,5 +1586,9 @@ public class DatanodeManager {
  public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) {
    this.shouldSendCachingCommands = shouldSendCachingCommands;
  }

  public HostFileManager getHostFileManager() {
    return this.hostFileManager;
  }
}
@@ -129,6 +129,10 @@ class HostFileManager {
  void refresh(String includeFile, String excludeFile) throws IOException {
    HostSet newIncludes = readFile("included", includeFile);
    HostSet newExcludes = readFile("excluded", excludeFile);
    setHosts(newIncludes, newExcludes);
  }

  void setHosts(HostSet newIncludes, HostSet newExcludes) {
    synchronized (this) {
      includes = newIncludes;
      excludes = newExcludes;
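The DatanodeManager and HostFileManager changes follow the same pattern: refresh() is split so include/exclude lists can be installed programmatically rather than only from files. A hypothetical same-package sketch; the class name is made up, and HostSet's no-arg constructor and add() method are assumptions not shown in the hunks above:

package org.apache.hadoop.hdfs.server.blockmanagement;

import java.net.InetSocketAddress;

public class HostListSketch {
  static void installHosts(DatanodeManager dm) {
    HostFileManager hfm = dm.getHostFileManager();        // new public accessor
    HostFileManager.HostSet includes = new HostFileManager.HostSet(); // assumed ctor
    includes.add(new InetSocketAddress("dn1.example.com", 50010));    // assumed API
    hfm.setHosts(includes, new HostFileManager.HostSet()); // empty exclude list
    dm.refreshDatanodes(); // now package-visible: re-evaluate decommission state
  }
}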
@@ -1000,7 +1000,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return Collections.unmodifiableList(auditLoggers);
  }

  private void loadFSImage(StartupOption startOpt) throws IOException {
  protected void loadFSImage(StartupOption startOpt) throws IOException {
    final FSImage fsImage = getFSImage();

    // format before starting up if requested

@@ -1048,7 +1048,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    imageLoadComplete();
  }

  private void startSecretManager() {
  protected void startSecretManager() {
    if (dtSecretManager != null) {
      try {
        dtSecretManager.startThreads();

@@ -1060,7 +1060,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    }
  }

  private void startSecretManagerIfNecessary() {
  protected void startSecretManagerIfNecessary() {
    boolean shouldRun = shouldUseDelegationTokens() &&
        !isInSafeMode() && getEditLog().isOpenForWrite();
    boolean running = dtSecretManager.isRunning();

@@ -1216,7 +1216,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return haEnabled && inActiveState() && startingActiveService;
  }

  private boolean shouldUseDelegationTokens() {
  protected boolean shouldUseDelegationTokens() {
    return UserGroupInformation.isSecurityEnabled() ||
        alwaysUseDelegationTokensForTests;
  }

@@ -2768,6 +2768,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   * @throws UnresolvedLinkException
   * @throws IOException
   */
  protected
  LocatedBlock prepareFileForWrite(String src, INodeFile file,
      String leaseHolder, String clientMachine,
      boolean writeToEditLog,

@@ -3224,6 +3225,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return new FileState(pendingFile, src);
  }

  protected
  LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
      long offset) throws IOException {
    LocatedBlock lBlk = new LocatedBlock(

@@ -3341,8 +3343,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return true;
  }

  private INodeFile checkLease(String src, String holder, INode inode,
      long fileId)
  protected INodeFile checkLease(String src, String holder, INode inode,
      long fileId)
      throws LeaseExpiredException, FileNotFoundException {
    assert hasReadLock();
    final String ident = src + " (inode " + fileId + ")";

@@ -4459,7 +4461,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return leaseManager.reassignLease(lease, src, newHolder);
  }

  private void commitOrCompleteLastBlock(final INodeFile fileINode,
  protected void commitOrCompleteLastBlock(final INodeFile fileINode,
      final Block commitBlock) throws IOException {
    assert hasWriteLock();
    Preconditions.checkArgument(fileINode.isUnderConstruction());

@@ -4855,6 +4857,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   * @return an array of datanode commands
   * @throws IOException
   */
  protected
  HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
      StorageReport[] reports, long cacheCapacity, long cacheUsed,
      int xceiverCount, int xmitsInProgress, int failedVolumes)

@@ -4904,8 +4907,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   * @param file
   * @param logRetryCache
   */
  private void persistBlocks(String path, INodeFile file,
      boolean logRetryCache) {
  protected void persistBlocks(String path, INodeFile file,
      boolean logRetryCache) {
    assert hasWriteLock();
    Preconditions.checkArgument(file.isUnderConstruction());
    getEditLog().logUpdateBlocks(path, file, logRetryCache);

@@ -5401,7 +5404,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   * @param path
   * @param file
   */
  private void persistNewBlock(String path, INodeFile file) {
  protected void persistNewBlock(String path, INodeFile file) {
    Preconditions.checkArgument(file.isUnderConstruction());
    getEditLog().logAddBlock(path, file);
    if (NameNode.stateChangeLog.isDebugEnabled()) {

@@ -7279,7 +7282,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   *
   * @return true if delegation token operation is allowed
   */
  private boolean isAllowedDelegationTokenOp() throws IOException {
  protected boolean isAllowedDelegationTokenOp() throws IOException {
    AuthenticationMethod authMethod = getConnectionAuthenticationMethod();
    if (UserGroupInformation.isSecurityEnabled()
        && (authMethod != AuthenticationMethod.KERBEROS)

@@ -7446,7 +7449,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
    for (DatanodeDescriptor node : live) {
      Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
      info.put(node.getHostName(), getLiveNodeInfo(node));
    }
    return JSON.toString(info);
  }

  protected Map<String, Object> getLiveNodeInfo(DatanodeDescriptor node) {
    return ImmutableMap.<String, Object>builder()
        .put("infoAddr", node.getInfoAddr())
        .put("infoSecureAddr", node.getInfoSecureAddr())
        .put("xferaddr", node.getXferAddr())

@@ -7464,10 +7473,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
        .put("blockPoolUsedPercent", node.getBlockPoolUsedPercent())
        .put("volfails", node.getVolumeFailures())
        .build();

      info.put(node.getHostName(), innerinfo);
    }
    return JSON.toString(info);
  }

  /**

@@ -7752,17 +7757,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  public ReentrantLock getLongReadLockForTests() {
    return fsLock.longReadLock;
  }

  @VisibleForTesting
  public SafeModeInfo getSafeModeInfoForTests() {
    return safeMode;
  }

  @VisibleForTesting
  public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) {
    this.nnResourceChecker = nnResourceChecker;
  }

  public SafeModeInfo getSafeModeInfo() {
    return safeMode;
  }

  @Override
  public boolean isAvoidingStaleDataNodesForWrite() {
    return this.blockManager.getDatanodeManager()
@@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.FSTestWrapper;
import org.apache.hadoop.fs.FileContext;

@@ -51,12 +52,22 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import static org.mockito.Mockito.withSettings;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;

import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;

@@ -91,6 +102,7 @@ public class TestEncryptionZones {
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
        JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
    );
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    // Lower the batch size for testing
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
        2);

@@ -753,4 +765,35 @@ public class TestEncryptionZones {
          e.getCause());
    }
  }

  /**
   * Tests obtaining delegation token from stored key
   */
  @Test(timeout = 120000)
  public void testDelegationToken() throws Exception {
    UserGroupInformation.createRemoteUser("JobTracker");
    DistributedFileSystem dfs = cluster.getFileSystem();
    KeyProviderCryptoExtension keyProvider = Mockito.mock(KeyProviderCryptoExtension.class,
        withSettings().extraInterfaces(
            DelegationTokenExtension.class,
            CryptoExtension.class));
    Mockito.when(keyProvider.getConf()).thenReturn(conf);
    byte[] testIdentifier = "Test identifier for delegation token".getBytes();

    Token<?> testToken = new Token(testIdentifier, new byte[0],
        new Text(), new Text());
    Mockito.when(((DelegationTokenExtension)keyProvider).
        addDelegationTokens(anyString(), (Credentials)any())).
        thenReturn(new Token<?>[] { testToken });

    dfs.getClient().provider = keyProvider;

    Credentials creds = new Credentials();
    final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
    DistributedFileSystem.LOG.debug("Delegation tokens: " +
        Arrays.asList(tokens));
    Assert.assertEquals(2, tokens.length);
    Assert.assertEquals(tokens[1], testToken);
    Assert.assertEquals(1, creds.numberOfTokens());
  }
}
@@ -223,7 +223,7 @@ public class NameNodeAdapter {
   * if safemode is not running.
   */
  public static int getSafeModeSafeBlocks(NameNode nn) {
    SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
    SafeModeInfo smi = nn.getNamesystem().getSafeModeInfo();
    if (smi == null) {
      return -1;
    }