Merge branch 'trunk' into MR-2841

commit b160707ace

@@ -330,6 +330,9 @@ Trunk (Unreleased)

HADOOP-11033. shell scripts ignore JAVA_HOME on OS X. (aw)

HADOOP-11052. hadoop_verify_secure_prereq's results aren't checked
in bin/hdfs (aw)

OPTIMIZATIONS

HADOOP-7761. Improve the performance of raw comparisons. (todd)

@@ -504,6 +507,8 @@ Release 2.6.0 - UNRELEASED
HADOOP-11060. Create a CryptoCodec test that verifies interoperability
between the JCE and OpenSSL implementations. (hitliuyi via tucu)

HADOOP-11070. Create MiniKMS for testing. (tucu)

OPTIMIZATIONS

HADOOP-10838. Byte array native checksumming. (James Thomas via todd)

@@ -757,6 +762,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-11063. KMS cannot deploy on Windows, because class names are too long.
(cnauroth)

HADOOP-11067. warning message 'ssl.client.truststore.location has not
been set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal)

HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to
determine if in proxyuser mode or not. (tucu)

Release 2.5.1 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -774,6 +785,8 @@ Release 2.5.1 - UNRELEASED

HADOOP-11001. Fix test-patch to work with the git repo. (kasha)

HADOOP-11065. Rat check should exclude "**/build/**". (kasha)

Release 2.5.0 - 2014-08-11

INCOMPATIBLE CHANGES

@@ -644,9 +644,9 @@ function hadoop_verify_secure_prereq
# this.

# ${EUID} comes from the shell itself!
if [[ "${EUID}" -ne 0 ]] || [[ -n "${HADOOP_SECURE_COMMAND}" ]]; then
if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
hadoop_error "ERROR: You must be a privileged in order to run a secure serice."
return 1
exit 1
else
return 0
fi

@@ -385,9 +385,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
// if current UGI is different from UGI at constructor time, behave as
// proxyuser
UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
final String doAsUser =
(loginUgi.getShortUserName().equals(currentUgi.getShortUserName()))
? null : currentUgi.getShortUserName();
final String doAsUser = (currentUgi.getAuthenticationMethod() ==
UserGroupInformation.AuthenticationMethod.PROXY)
? currentUgi.getShortUserName() : null;

// creating the HTTP connection using the current UGI at constructor time
conn = loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
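
The hunk above (HADOOP-11069) replaces the short-user-name comparison with an explicit check of the caller's authentication method. A minimal sketch of the new decision logic, written as a stand-alone helper; the class and method names here are ours, only the UserGroupInformation calls are the real API shown in the diff:

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsUserSketch {
  // Hypothetical helper mirroring the new KMSClientProvider logic:
  // only impersonate when the current UGI is an actual proxy user.
  static String pickDoAsUser(UserGroupInformation currentUgi) {
    return (currentUgi.getAuthenticationMethod() ==
        UserGroupInformation.AuthenticationMethod.PROXY)
        ? currentUgi.getShortUserName() : null;
  }

  public static void main(String[] args) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // Prints null unless the caller was created via createProxyUser(...).
    System.out.println("doAs user: " + pickDoAsUser(ugi));
  }
}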

@@ -212,7 +212,7 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
trustManagers = new TrustManager[]{trustManager};
} else {
LOG.warn("The property '" + locationProperty + "' has not been set, " +
LOG.debug("The property '" + locationProperty + "' has not been set, " +
"no TrustStore will be loaded");
trustManagers = null;
}

@@ -222,9 +222,9 @@
</goals>
<configuration>
<target>
<mkdir dir="${project.build.directory}/test-classes/webapp"/>
<mkdir dir="${project.build.directory}/test-classes/kms-webapp"/>

<copy todir="${project.build.directory}/test-classes/webapp">
<copy todir="${project.build.directory}/test-classes/kms-webapp">
<fileset dir="${basedir}/src/main/webapp"/>
</copy>
</target>

@@ -0,0 +1,197 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;

import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.webapp.WebAppContext;

import java.io.File;
import java.io.FileWriter;
import java.io.Writer;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;

public class MiniKMS {

private static Server createJettyServer(String keyStore, String password) {
try {
boolean ssl = keyStore != null;
InetAddress localhost = InetAddress.getByName("localhost");
String host = "localhost";
ServerSocket ss = new ServerSocket(0, 50, localhost);
int port = ss.getLocalPort();
ss.close();
Server server = new Server(0);
if (!ssl) {
server.getConnectors()[0].setHost(host);
server.getConnectors()[0].setPort(port);
} else {
SslSocketConnector c = new SslSocketConnector();
c.setHost(host);
c.setPort(port);
c.setNeedClientAuth(false);
c.setKeystore(keyStore);
c.setKeystoreType("jks");
c.setKeyPassword(password);
server.setConnectors(new Connector[]{c});
}
return server;
} catch (Exception ex) {
throw new RuntimeException("Could not start embedded servlet container, "
+ ex.getMessage(), ex);
}
}

private static URL getJettyURL(Server server) {
boolean ssl = server.getConnectors()[0].getClass()
== SslSocketConnector.class;
try {
String scheme = (ssl) ? "https" : "http";
return new URL(scheme + "://" +
server.getConnectors()[0].getHost() + ":" +
server.getConnectors()[0].getPort());
} catch (MalformedURLException ex) {
throw new RuntimeException("It should never happen, " + ex.getMessage(),
ex);
}
}

public static class Builder {
private File kmsConfDir;
private String log4jConfFile;
private File keyStoreFile;
private String keyStorePassword;

public Builder() {
kmsConfDir = new File("target/test-classes").getAbsoluteFile();
log4jConfFile = "kms-log4j.properties";
}

public Builder setKmsConfDir(File confDir) {
Preconditions.checkNotNull(confDir, "KMS conf dir is NULL");
Preconditions.checkArgument(confDir.exists(),
"KMS conf dir does not exist");
kmsConfDir = confDir;
return this;
}

public Builder setLog4jConfFile(String log4jConfFile) {
Preconditions.checkNotNull(log4jConfFile, "log4jconf file is NULL");
this.log4jConfFile = log4jConfFile;
return this;
}

public Builder setSslConf(File keyStoreFile, String keyStorePassword) {
Preconditions.checkNotNull(keyStoreFile, "keystore file is NULL");
Preconditions.checkNotNull(keyStorePassword, "keystore password is NULL");
Preconditions.checkArgument(keyStoreFile.exists(),
"keystore file does not exist");
this.keyStoreFile = keyStoreFile;
this.keyStorePassword = keyStorePassword;
return this;
}

public MiniKMS build() {
Preconditions.checkArgument(kmsConfDir.exists(),
"KMS conf dir does not exist");
return new MiniKMS(kmsConfDir.getAbsolutePath(), log4jConfFile,
(keyStoreFile != null) ? keyStoreFile.getAbsolutePath() : null,
keyStorePassword);
}
}

private String kmsConfDir;
private String log4jConfFile;
private String keyStore;
private String keyStorePassword;
private Server jetty;
private URL kmsURL;

public MiniKMS(String kmsConfDir, String log4ConfFile, String keyStore,
String password) {
this.kmsConfDir = kmsConfDir;
this.log4jConfFile = log4ConfFile;
this.keyStore = keyStore;
this.keyStorePassword = password;
}

public void start() throws Exception {
System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, kmsConfDir);
File aclsFile = new File(kmsConfDir, "kms-acls.xml");
if (!aclsFile.exists()) {
Configuration acls = new Configuration(false);
Writer writer = new FileWriter(aclsFile);
acls.writeXml(writer);
writer.close();
}
File coreFile = new File(kmsConfDir, "core-site.xml");
if (!coreFile.exists()) {
Configuration core = new Configuration();
Writer writer = new FileWriter(coreFile);
core.writeXml(writer);
writer.close();
}
File kmsFile = new File(kmsConfDir, "kms-site.xml");
if (!kmsFile.exists()) {
Configuration kms = new Configuration(false);
kms.set("hadoop.security.key.provider.path",
"jceks://file@" + kmsConfDir + "/kms.keystore");
kms.set("hadoop.kms.authentication.type", "simple");
Writer writer = new FileWriter(kmsFile);
kms.writeXml(writer);
writer.close();
}
System.setProperty("log4j.configuration", log4jConfFile);
jetty = createJettyServer(keyStore, keyStorePassword);
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("kms-webapp");
if (url == null) {
throw new RuntimeException(
"Could not find kms-webapp/ dir in test classpath");
}
WebAppContext context = new WebAppContext(url.getPath(), "/kms");
jetty.addHandler(context);
jetty.start();
kmsURL = new URL(getJettyURL(jetty), "kms");
}

public URL getKMSUrl() {
return kmsURL;
}

public void stop() {
if (jetty != null && jetty.isRunning()) {
try {
jetty.stop();
jetty = null;
} catch (Exception ex) {
throw new RuntimeException("Could not stop MiniKMS embedded Jetty, " +
ex.getMessage(), ex);
}
}
}

}
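
For context, a minimal usage sketch of the MiniKMS class added above, modeled on how TestKMS uses it later in this commit. The conf directory value is an assumption (it must exist and contain the KMS test configuration), and the returned URL is only valid between start() and stop():

import java.io.File;
import java.net.URL;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;

public class MiniKMSUsageSketch {
  public static void main(String[] args) throws Exception {
    File confDir = new File("target/test-classes"); // assumed test conf dir
    MiniKMS kms = new MiniKMS.Builder()
        .setKmsConfDir(confDir)
        .setLog4jConfFile("kms-log4j.properties")
        .build();
    kms.start();
    try {
      // e.g. http://localhost:<ephemeral port>/kms
      URL url = kms.getKMSUrl();
      System.out.println("MiniKMS running at " + url);
    } finally {
      kms.stop();
    }
  }
}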

@@ -36,10 +36,6 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.webapp.WebAppContext;

import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;

@@ -52,7 +48,6 @@ import java.io.IOException;
import java.io.Writer;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.net.URI;

@@ -91,49 +86,6 @@ public class TestKMS {
return file;
}

public static Server createJettyServer(String keyStore, String password) {
try {
boolean ssl = keyStore != null;
InetAddress localhost = InetAddress.getByName("localhost");
String host = "localhost";
ServerSocket ss = new ServerSocket(0, 50, localhost);
int port = ss.getLocalPort();
ss.close();
Server server = new Server(0);
if (!ssl) {
server.getConnectors()[0].setHost(host);
server.getConnectors()[0].setPort(port);
} else {
SslSocketConnector c = new SslSocketConnector();
c.setHost(host);
c.setPort(port);
c.setNeedClientAuth(false);
c.setKeystore(keyStore);
c.setKeystoreType("jks");
c.setKeyPassword(password);
server.setConnectors(new Connector[]{c});
}
return server;
} catch (Exception ex) {
throw new RuntimeException("Could not start embedded servlet container, "
+ ex.getMessage(), ex);
}
}

public static URL getJettyURL(Server server) {
boolean ssl = server.getConnectors()[0].getClass()
== SslSocketConnector.class;
try {
String scheme = (ssl) ? "https" : "http";
return new URL(scheme + "://" +
server.getConnectors()[0].getHost() + ":" +
server.getConnectors()[0].getPort());
} catch (MalformedURLException ex) {
throw new RuntimeException("It should never happen, " + ex.getMessage(),
ex);
}
}

public static abstract class KMSCallable implements Callable<Void> {
private URL kmsUrl;

@@ -144,33 +96,19 @@ public class TestKMS {

protected void runServer(String keystore, String password, File confDir,
KMSCallable callable) throws Exception {
System.setProperty(KMSConfiguration.KMS_CONFIG_DIR,
confDir.getAbsolutePath());
System.setProperty("log4j.configuration", "log4j.properties");
Server jetty = createJettyServer(keystore, password);
MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder().setKmsConfDir(confDir)
.setLog4jConfFile("log4j.properties");
if (keystore != null) {
miniKMSBuilder.setSslConf(new File(keystore), password);
}
MiniKMS miniKMS = miniKMSBuilder.build();
miniKMS.start();
try {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("webapp");
if (url == null) {
throw new RuntimeException(
"Could not find webapp/ dir in test classpath");
}
WebAppContext context = new WebAppContext(url.getPath(), "/kms");
jetty.addHandler(context);
jetty.start();
url = new URL(getJettyURL(jetty), "kms");
System.out.println("Test KMS running at: " + url);
callable.kmsUrl = url;
System.out.println("Test KMS running at: " + miniKMS.getKMSUrl());
callable.kmsUrl = miniKMS.getKMSUrl();
callable.call();
} finally {
if (jetty != null && jetty.isRunning()) {
try {
jetty.stop();
} catch (Exception ex) {
throw new RuntimeException("Could not stop embedded Jetty, " +
ex.getMessage(), ex);
}
}
miniKMS.stop();
}
}

@@ -1219,7 +1157,7 @@ public class TestKMS {
final URI uri = createKMSUri(getKMSUrl());

// proxyuser client using kerberos credentials
UserGroupInformation clientUgi = UserGroupInformation.
final UserGroupInformation clientUgi = UserGroupInformation.
loginUserFromKeytabAndReturnUGI("client", keytab.getAbsolutePath());
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override

@@ -1229,7 +1167,7 @@ public class TestKMS {

// authorized proxyuser
UserGroupInformation fooUgi =
UserGroupInformation.createRemoteUser("foo");
UserGroupInformation.createProxyUser("foo", clientUgi);
fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {

@@ -1241,7 +1179,7 @@ public class TestKMS {

// unauthorized proxyuser
UserGroupInformation foo1Ugi =
UserGroupInformation.createRemoteUser("foo1");
UserGroupInformation.createProxyUser("foo1", clientUgi);
foo1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
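
The last three hunks switch the test's impersonated users from createRemoteUser to createProxyUser, so that the UGI really carries the PROXY authentication method that the new KMSClientProvider code keys on. A hedged sketch of the pattern, assuming a kerberos-authenticated clientUgi as in the test; the class and method names are ours:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserSketch {
  static void runAsProxy(UserGroupInformation clientUgi) throws Exception {
    // "foo" acts through clientUgi; getAuthenticationMethod() on this UGI
    // returns PROXY, which is how the server-side checks see a proxy user.
    UserGroupInformation fooUgi =
        UserGroupInformation.createProxyUser("foo", clientUgi);
    fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // issue KMS / KeyProvider calls here as the proxied user
        return null;
      }
    });
  }
}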

@@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED
HDFS-6376. Distcp data between two HA clusters requires another configuration.
(Dave Marion and Haohui Mai via jing9)

HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)

OPTIMIZATIONS

HDFS-6690. Deduplicate xattr names in memory. (wang)

@@ -602,6 +604,17 @@ Release 2.6.0 - UNRELEASED
HDFS-6996. SnapshotDiff report can hit IndexOutOfBoundsException when there
are nested renamed directory/file. (jing9)

HDFS-6831. Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'.
(Xiaoyu Yao via Arpit Agarwal)

HDFS-6979. hdfs.dll does not produce .pdb files. (cnauroth)

HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
Arpit Agarwal)

HDFS-6898. DN must reserve space for a full block when an RBW block is
created. (Arpit Agarwal)

BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS

HDFS-6387. HDFS CLI admin tool for creating & deleting an

@@ -703,6 +716,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6714. TestBlocksScheduledCounter#testBlocksScheduledCounter should
shutdown cluster (vinayakumarb)

HDFS-6986. DistributedFileSystem must get delegation tokens from configured
KeyProvider. (zhz via tucu)

Release 2.5.1 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -415,11 +415,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</exec>
<exec executable="msbuild" dir="${project.build.directory}/native"
failonerror="true">
<arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=Release"/>
<arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=RelWithDebInfo /p:LinkIncremental=false"/>
</exec>
<!-- Copy for inclusion in distribution. -->
<copy todir="${project.build.directory}/bin">
<fileset dir="${project.build.directory}/native/target/bin/Release"/>
<fileset dir="${project.build.directory}/native/target/bin/RelWithDebInfo"/>
</copy>
</target>
</configuration>

@@ -437,7 +437,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<attribute name="test"/>
<sequential>
<echo message="Running @{test}"/>
<exec executable="${project.build.directory}/native/Release/@{test}" failonerror="true" dir="${project.build.directory}/native/">
<exec executable="${project.build.directory}/native/RelWithDebInfo/@{test}" failonerror="true" dir="${project.build.directory}/native/">
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<!-- HADOOP_HOME required to find winutils. -->
<env key="HADOOP_HOME" value="${hadoop.common.build.dir}"/>

@@ -225,14 +225,13 @@ esac

if [[ -n "${secure_service}" ]]; then
HADOOP_SECURE_USER="${secure_user}"
if hadoop_verify_secure_prereq; then
hadoop_setup_secure_service
priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.out"
priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.err"
priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.pid"
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
fi
hadoop_verify_secure_prereq
hadoop_setup_secure_service
priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.out"
priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.err"
priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.pid"
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
else
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid"

@@ -3084,4 +3084,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
DFSHedgedReadMetrics getHedgedReadMetrics() {
return HEDGED_READ_METRIC;
}

public KeyProviderCryptoExtension getKeyProvider() {
return provider;
}
}

@@ -84,8 +84,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

@@ -1946,6 +1948,28 @@ public class DistributedFileSystem extends FileSystem {
}.resolve(this, absF);
}

@Override
public Token<?>[] addDelegationTokens(
final String renewer, Credentials credentials) throws IOException {
Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
if (dfs.getKeyProvider() != null) {
KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(dfs.getKeyProvider());
Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
addDelegationTokens(renewer, credentials);
if (tokens != null && kpTokens != null) {
Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
System.arraycopy(tokens, 0, all, 0, tokens.length);
System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
tokens = all;
} else {
tokens = (tokens != null) ? tokens : kpTokens;
}
}
return tokens;
}

public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
return dfs.getInotifyEventStream();
}
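
The addDelegationTokens override above (HDFS-6986) makes the KMS delegation token come back alongside the HDFS token from a single call. A small caller-side sketch; the renewer name and the configuration are placeholders, and the extra token only appears when a KeyProvider is configured for the cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class TokenFetchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes fs.defaultFS points at HDFS
    FileSystem fs = FileSystem.get(conf);
    Credentials creds = new Credentials();
    // With the change above, this returns the HDFS delegation token plus,
    // when dfs.getKeyProvider() is non-null, the KMS delegation token too.
    Token<?>[] tokens = fs.addDelegationTokens("yarn", creds);
    for (Token<?> t : tokens) {
      System.out.println(t.getKind() + " -> " + t.getService());
    }
  }
}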

@@ -48,7 +48,7 @@ public class HdfsConstants {
"org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";


public static final int MIN_BLOCKS_FOR_WRITE = 5;
public static final int MIN_BLOCKS_FOR_WRITE = 1;

// Long that indicates "leave current quota unchanged"
public static final long QUOTA_DONT_SET = Long.MAX_VALUE;

@@ -164,7 +164,7 @@ public class BlockManager {
final BlocksMap blocksMap;

/** Replication thread. */
final Daemon replicationThread = new Daemon(new ReplicationMonitor());
Daemon replicationThread;

/** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();

@@ -263,6 +263,7 @@ public class BlockManager {
this.namesystem = namesystem;
datanodeManager = new DatanodeManager(this, namesystem, conf);
heartbeatManager = datanodeManager.getHeartbeatManager();
setReplicationMonitor(new ReplicationMonitor());

final long pendingPeriod = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,

@@ -394,7 +395,23 @@ public class BlockManager {
lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
}
}


public long getReplicationRecheckInterval() {
return replicationRecheckInterval;
}

public AtomicLong excessBlocksCount() {
return excessBlocksCount;
}

public void clearInvalidateBlocks() {
invalidateBlocks.clear();
}

void setReplicationMonitor(Runnable replicationMonitor) {
replicationThread = new Daemon(replicationMonitor);
}

public void setBlockPoolId(String blockPoolId) {
if (isBlockTokenEnabled()) {
blockTokenSecretManager.setBlockPoolId(blockPoolId);

@@ -1616,7 +1633,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
private void processPendingReplications() {
void processPendingReplications() {
Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
if (timedOutItems != null) {
namesystem.writeLock();
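
The BlockManager hunks above (part of the HDFS-6940 refactoring) turn the replication monitor from a final field initialized in place into something that can be supplied via setReplicationMonitor(Runnable). The idea, reduced to a plain-JDK sketch; the class and field names below are illustrative only and are not Hadoop APIs:

public class PluggableMonitorSketch {
  private Thread monitorThread; // stands in for the Daemon replicationThread

  // Mirrors setReplicationMonitor(Runnable): the concrete monitor logic is
  // injected by the caller instead of being hard-wired at field init time.
  void setMonitor(Runnable monitor) {
    monitorThread = new Thread(monitor, "ReplicationMonitor");
    monitorThread.setDaemon(true);
  }

  void start() {
    monitorThread.start();
  }

  public static void main(String[] args) throws InterruptedException {
    PluggableMonitorSketch bm = new PluggableMonitorSketch();
    bm.setMonitor(new Runnable() {
      @Override
      public void run() {
        System.out.println("custom replication check");
      }
    });
    bm.start();
    Thread.sleep(100L); // let the daemon thread run once before exiting
  }
}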

@@ -1053,7 +1053,7 @@ public class DatanodeManager {
* 3. Added to exclude --> start decommission.
* 4. Removed from exclude --> stop decommission.
*/
private void refreshDatanodes() {
void refreshDatanodes() {
for(DatanodeDescriptor node : datanodeMap.values()) {
// Check if not include.
if (!hostFileManager.isIncluded(node)) {

@@ -1586,5 +1586,9 @@ public class DatanodeManager {
public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) {
this.shouldSendCachingCommands = shouldSendCachingCommands;
}

public HostFileManager getHostFileManager() {
return this.hostFileManager;
}
}

@@ -129,6 +129,10 @@ class HostFileManager {
void refresh(String includeFile, String excludeFile) throws IOException {
HostSet newIncludes = readFile("included", includeFile);
HostSet newExcludes = readFile("excluded", excludeFile);
setHosts(newIncludes, newExcludes);
}

void setHosts(HostSet newIncludes, HostSet newExcludes) {
synchronized (this) {
includes = newIncludes;
excludes = newExcludes;

@@ -34,10 +34,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
public ReplicaBeingWritten(long blockId, long genStamp,
FsVolumeSpi vol, File dir) {
super( blockId, genStamp, vol, dir);
FsVolumeSpi vol, File dir, long bytesToReserve) {
super(blockId, genStamp, vol, dir, bytesToReserve);
}

/**

@@ -60,10 +62,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
public ReplicaBeingWritten(long blockId, long len, long genStamp,
FsVolumeSpi vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir, writer);
FsVolumeSpi vol, File dir, Thread writer, long bytesToReserve) {
super(blockId, len, genStamp, vol, dir, writer, bytesToReserve);
}

/**

@@ -44,6 +44,13 @@ public class ReplicaInPipeline extends ReplicaInfo
private long bytesOnDisk;
private byte[] lastChecksum;
private Thread writer;

/**
* Bytes reserved for this replica on the containing volume.
* Based off difference between the estimated maximum block length and
* the bytes already written to this block.
*/
private long bytesReserved;

/**
* Constructor for a zero length replica

@@ -51,10 +58,12 @@ public class ReplicaInPipeline extends ReplicaInfo
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
public ReplicaInPipeline(long blockId, long genStamp,
FsVolumeSpi vol, File dir) {
this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
FsVolumeSpi vol, File dir, long bytesToReserve) {
this(blockId, 0L, genStamp, vol, dir, Thread.currentThread(), bytesToReserve);
}

/**

@@ -67,7 +76,7 @@ public class ReplicaInPipeline extends ReplicaInfo
ReplicaInPipeline(Block block,
FsVolumeSpi vol, File dir, Thread writer) {
this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
vol, dir, writer);
vol, dir, writer, 0L);
}

/**

@@ -78,13 +87,16 @@ public class ReplicaInPipeline extends ReplicaInfo
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
ReplicaInPipeline(long blockId, long len, long genStamp,
FsVolumeSpi vol, File dir, Thread writer ) {
FsVolumeSpi vol, File dir, Thread writer, long bytesToReserve) {
super( blockId, len, genStamp, vol, dir);
this.bytesAcked = len;
this.bytesOnDisk = len;
this.writer = writer;
this.bytesReserved = bytesToReserve;
}

/**

@@ -96,6 +108,7 @@ public class ReplicaInPipeline extends ReplicaInfo
this.bytesAcked = from.getBytesAcked();
this.bytesOnDisk = from.getBytesOnDisk();
this.writer = from.writer;
this.bytesReserved = from.bytesReserved;
}

@Override

@@ -115,13 +128,25 @@ public class ReplicaInPipeline extends ReplicaInfo

@Override // ReplicaInPipelineInterface
public void setBytesAcked(long bytesAcked) {
long newBytesAcked = bytesAcked - this.bytesAcked;
this.bytesAcked = bytesAcked;

// Once bytes are ACK'ed we can release equivalent space from the
// volume's reservedForRbw count. We could have released it as soon
// as the write-to-disk completed but that would be inefficient.
getVolume().releaseReservedSpace(newBytesAcked);
bytesReserved -= newBytesAcked;
}

@Override // ReplicaInPipelineInterface
public long getBytesOnDisk() {
return bytesOnDisk;
}

@Override
public long getBytesReserved() {
return bytesReserved;
}

@Override // ReplicaInPipelineInterface
public synchronized void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) {

@@ -222,6 +222,13 @@ abstract public class ReplicaInfo extends Block implements Replica {
public void setUnlinked() {
// no need to be unlinked
}

/**
* Number of bytes reserved for this replica on disk.
*/
public long getBytesReserved() {
return 0;
}

/**
* Copy specified file into a temporary file. Then rename the

@@ -45,4 +45,15 @@ public interface FsVolumeSpi {
public File getFinalizedDir(String bpid) throws IOException;

public StorageType getStorageType();

/**
* Reserve disk space for an RBW block so a writer does not run out of
* space before the block is full.
*/
public void reserveSpaceForRbw(long bytesToReserve);

/**
* Release disk space previously reserved for RBW block.
*/
public void releaseReservedSpace(long bytesToRelease);
}

@@ -240,7 +240,7 @@ class BlockPoolSlice {
return DatanodeUtil.createTmpFile(b, f);
}

File addBlock(Block b, File f) throws IOException {
File addFinalizedBlock(Block b, File f) throws IOException {
File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
if (!blockDir.exists()) {
if (!blockDir.mkdirs()) {

@@ -334,9 +334,11 @@ class BlockPoolSlice {
// The restart meta file exists
if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
// It didn't expire. Load the replica as a RBW.
// We don't know the expected block length, so just use 0
// and don't reserve any more space for writes.
newReplica = new ReplicaBeingWritten(blockId,
validateIntegrityAndSetLength(file, genStamp),
genStamp, volume, file.getParentFile(), null);
genStamp, volume, file.getParentFile(), null, 0);
loadRwr = false;
}
sc.close();

@@ -593,7 +593,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
+ " from " + srcfile + " to " + dstfile.getAbsolutePath(), e);
}
if (LOG.isDebugEnabled()) {
LOG.debug("addBlock: Moved " + srcmeta + " to " + dstmeta
LOG.debug("addFinalizedBlock: Moved " + srcmeta + " to " + dstmeta
+ " and " + srcfile + " to " + dstfile);
}
return dstfile;

@@ -712,7 +712,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
File oldmeta = replicaInfo.getMetaFile();
ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS,
v, newBlkFile.getParentFile(), Thread.currentThread());
v, newBlkFile.getParentFile(), Thread.currentThread(), estimateBlockLen);
File newmeta = newReplicaInfo.getMetaFile();

// rename meta file to rbw directory

@@ -748,7 +748,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {

// Replace finalized replica by a RBW replica in replicas map
volumeMap.add(bpid, newReplicaInfo);

v.reserveSpaceForRbw(estimateBlockLen - replicaInfo.getNumBytes());
return newReplicaInfo;
}

@@ -876,7 +876,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// create a rbw file to hold block in the designated volume
File f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(),
b.getGenerationStamp(), v, f.getParentFile());
b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes());
volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
return newReplicaInfo;
}

@@ -992,7 +992,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// create RBW
final ReplicaBeingWritten rbw = new ReplicaBeingWritten(
blockId, numBytes, expectedGs,
v, dest.getParentFile(), Thread.currentThread());
v, dest.getParentFile(), Thread.currentThread(), 0);
rbw.setBytesAcked(visible);
// overwrite the RBW in the volume map
volumeMap.add(b.getBlockPoolId(), rbw);

@@ -1013,7 +1013,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// create a temporary file to hold block in the designated volume
File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(),
b.getGenerationStamp(), v, f.getParentFile());
b.getGenerationStamp(), v, f.getParentFile(), 0);
volumeMap.add(b.getBlockPoolId(), newReplicaInfo);

return newReplicaInfo;

@@ -1079,7 +1079,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
" for block " + replicaInfo);
}

File dest = v.addBlock(bpid, replicaInfo, f);
File dest = v.addFinalizedBlock(
bpid, replicaInfo, f, replicaInfo.getBytesReserved());
newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());
}
volumeMap.add(bpid, newReplicaInfo);

@@ -28,6 +28,7 @@ import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;

@@ -62,6 +63,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
private final DF usage;
private final long reserved;

// Disk space reserved for open blocks.
private AtomicLong reservedForRbw;

// Capacity configured. This is useful when we want to
// limit the visible capacity for tests. If negative, then we just
// query from the filesystem.

@@ -82,6 +86,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
this.reserved = conf.getLong(
DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
this.reservedForRbw = new AtomicLong(0L);
this.currentDir = currentDir;
File parent = currentDir.getParentFile();
this.usage = new DF(parent, conf);

@@ -166,13 +171,18 @@ public class FsVolumeImpl implements FsVolumeSpi {

@Override
public long getAvailable() throws IOException {
long remaining = getCapacity()-getDfsUsed();
long remaining = getCapacity() - getDfsUsed() - reservedForRbw.get();
long available = usage.getAvailable();
if (remaining > available) {
remaining = available;
}
return (remaining > 0) ? remaining : 0;
}

@VisibleForTesting
public long getReservedForRbw() {
return reservedForRbw.get();
}

long getReserved(){
return reserved;

@@ -217,16 +227,58 @@ public class FsVolumeImpl implements FsVolumeSpi {
return getBlockPoolSlice(bpid).createTmpFile(b);
}

@Override
public void reserveSpaceForRbw(long bytesToReserve) {
if (bytesToReserve != 0) {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug("Reserving " + bytesToReserve + " on volume " + getBasePath());
}
reservedForRbw.addAndGet(bytesToReserve);
}
}

@Override
public void releaseReservedSpace(long bytesToRelease) {
if (bytesToRelease != 0) {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug("Releasing " + bytesToRelease + " on volume " + getBasePath());
}

long oldReservation, newReservation;
do {
oldReservation = reservedForRbw.get();
newReservation = oldReservation - bytesToRelease;
if (newReservation < 0) {
// Failsafe, this should never occur in practice, but if it does we don't
// want to start advertising more space than we have available.
newReservation = 0;
}
} while (!reservedForRbw.compareAndSet(oldReservation, newReservation));
}
}

/**
* RBW files. They get moved to the finalized block directory when
* the block is finalized.
*/
File createRbwFile(String bpid, Block b) throws IOException {
reserveSpaceForRbw(b.getNumBytes());
return getBlockPoolSlice(bpid).createRbwFile(b);
}

File addBlock(String bpid, Block b, File f) throws IOException {
return getBlockPoolSlice(bpid).addBlock(b, f);
/**
*
* @param bytesReservedForRbw Space that was reserved during
* block creation. Now that the block is being finalized we
* can free up this space.
* @return
* @throws IOException
*/
File addFinalizedBlock(String bpid, Block b,
File f, long bytesReservedForRbw)
throws IOException {
releaseReservedSpace(bytesReservedForRbw);
return getBlockPoolSlice(bpid).addFinalizedBlock(b, f);
}

Executor getCacheExecutor() {
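
The releaseReservedSpace implementation above clamps the counter at zero with a compare-and-set loop rather than a plain addAndGet, so an over-release can never make the volume advertise space it does not have. A self-contained sketch of just that accounting, using only java.util.concurrent; the class and method names are ours, not the FsVolumeImpl fields:

import java.util.concurrent.atomic.AtomicLong;

public class RbwReservationSketch {
  private final AtomicLong reserved = new AtomicLong(0L);

  void reserve(long bytes) {
    if (bytes != 0) {
      reserved.addAndGet(bytes);
    }
  }

  // Clamp at zero: releasing more than is currently reserved must not
  // drive the counter negative.
  void release(long bytes) {
    if (bytes == 0) {
      return;
    }
    long oldVal;
    long newVal;
    do {
      oldVal = reserved.get();
      newVal = Math.max(0L, oldVal - bytes);
    } while (!reserved.compareAndSet(oldVal, newVal));
  }

  long get() {
    return reserved.get();
  }

  public static void main(String[] args) {
    RbwReservationSketch v = new RbwReservationSketch();
    v.reserve(128L * 1024 * 1024);  // reserve a full block's worth
    v.release(64L * 1024 * 1024);   // bytes acked so far
    v.release(256L * 1024 * 1024);  // over-release is clamped to zero
    System.out.println(v.get());    // prints 0
  }
}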

@@ -978,7 +978,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return Collections.unmodifiableList(auditLoggers);
}

private void loadFSImage(StartupOption startOpt) throws IOException {
protected void loadFSImage(StartupOption startOpt) throws IOException {
final FSImage fsImage = getFSImage();

// format before starting up if requested

@@ -1026,7 +1026,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
imageLoadComplete();
}

private void startSecretManager() {
protected void startSecretManager() {
if (dtSecretManager != null) {
try {
dtSecretManager.startThreads();

@@ -1038,7 +1038,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}

private void startSecretManagerIfNecessary() {
protected void startSecretManagerIfNecessary() {
boolean shouldRun = shouldUseDelegationTokens() &&
!isInSafeMode() && getEditLog().isOpenForWrite();
boolean running = dtSecretManager.isRunning();

@@ -1188,7 +1188,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return haEnabled && inActiveState() && startingActiveService;
}

private boolean shouldUseDelegationTokens() {
protected boolean shouldUseDelegationTokens() {
return UserGroupInformation.isSecurityEnabled() ||
alwaysUseDelegationTokensForTests;
}

@@ -2729,6 +2729,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @throws UnresolvedLinkException
* @throws IOException
*/
protected
LocatedBlock prepareFileForWrite(String src, INodeFile file,
String leaseHolder, String clientMachine,
boolean writeToEditLog,

@@ -3185,6 +3186,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return new FileState(pendingFile, src);
}

protected
LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
long offset) throws IOException {
LocatedBlock lBlk = new LocatedBlock(

@@ -3302,8 +3304,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return true;
}

private INodeFile checkLease(String src, String holder, INode inode,
long fileId)
protected INodeFile checkLease(String src, String holder, INode inode,
long fileId)
throws LeaseExpiredException, FileNotFoundException {
assert hasReadLock();
final String ident = src + " (inode " + fileId + ")";

@@ -4420,7 +4422,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return leaseManager.reassignLease(lease, src, newHolder);
}

private void commitOrCompleteLastBlock(final INodeFile fileINode,
protected void commitOrCompleteLastBlock(final INodeFile fileINode,
final Block commitBlock) throws IOException {
assert hasWriteLock();
Preconditions.checkArgument(fileINode.isUnderConstruction());

@@ -4816,6 +4818,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @return an array of datanode commands
* @throws IOException
*/
protected
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
StorageReport[] reports, long cacheCapacity, long cacheUsed,
int xceiverCount, int xmitsInProgress, int failedVolumes)

@@ -4865,8 +4868,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @param file
* @param logRetryCache
*/
private void persistBlocks(String path, INodeFile file,
boolean logRetryCache) {
protected void persistBlocks(String path, INodeFile file,
boolean logRetryCache) {
assert hasWriteLock();
Preconditions.checkArgument(file.isUnderConstruction());
getEditLog().logUpdateBlocks(path, file, logRetryCache);

@@ -5297,7 +5300,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @param path
* @param file
*/
private void persistNewBlock(String path, INodeFile file) {
protected void persistNewBlock(String path, INodeFile file) {
Preconditions.checkArgument(file.isUnderConstruction());
getEditLog().logAddBlock(path, file);
if (NameNode.stateChangeLog.isDebugEnabled()) {

@@ -7175,7 +7178,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
*
* @return true if delegation token operation is allowed
*/
private boolean isAllowedDelegationTokenOp() throws IOException {
protected boolean isAllowedDelegationTokenOp() throws IOException {
AuthenticationMethod authMethod = getConnectionAuthenticationMethod();
if (UserGroupInformation.isSecurityEnabled()
&& (authMethod != AuthenticationMethod.KERBEROS)

@@ -7342,7 +7345,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
for (DatanodeDescriptor node : live) {
Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
info.put(node.getHostName(), getLiveNodeInfo(node));
}
return JSON.toString(info);
}

protected Map<String, Object> getLiveNodeInfo(DatanodeDescriptor node) {
return ImmutableMap.<String, Object>builder()
.put("infoAddr", node.getInfoAddr())
.put("infoSecureAddr", node.getInfoSecureAddr())
.put("xferaddr", node.getXferAddr())

@@ -7360,10 +7369,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
.put("blockPoolUsedPercent", node.getBlockPoolUsedPercent())
.put("volfails", node.getVolumeFailures())
.build();

info.put(node.getHostName(), innerinfo);
}
return JSON.toString(info);
}

/**

@@ -7648,17 +7653,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
public ReentrantLock getLongReadLockForTests() {
return fsLock.longReadLock;
}

@VisibleForTesting
public SafeModeInfo getSafeModeInfoForTests() {
return safeMode;
}

@VisibleForTesting
public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) {
this.nnResourceChecker = nnResourceChecker;
}

public SafeModeInfo getSafeModeInfo() {
return safeMode;
}

@Override
public boolean isAvoidingStaleDataNodesForWrite() {
return this.blockManager.getDatanodeManager()

@@ -352,6 +352,40 @@ public class DFSAdmin extends FsShell {
}
}

/**
* Common usage summary shared between "hdfs dfsadmin -help" and
* "hdfs dfsadmin"
*/
private static final String commonUsageSummary =
"\t[-report [-live] [-dead] [-decommissioning]]\n" +
"\t[-safemode <enter | leave | get | wait>]\n" +
"\t[-saveNamespace]\n" +
"\t[-rollEdits]\n" +
"\t[-restoreFailedStorage true|false|check]\n" +
"\t[-refreshNodes]\n" +
"\t[" + SetQuotaCommand.USAGE + "]\n" +
"\t[" + ClearQuotaCommand.USAGE +"]\n" +
"\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
"\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
"\t[-finalizeUpgrade]\n" +
"\t[" + RollingUpgradeCommand.USAGE +"]\n" +
"\t[-refreshServiceAcl]\n" +
"\t[-refreshUserToGroupsMappings]\n" +
"\t[-refreshSuperUserGroupsConfiguration]\n" +
"\t[-refreshCallQueue]\n" +
"\t[-refresh <host:ipc_port> <key> [arg1..argn]\n" +
"\t[-printTopology]\n" +
"\t[-refreshNamenodes datanode_host:ipc_port]\n"+
"\t[-deleteBlockPool datanode_host:ipc_port blockpoolId [force]]\n"+
"\t[-setBalancerBandwidth <bandwidth in bytes per second>]\n" +
"\t[-fetchImage <local directory>]\n" +
"\t[-allowSnapshot <snapshotDir>]\n" +
"\t[-disallowSnapshot <snapshotDir>]\n" +
"\t[-shutdownDatanode <datanode_host:ipc_port> [upgrade]]\n" +
"\t[-getDatanodeInfo <datanode_host:ipc_port>]\n" +
"\t[-metasave filename]\n" +
"\t[-help [cmd]]\n";

/**
* Construct a DFSAdmin object.
*/
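
The new commonUsageSummary constant exists so that "hdfs dfsadmin" with no arguments and "hdfs dfsadmin -help" print the same command list (the inconsistency reported in HDFS-6831). A toy illustration of the same single-source-of-truth pattern, independent of the real DFSAdmin class; all names below are invented for the example:

public class SharedUsageSketch {
  // One string used by both the help text and the bare-usage text, so the
  // two outputs can no longer drift apart.
  private static final String COMMON_USAGE =
      "\t[-report]\n" +
      "\t[-safemode <enter | leave | get | wait>]\n" +
      "\t[-help [cmd]]\n";

  static void printHelp() {
    System.out.println("tool performs administrative commands.\n"
        + "The full syntax is:\n\ntool\n" + COMMON_USAGE);
  }

  static void printUsage() {
    System.err.println("Usage: tool\n" + COMMON_USAGE);
  }

  public static void main(String[] args) {
    if (args.length == 0) {
      printUsage();
    } else {
      printHelp();
    }
  }
}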
@ -589,7 +623,7 @@ public class DFSAdmin extends FsShell {
|
|||
|
||||
/**
|
||||
* Command to ask the namenode to save the namespace.
|
||||
* Usage: java DFSAdmin -saveNamespace
|
||||
* Usage: hdfs dfsadmin -saveNamespace
|
||||
* @exception IOException
|
||||
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
|
||||
*/
|
||||
|
@ -630,7 +664,7 @@ public class DFSAdmin extends FsShell {
|
|||
|
||||
/**
|
||||
* Command to enable/disable/check restoring of failed storage replicas in the namenode.
|
||||
* Usage: java DFSAdmin -restoreFailedStorage true|false|check
|
||||
* Usage: hdfs dfsadmin -restoreFailedStorage true|false|check
|
||||
* @exception IOException
|
||||
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
|
||||
*/
|
||||
|
@ -668,7 +702,7 @@ public class DFSAdmin extends FsShell {
|
|||
/**
|
||||
* Command to ask the namenode to reread the hosts and excluded hosts
|
||||
* file.
|
||||
* Usage: java DFSAdmin -refreshNodes
|
||||
* Usage: hdfs dfsadmin -refreshNodes
|
||||
* @exception IOException
|
||||
*/
|
||||
public int refreshNodes() throws IOException {
|
||||
|
@ -701,7 +735,7 @@ public class DFSAdmin extends FsShell {
|
|||
/**
|
||||
* Command to ask the namenode to set the balancer bandwidth for all of the
|
||||
* datanodes.
|
||||
* Usage: java DFSAdmin -setBalancerBandwidth bandwidth
|
||||
* Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
|
||||
* @param argv List of of command line parameters.
|
||||
* @param idx The index of the command that is being processed.
|
||||
* @exception IOException
|
||||
|
@ -714,7 +748,7 @@ public class DFSAdmin extends FsShell {
|
|||
bandwidth = Long.parseLong(argv[idx]);
|
||||
} catch (NumberFormatException nfe) {
|
||||
System.err.println("NumberFormatException: " + nfe.getMessage());
|
||||
System.err.println("Usage: java DFSAdmin"
|
||||
System.err.println("Usage: hdfs dfsadmin"
|
||||
+ " [-setBalancerBandwidth <bandwidth in bytes per second>]");
|
||||
return exitCode;
|
||||
}
|
||||
|
@ -777,36 +811,11 @@ public class DFSAdmin extends FsShell {
|
|||
}
|
||||
|
||||
private void printHelp(String cmd) {
|
||||
String summary = "hadoop dfsadmin performs DFS administrative commands.\n" +
|
||||
String summary = "hdfs dfsadmin performs DFS administrative commands.\n" +
|
||||
"Note: Administrative commands can only be run with superuser permission.\n" +
|
||||
"The full syntax is: \n\n" +
|
||||
"hadoop dfsadmin\n" +
|
||||
"\t[-report [-live] [-dead] [-decommissioning]]\n" +
|
||||
"\t[-safemode <enter | leave | get | wait>]\n" +
|
||||
"\t[-saveNamespace]\n" +
|
||||
"\t[-rollEdits]\n" +
|
||||
"\t[-restoreFailedStorage true|false|check]\n" +
|
||||
"\t[-refreshNodes]\n" +
|
||||
"\t[" + SetQuotaCommand.USAGE + "]\n" +
|
||||
"\t[" + ClearQuotaCommand.USAGE +"]\n" +
|
||||
"\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
|
||||
"\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
|
||||
"\t[-finalizeUpgrade]\n" +
|
||||
"\t[" + RollingUpgradeCommand.USAGE +"]\n" +
|
||||
"\t[-refreshServiceAcl]\n" +
|
||||
"\t[-refreshUserToGroupsMappings]\n" +
|
||||
"\t[-refreshSuperUserGroupsConfiguration]\n" +
|
||||
"\t[-refreshCallQueue]\n" +
|
||||
"\t[-refresh <host:ipc_port> <key> [arg1..argn]\n" +
|
||||
"\t[-printTopology]\n" +
|
||||
"\t[-refreshNamenodes datanodehost:port]\n"+
|
||||
"\t[-deleteBlockPool datanodehost:port blockpoolId [force]]\n"+
|
||||
"\t[-setBalancerBandwidth <bandwidth>]\n" +
|
||||
"\t[-fetchImage <local directory>]\n" +
|
||||
"\t[-allowSnapshot <snapshotDir>]\n" +
|
||||
"\t[-disallowSnapshot <snapshotDir>]\n" +
|
||||
"\t[-shutdownDatanode <datanode_host:ipc_port> [upgrade]]\n" +
|
||||
"\t[-getDatanodeInfo <datanode_host:ipc_port>\n" +
|
||||
"\t[-help [cmd]]\n";
|
||||
"hdfs dfsadmin\n" +
|
||||
commonUsageSummary;
|
||||
|
||||
String report ="-report [-live] [-dead] [-decommissioning]:\n" +
|
||||
"\tReports basic filesystem information and statistics.\n" +
|
||||
|
@ -825,15 +834,13 @@ public class DFSAdmin extends FsShell {
|
|||
|
||||
String saveNamespace = "-saveNamespace:\t" +
|
||||
"Save current namespace into storage directories and reset edits log.\n" +
|
||||
"\t\tRequires superuser permissions and safe mode.\n";
|
||||
"\t\tRequires safe mode.\n";
|
||||
|
||||
String rollEdits = "-rollEdits:\t" +
|
||||
"Rolls the edit log.\n" +
|
||||
"\t\tRequires superuser permissions.\n";
|
||||
"Rolls the edit log.\n";
|
||||
|
||||
String restoreFailedStorage = "-restoreFailedStorage:\t" +
|
||||
"Set/Unset/Check flag to attempt restore of failed storage replicas if they become available.\n" +
|
||||
"\t\tRequires superuser permissions.\n";
|
||||
"Set/Unset/Check flag to attempt restore of failed storage replicas if they become available.\n";
|
||||
|
||||
String refreshNodes = "-refreshNodes: \tUpdates the namenode with the " +
|
||||
"set of datanodes allowed to connect to the namenode.\n\n" +
|
||||
|
@ -1021,7 +1028,7 @@ public class DFSAdmin extends FsShell {
|
|||
|
||||
/**
|
||||
* Command to ask the namenode to finalize previously performed upgrade.
|
||||
* Usage: java DFSAdmin -finalizeUpgrade
|
||||
* Usage: hdfs dfsadmin -finalizeUpgrade
|
||||
* @exception IOException
|
||||
*/
|
||||
public int finalizeUpgrade() throws IOException {
|
||||
|
@ -1058,7 +1065,7 @@ public class DFSAdmin extends FsShell {
|
|||
|
||||
/**
|
||||
* Dumps DFS data structures into specified file.
|
||||
* Usage: java DFSAdmin -metasave filename
|
||||
* Usage: hdfs dfsadmin -metasave filename
|
||||
* @param argv List of of command line parameters.
|
||||
* @param idx The index of the command that is being processed.
|
||||
* @exception IOException if an error occurred while accessing
|
||||
|
@ -1366,118 +1373,90 @@ public class DFSAdmin extends FsShell {
*/
private static void printUsage(String cmd) {
if ("-report".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-report] [-live] [-dead] [-decommissioning]");
} else if ("-safemode".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-safemode enter | leave | get | wait]");
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-allowSnapshot <snapshotDir>]");
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-disallowSnapshot <snapshotDir>]");
} else if ("-saveNamespace".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-saveNamespace]");
} else if ("-rollEdits".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-rollEdits]");
} else if ("-restoreFailedStorage".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-restoreFailedStorage true|false|check ]");
} else if ("-refreshNodes".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshNodes]");
} else if ("-finalizeUpgrade".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-finalizeUpgrade]");
} else if (RollingUpgradeCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [" + RollingUpgradeCommand.USAGE+"]");
} else if ("-metasave".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-metasave filename]");
} else if (SetQuotaCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [" + SetQuotaCommand.USAGE+"]");
} else if (ClearQuotaCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " ["+ClearQuotaCommand.USAGE+"]");
} else if (SetSpaceQuotaCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [" + SetSpaceQuotaCommand.USAGE+"]");
} else if (ClearSpaceQuotaCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " ["+ClearSpaceQuotaCommand.USAGE+"]");
} else if ("-refreshServiceAcl".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshServiceAcl]");
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshUserToGroupsMappings]");
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshSuperUserGroupsConfiguration]");
} else if ("-refreshCallQueue".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshCallQueue]");
} else if ("-refresh".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refresh <hostname:port> <resource_identifier> [arg1..argn]");
} else if ("-printTopology".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-printTopology]");
} else if ("-refreshNamenodes".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshNamenodes datanode-host:port]");
} else if ("-deleteBlockPool".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-deleteBlockPool datanode-host:port blockpoolId [force]]");
} else if ("-setBalancerBandwidth".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-setBalancerBandwidth <bandwidth in bytes per second>]");
} else if ("-fetchImage".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-fetchImage <local directory>]");
} else if ("-shutdownDatanode".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]");
} else if ("-getDatanodeInfo".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-getDatanodeInfo <datanode_host:ipc_port>]");
} else {
System.err.println("Usage: java DFSAdmin");
System.err.println("Usage: hdfs dfsadmin");
System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");
System.err.println(" [-report]");
System.err.println(" [-safemode enter | leave | get | wait]");
System.err.println(" [-allowSnapshot <snapshotDir>]");
System.err.println(" [-disallowSnapshot <snapshotDir>]");
System.err.println(" [-saveNamespace]");
System.err.println(" [-rollEdits]");
System.err.println(" [-restoreFailedStorage true|false|check]");
System.err.println(" [-refreshNodes]");
System.err.println(" [-finalizeUpgrade]");
System.err.println(" ["+RollingUpgradeCommand.USAGE+"]");
System.err.println(" [-metasave filename]");
System.err.println(" [-refreshServiceAcl]");
System.err.println(" [-refreshUserToGroupsMappings]");
System.err.println(" [-refreshSuperUserGroupsConfiguration]");
System.err.println(" [-refreshCallQueue]");
System.err.println(" [-refresh]");
System.err.println(" [-printTopology]");
System.err.println(" [-refreshNamenodes datanodehost:port]");
System.err.println(" [-deleteBlockPool datanode-host:port blockpoolId [force]]");
System.err.println(" ["+SetQuotaCommand.USAGE+"]");
System.err.println(" ["+ClearQuotaCommand.USAGE+"]");
System.err.println(" ["+SetSpaceQuotaCommand.USAGE+"]");
System.err.println(" ["+ClearSpaceQuotaCommand.USAGE+"]");
System.err.println(" [-setBalancerBandwidth <bandwidth in bytes per second>]");
System.err.println(" [-fetchImage <local directory>]");
System.err.println(" [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]");
System.err.println(" [-getDatanodeInfo <datanode_host:ipc_port>]");
System.err.println(" [-help [cmd]]");
System.err.println();
System.err.println(commonUsageSummary);
ToolRunner.printGenericCommandUsage(System.err);
}
}

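A side note on the shape of this hunk: every branch of printUsage repeats the "Usage: hdfs dfsadmin" literal, which is why the rename from "java DFSAdmin" touches roughly thirty lines. A minimal sketch of how the prefix could be centralized so a future rename is a one-line change; printUsageLine and DfsAdminUsage are hypothetical names for illustration and are not part of this patch:

    // Hypothetical sketch only; the patch itself keeps the inline literals.
    class DfsAdminUsage {
      private static final String USAGE_PREFIX = "Usage: hdfs dfsadmin";

      static void printUsageLine(String options) {
        System.err.println(USAGE_PREFIX + " " + options);
      }

      public static void main(String[] args) {
        printUsageLine("[-report] [-live] [-dead] [-decommissioning]");
      }
    }
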
@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.FSTestWrapper;
import org.apache.hadoop.fs.FileContext;

@ -51,12 +52,22 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import static org.mockito.Mockito.withSettings;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;

import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;

@ -91,6 +102,7 @@ public class TestEncryptionZones {
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
// Lower the batch size for testing
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
2);

@ -753,4 +765,35 @@ public class TestEncryptionZones {
e.getCause());
}
}

/**
* Tests obtaining delegation token from stored key
*/
@Test(timeout = 120000)
public void testDelegationToken() throws Exception {
UserGroupInformation.createRemoteUser("JobTracker");
DistributedFileSystem dfs = cluster.getFileSystem();
KeyProviderCryptoExtension keyProvider = Mockito.mock(KeyProviderCryptoExtension.class,
withSettings().extraInterfaces(
DelegationTokenExtension.class,
CryptoExtension.class));
Mockito.when(keyProvider.getConf()).thenReturn(conf);
byte[] testIdentifier = "Test identifier for delegation token".getBytes();

Token<?> testToken = new Token(testIdentifier, new byte[0],
new Text(), new Text());
Mockito.when(((DelegationTokenExtension)keyProvider).
addDelegationTokens(anyString(), (Credentials)any())).
thenReturn(new Token<?>[] { testToken });

dfs.getClient().provider = keyProvider;

Credentials creds = new Credentials();
final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
DistributedFileSystem.LOG.debug("Delegation tokens: " +
Arrays.asList(tokens));
Assert.assertEquals(2, tokens.length);
Assert.assertEquals(tokens[1], testToken);
Assert.assertEquals(1, creds.numberOfTokens());
}
}

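The key trick in the new testDelegationToken is Mockito's extraInterfaces(): a single mock satisfies both the mocked class and extra interfaces, so the test can stub the cast ((DelegationTokenExtension) keyProvider). A self-contained sketch of the same idiom using only JDK types in place of the Hadoop ones; this is illustration, not Hadoop code:

    import java.io.Closeable;
    import java.util.ArrayList;
    import org.mockito.Mockito;

    public class ExtraInterfacesSketch {
      public static void main(String[] args) throws Exception {
        // Mock one class, but declare that the mock also implements Closeable.
        ArrayList<?> mock = Mockito.mock(ArrayList.class,
            Mockito.withSettings().extraInterfaces(Closeable.class));

        // The same object can be stubbed and used through either type.
        Mockito.when(mock.size()).thenReturn(42);
        System.out.println(mock.size());  // prints 42
        ((Closeable) mock).close();       // legal cast thanks to extraInterfaces
      }
    }
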
@ -17,14 +17,6 @@
*/
package org.apache.hadoop.hdfs;

import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.net.UnknownHostException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

@ -39,6 +31,14 @@ import org.apache.hadoop.net.DNS;
import org.apache.hadoop.test.PathUtils;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.net.UnknownHostException;

import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

/**
* This test checks correctness of port usage by hdfs components:
* NameNode, DataNode, SecondaryNamenode and BackupNode.

@ -245,7 +245,7 @@ public class TestHDFSServerPorts {
return true;
}

@Test
@Test(timeout = 300000)
public void testNameNodePorts() throws Exception {
runTestNameNodePorts(false);
runTestNameNodePorts(true);

@ -296,7 +296,7 @@ public class TestHDFSServerPorts {
/**
* Verify datanode port usage.
*/
@Test
@Test(timeout = 300000)
public void testDataNodePorts() throws Exception {
NameNode nn = null;
try {

@ -332,7 +332,7 @@ public class TestHDFSServerPorts {
/**
* Verify secondary namenode port usage.
*/
@Test
@Test(timeout = 300000)
public void testSecondaryNodePorts() throws Exception {
NameNode nn = null;
try {

@ -361,7 +361,7 @@ public class TestHDFSServerPorts {
/**
* Verify BackupNode port usage.
*/
@Test
@Test(timeout = 300000)
public void testBackupNodePorts() throws Exception {
NameNode nn = null;
try {

@ -424,6 +424,14 @@ public class TestDirectoryScanner {
public String getStorageID() {
return "";
}

@Override
public void reserveSpaceForRbw(long bytesToReserve) {
}

@Override
public void releaseReservedSpace(long bytesToRelease) {
}
}

private final static TestFsVolumeSpi TEST_VOLUME = new TestFsVolumeSpi();

@ -0,0 +1,288 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.commons.logging.impl.Log4JLogger;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
|
||||
import static org.hamcrest.core.Is.is;
|
||||
import static org.junit.Assert.assertThat;
|
||||
|
||||
import org.apache.hadoop.fs.DU;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.*;
|
||||
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.hadoop.util.Daemon;
|
||||
import org.apache.log4j.Level;
|
||||
import org.junit.After;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
/**
|
||||
* Ensure that the DN reserves disk space equivalent to a full block for
|
||||
* replica being written (RBW).
|
||||
*/
|
||||
public class TestRbwSpaceReservation {
|
||||
static final Log LOG = LogFactory.getLog(TestRbwSpaceReservation.class);
|
||||
|
||||
private static final short REPL_FACTOR = 1;
|
||||
private static final int DU_REFRESH_INTERVAL_MSEC = 500;
|
||||
private static final int STORAGES_PER_DATANODE = 1;
|
||||
private static final int BLOCK_SIZE = 1024 * 1024;
|
||||
private static final int SMALL_BLOCK_SIZE = 1024;
|
||||
|
||||
protected MiniDFSCluster cluster;
|
||||
private Configuration conf;
|
||||
private DistributedFileSystem fs = null;
|
||||
private DFSClient client = null;
|
||||
FsVolumeImpl singletonVolume = null;
|
||||
|
||||
private static Random rand = new Random();
|
||||
|
||||
private void initConfig(int blockSize) {
|
||||
conf = new HdfsConfiguration();
|
||||
|
||||
// Refresh disk usage information frequently.
|
||||
conf.setInt(FS_DU_INTERVAL_KEY, DU_REFRESH_INTERVAL_MSEC);
|
||||
conf.setLong(DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
|
||||
// Disable the scanner
|
||||
conf.setInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
|
||||
}
|
||||
|
||||
static {
|
||||
((Log4JLogger) FsDatasetImpl.LOG).getLogger().setLevel(Level.ALL);
|
||||
}
|
||||
|
||||
private void startCluster(int blockSize, long perVolumeCapacity) throws IOException {
|
||||
initConfig(blockSize);
|
||||
|
||||
cluster = new MiniDFSCluster
|
||||
.Builder(conf)
|
||||
.storagesPerDatanode(STORAGES_PER_DATANODE)
|
||||
.numDataNodes(REPL_FACTOR)
|
||||
.build();
|
||||
fs = cluster.getFileSystem();
|
||||
client = fs.getClient();
|
||||
cluster.waitActive();
|
||||
|
||||
if (perVolumeCapacity >= 0) {
|
||||
List<? extends FsVolumeSpi> volumes =
|
||||
cluster.getDataNodes().get(0).getFSDataset().getVolumes();
|
||||
|
||||
assertThat(volumes.size(), is(1));
|
||||
singletonVolume = ((FsVolumeImpl) volumes.get(0));
|
||||
singletonVolume.setCapacityForTesting(perVolumeCapacity);
|
||||
}
|
||||
}
|
||||
|
||||
@After
|
||||
public void shutdownCluster() throws IOException {
|
||||
if (client != null) {
|
||||
client.close();
|
||||
client = null;
|
||||
}
|
||||
|
||||
if (fs != null) {
|
||||
fs.close();
|
||||
fs = null;
|
||||
}
|
||||
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
cluster = null;
|
||||
}
|
||||
}
|
||||
|
||||
private void createFileAndTestSpaceReservation(
|
||||
final String fileNamePrefix, final int fileBlockSize)
|
||||
throws IOException, InterruptedException {
|
||||
// Enough for 1 block + meta files + some delta.
|
||||
final long configuredCapacity = fileBlockSize * 2 - 1;
|
||||
startCluster(BLOCK_SIZE, configuredCapacity);
|
||||
FSDataOutputStream out = null;
|
||||
Path path = new Path("/" + fileNamePrefix + ".dat");
|
||||
|
||||
try {
|
||||
out = fs.create(path, false, 4096, (short) 1, fileBlockSize);
|
||||
|
||||
byte[] buffer = new byte[rand.nextInt(fileBlockSize / 4)];
|
||||
out.write(buffer);
|
||||
out.hsync();
|
||||
int bytesWritten = buffer.length;
|
||||
|
||||
// Check that space was reserved for a full block minus the bytesWritten.
|
||||
assertThat(singletonVolume.getReservedForRbw(),
|
||||
is((long) fileBlockSize - bytesWritten));
|
||||
out.close();
|
||||
out = null;
|
||||
|
||||
// Check that the reserved space has been released since we closed the
|
||||
// file.
|
||||
assertThat(singletonVolume.getReservedForRbw(), is(0L));
|
||||
|
||||
// Reopen the file for appends and write 1 more byte.
|
||||
out = fs.append(path);
|
||||
out.write(buffer);
|
||||
out.hsync();
|
||||
bytesWritten += buffer.length;
|
||||
|
||||
// Check that space was again reserved for a full block minus the
|
||||
// bytesWritten so far.
|
||||
assertThat(singletonVolume.getReservedForRbw(),
|
||||
is((long) fileBlockSize - bytesWritten));
|
||||
|
||||
// Write once again and again verify the available space. This ensures
|
||||
// that the reserved space is progressively adjusted to account for bytes
|
||||
// written to disk.
|
||||
out.write(buffer);
|
||||
out.hsync();
|
||||
bytesWritten += buffer.length;
|
||||
assertThat(singletonVolume.getReservedForRbw(),
|
||||
is((long) fileBlockSize - bytesWritten));
|
||||
} finally {
|
||||
if (out != null) {
|
||||
out.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test (timeout=300000)
|
||||
public void testWithDefaultBlockSize()
|
||||
throws IOException, InterruptedException {
|
||||
createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE);
|
||||
}
|
||||
|
||||
@Test (timeout=300000)
|
||||
public void testWithNonDefaultBlockSize()
|
||||
throws IOException, InterruptedException {
|
||||
// Same test as previous one, but with a non-default block size.
|
||||
createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE * 2);
|
||||
}
|
||||
|
||||
/**
|
||||
* Stress test to ensure we are not leaking reserved space.
|
||||
* @throws IOException
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
@Test (timeout=600000)
|
||||
public void stressTest() throws IOException, InterruptedException {
|
||||
final int numWriters = 5;
|
||||
startCluster(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE * numWriters * 10);
|
||||
Writer[] writers = new Writer[numWriters];
|
||||
|
||||
// Start a few writers and let them run for a while.
|
||||
for (int i = 0; i < numWriters; ++i) {
|
||||
writers[i] = new Writer(client, SMALL_BLOCK_SIZE);
|
||||
writers[i].start();
|
||||
}
|
||||
|
||||
Thread.sleep(60000);
|
||||
|
||||
// Stop the writers.
|
||||
for (Writer w : writers) {
|
||||
w.stopWriter();
|
||||
}
|
||||
int filesCreated = 0;
|
||||
int numFailures = 0;
|
||||
for (Writer w : writers) {
|
||||
w.join();
|
||||
filesCreated += w.getFilesCreated();
|
||||
numFailures += w.getNumFailures();
|
||||
}
|
||||
|
||||
LOG.info("Stress test created " + filesCreated +
|
||||
" files and hit " + numFailures + " failures");
|
||||
|
||||
// Check no space was leaked.
|
||||
assertThat(singletonVolume.getReservedForRbw(), is(0L));
|
||||
}
|
||||
|
||||
private static class Writer extends Daemon {
|
||||
private volatile boolean keepRunning;
|
||||
private final DFSClient localClient;
|
||||
private int filesCreated = 0;
|
||||
private int numFailures = 0;
|
||||
byte[] data;
|
||||
|
||||
Writer(DFSClient client, int blockSize) throws IOException {
|
||||
localClient = client;
|
||||
keepRunning = true;
|
||||
filesCreated = 0;
|
||||
numFailures = 0;
|
||||
|
||||
// At least some of the files should span a block boundary.
|
||||
data = new byte[blockSize * 2];
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
/**
|
||||
* Create a file, write up to 3 blocks of data and close the file.
|
||||
* Do this in a loop until we are told to stop.
|
||||
*/
|
||||
while (keepRunning) {
|
||||
OutputStream os = null;
|
||||
try {
|
||||
String filename = "/file-" + rand.nextLong();
|
||||
os = localClient.create(filename, false);
|
||||
os.write(data, 0, rand.nextInt(data.length));
|
||||
IOUtils.closeQuietly(os);
|
||||
os = null;
|
||||
localClient.delete(filename, false);
|
||||
Thread.sleep(50); // Sleep for a bit to avoid killing the system.
|
||||
++filesCreated;
|
||||
} catch (IOException ioe) {
|
||||
// Just ignore the exception and keep going.
|
||||
++numFailures;
|
||||
} catch (InterruptedException ie) {
|
||||
return;
|
||||
} finally {
|
||||
if (os != null) {
|
||||
IOUtils.closeQuietly(os);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void stopWriter() {
|
||||
keepRunning = false;
|
||||
}
|
||||
|
||||
public int getFilesCreated() {
|
||||
return filesCreated;
|
||||
}
|
||||
|
||||
public int getNumFailures() {
|
||||
return numFailures;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -158,7 +158,7 @@ public class TestWriteToReplica {
replicasMap.add(bpid, new ReplicaInPipeline(
blocks[TEMPORARY].getBlockId(),
blocks[TEMPORARY].getGenerationStamp(), vol,
vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile()));
vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile(), 0));

replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol,
vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(), null);

@ -223,7 +223,7 @@ public class NameNodeAdapter {
* if safemode is not running.
*/
public static int getSafeModeSafeBlocks(NameNode nn) {
SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
SafeModeInfo smi = nn.getNamesystem().getSafeModeInfo();
if (smi == null) {
return -1;
}

@ -17,11 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode;

import java.io.File;
import java.io.IOException;
import java.net.BindException;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;

@ -33,6 +28,11 @@ import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.net.BindException;
import java.util.Random;

/**
* This class tests the validation of the configuration object when passed
* to the NameNode

@ -49,7 +49,7 @@ public class TestValidateConfigurationSettings {
* an exception
* is thrown when trying to re-use the same port
*/
@Test(expected = BindException.class)
@Test(expected = BindException.class, timeout = 300000)
public void testThatMatchingRPCandHttpPortsThrowException()
throws IOException {

@ -79,7 +79,7 @@ public class TestValidateConfigurationSettings {
* Tests setting the rpc port to a different as the web port that an
* exception is NOT thrown
*/
@Test
@Test(timeout = 300000)
public void testThatDifferentRPCandHttpPortsAreOK()
throws IOException {

@ -117,7 +117,7 @@ public class TestValidateConfigurationSettings {
* HDFS-3013: NameNode format command doesn't pick up
* dfs.namenode.name.dir.NameServiceId configuration.
*/
@Test
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
throws IOException {
Configuration conf = new HdfsConfiguration();

@ -17,27 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode.ha;
|
||||
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
import javax.ws.rs.core.Response;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
@ -45,11 +25,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
|
|||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HAUtil;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
|
||||
import org.apache.hadoop.hdfs.*;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
|
||||
|
@ -75,7 +51,20 @@ import org.junit.Test;
|
|||
import org.mockito.internal.util.reflection.Whitebox;
|
||||
import org.mortbay.util.ajax.JSON;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
import javax.ws.rs.core.Response;
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.URI;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
/**
|
||||
* Test case for client support of delegation tokens in an HA cluster.
|
||||
|
@ -128,8 +117,8 @@ public class TestDelegationTokensWithHA {
|
|||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
|
||||
@Test(timeout = 300000)
|
||||
public void testDelegationTokenDFSApi() throws Exception {
|
||||
final Token<DelegationTokenIdentifier> token =
|
||||
getDelegationToken(fs, "JobTracker");
|
||||
|
@ -192,7 +181,7 @@ public class TestDelegationTokensWithHA {
|
|||
* Test if correct exception (StandbyException or RetriableException) can be
|
||||
* thrown during the NN failover.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testDelegationTokenDuringNNFailover() throws Exception {
|
||||
EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer();
|
||||
// stop the editLogTailer of nn1
|
||||
|
@ -260,7 +249,7 @@ public class TestDelegationTokensWithHA {
|
|||
doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
|
||||
}
|
||||
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testDelegationTokenWithDoAs() throws Exception {
|
||||
final Token<DelegationTokenIdentifier> token =
|
||||
getDelegationToken(fs, "JobTracker");
|
||||
|
@ -291,8 +280,8 @@ public class TestDelegationTokensWithHA {
|
|||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Test
|
||||
|
||||
@Test(timeout = 300000)
|
||||
public void testHAUtilClonesDelegationTokens() throws Exception {
|
||||
final Token<DelegationTokenIdentifier> token =
|
||||
getDelegationToken(fs, "JobTracker");
|
||||
|
@ -354,7 +343,7 @@ public class TestDelegationTokensWithHA {
|
|||
* exception if the URI is a logical URI. This bug fails the combination of
|
||||
* ha + mapred + security.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testDFSGetCanonicalServiceName() throws Exception {
|
||||
URI hAUri = HATestUtil.getLogicalUri(cluster);
|
||||
String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
|
||||
|
@ -368,8 +357,8 @@ public class TestDelegationTokensWithHA {
|
|||
token.renew(dfs.getConf());
|
||||
token.cancel(dfs.getConf());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
||||
@Test(timeout = 300000)
|
||||
public void testHdfsGetCanonicalServiceName() throws Exception {
|
||||
Configuration conf = dfs.getConf();
|
||||
URI haUri = HATestUtil.getLogicalUri(cluster);
|
||||
|
@ -390,7 +379,7 @@ public class TestDelegationTokensWithHA {
|
|||
* password. (HDFS-6475). With StandbyException, the client can failover to try
|
||||
* activeNN.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testDelegationTokenStandbyNNAppearFirst() throws Exception {
|
||||
// make nn0 the standby NN, and nn1 the active NN
|
||||
cluster.transitionToStandby(0);
|
||||
|
|
|
@ -17,9 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

@ -33,14 +30,17 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
* Make sure HA-related metrics are updated and reported appropriately.
*/
public class TestHAMetrics {

private static final Log LOG = LogFactory.getLog(TestHAMetrics.class);

@Test
@Test(timeout = 300000)
public void testHAMetrics() throws Exception {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

@ -17,20 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode.ha;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
import com.google.common.util.concurrent.Uninterruptibles;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.commons.logging.impl.Log4JLogger;
|
||||
|
@ -40,13 +27,7 @@ import org.apache.hadoop.fs.FileSystem;
|
|||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
|
||||
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.HAUtil;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
|
||||
import org.apache.hadoop.hdfs.*;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
|
||||
|
@ -66,7 +47,16 @@ import org.junit.Assert;
|
|||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
import com.google.common.util.concurrent.Uninterruptibles;
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
* Tests state transition from active->standby, and manual failover
|
||||
|
@ -92,7 +82,7 @@ public class TestHAStateTransitions {
|
|||
* active and standby mode, making sure it doesn't
|
||||
* double-play any edits.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testTransitionActiveToStandby() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
||||
|
@ -148,7 +138,7 @@ public class TestHAStateTransitions {
|
|||
* Test that transitioning a service to the state that it is already
|
||||
* in is a nop, specifically, an exception is not thrown.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testTransitionToCurrentStateIsANop() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1L);
|
||||
|
@ -220,7 +210,7 @@ public class TestHAStateTransitions {
|
|||
/**
|
||||
* Tests manual failover back and forth between two NameNodes.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testManualFailoverAndFailback() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
||||
|
@ -346,7 +336,7 @@ public class TestHAStateTransitions {
|
|||
/**
|
||||
* Test that delegation tokens continue to work after the failover.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testDelegationTokensAfterFailover() throws IOException {
|
||||
Configuration conf = new Configuration();
|
||||
conf.setBoolean(
|
||||
|
@ -383,7 +373,7 @@ public class TestHAStateTransitions {
|
|||
* Tests manual failover back and forth between two NameNodes
|
||||
* for federation cluster with two namespaces.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testManualFailoverFailbackFederationHA() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
||||
|
@ -403,12 +393,12 @@ public class TestHAStateTransitions {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testFailoverWithEmptyInProgressEditLog() throws Exception {
|
||||
testFailoverAfterCrashDuringLogRoll(false);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
||||
@Test(timeout = 300000)
|
||||
public void testFailoverWithEmptyInProgressEditLogWithHeader()
|
||||
throws Exception {
|
||||
testFailoverAfterCrashDuringLogRoll(true);
|
||||
|
@ -570,7 +560,7 @@ public class TestHAStateTransitions {
|
|||
* by virtue of the fact that it wouldn't work properly if the proxies
|
||||
* returned were not for the correct NNs.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testIsAtLeastOneActive() throws Exception {
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
|
||||
.nnTopology(MiniDFSNNTopology.simpleHATopology())
|
||||
|
|
|
@ -17,23 +17,11 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode.ha;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.net.BindException;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.ThreadInfo;
|
||||
import java.lang.management.ThreadMXBean;
|
||||
import java.net.URI;
|
||||
import java.net.URL;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
import com.google.common.base.Supplier;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.io.Files;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
@ -43,14 +31,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
|
|||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSImage;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.namenode.JournalSet;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
|
||||
import org.apache.hadoop.hdfs.server.namenode.*;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
import org.apache.hadoop.hdfs.util.Canceler;
|
||||
import org.apache.hadoop.io.compress.CompressionCodecFactory;
|
||||
import org.apache.hadoop.io.compress.CompressionOutputStream;
|
||||
|
@ -64,11 +46,19 @@ import org.junit.Before;
|
|||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
import com.google.common.base.Supplier;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.io.Files;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.ThreadInfo;
|
||||
import java.lang.management.ThreadMXBean;
|
||||
import java.net.BindException;
|
||||
import java.net.URI;
|
||||
import java.net.URL;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
public class TestStandbyCheckpoints {
|
||||
private static final int NUM_DIRS_IN_LOG = 200000;
|
||||
|
@ -143,7 +133,7 @@ public class TestStandbyCheckpoints {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testSBNCheckpoints() throws Exception {
|
||||
JournalSet standbyJournalSet = NameNodeAdapter.spyOnJournalSet(nn1);
|
||||
|
||||
|
@ -185,7 +175,7 @@ public class TestStandbyCheckpoints {
|
|||
* checkpoint for the given txid, but this should not cause
|
||||
* an abort, etc.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testBothNodesInStandbyState() throws Exception {
|
||||
doEdits(0, 10);
|
||||
|
||||
|
@ -216,7 +206,7 @@ public class TestStandbyCheckpoints {
|
|||
* same txid, which is a no-op. This test makes sure this doesn't
|
||||
* cause any problem.
|
||||
*/
|
||||
@Test
|
||||
@Test(timeout = 300000)
|
||||
public void testCheckpointWhenNoNewTransactionsHappened()
|
||||
throws Exception {
|
||||
// Checkpoint as fast as we can, in a tight loop.
|
||||
|
|
|
@ -90,7 +90,7 @@ public class TestTools {
fail("testDFSAdminHelp error" + e);
}

String pattern = "Usage: java DFSAdmin";
String pattern = "Usage: hdfs dfsadmin";
checkOutput(new String[] { "-cancel", "-renew" }, pattern, System.err,
DFSAdmin.class);
}

@ -181,6 +181,9 @@ Release 2.6.0 - UNRELEASED
YARN-2511. Allowed all origins by default when CrossOriginFilter is
enabled. (Jonathan Eagles via zjshen)

YARN-2508. Cross Origin configuration parameters prefix are not honored
(Mit Desai via jeagles)

OPTIMIZATIONS

BUG FIXES

@ -284,6 +287,9 @@ Release 2.6.0 - UNRELEASED
YARN-2431. NM restart: cgroup is not removed for reacquired containers
(jlowe)

YARN-2519. Credential Provider related unit tests failed on Windows.
(Xiaoyu Yao via cnauroth)

Release 2.5.1 - UNRELEASED

INCOMPATIBLE CHANGES

@ -24,6 +24,7 @@ import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.http.HttpServer2.Builder;
import org.apache.hadoop.security.alias.CredentialProvider;

@ -74,8 +75,9 @@ public class TestWebAppUtils {
"target/test-dir"));

Configuration conf = new Configuration();
final Path jksPath = new Path(testDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();

File file = new File(testDir, "test.jks");
file.delete();

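The TestWebAppUtils hunk swaps raw string concatenation for Path.toUri() when building the JavaKeyStoreProvider URL, which is presumably what makes the credential-provider test pass on Windows (drive letters and backslashes do not survive naive concatenation). A small sketch of the two forms; the "jceks" literal stands in for JavaKeyStoreProvider.SCHEME_NAME and the directory is a placeholder:

    import org.apache.hadoop.fs.Path;

    public class JksUriSketch {
      public static void main(String[] args) {
        String testDir = args.length > 0 ? args[0] : "/tmp/test-dir";

        // Old form: plain concatenation of the native path string.
        String oldUrl = "jceks://file/" + testDir + "/test.jks";

        // New form: normalize the location through Path/URI first.
        Path jksPath = new Path(testDir, "test.jks");
        String newUrl = "jceks://file" + jksPath.toUri();

        System.out.println(oldUrl);
        System.out.println(newUrl);
      }
    }
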
@ -18,6 +18,7 @@

package org.apache.hadoop.yarn.server.timeline.webapp;

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;

@ -37,6 +38,15 @@ public class CrossOriginFilterInitializer extends FilterInitializer {
}

static Map<String, String> getFilterParameters(Configuration conf) {
return conf.getValByRegex(PREFIX);
Map<String, String> filterParams =
new HashMap<String, String>();
for (Map.Entry<String, String> entry : conf.getValByRegex(PREFIX)
.entrySet()) {
String name = entry.getKey();
String value = entry.getValue();
name = name.substring(PREFIX.length());
filterParams.put(name, value);
}
return filterParams;
}
}

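What the new getFilterParameters body does, concretely: every configuration key that matches the filter prefix is re-keyed without that prefix, so the servlet filter receives the bare parameter names it expects instead of fully qualified configuration keys. A self-contained sketch of the same stripping logic; the prefix value below is illustrative, the real one comes from CrossOriginFilterInitializer.PREFIX:

    import java.util.HashMap;
    import java.util.Map;

    public class PrefixStripSketch {
      static final String PREFIX = "yarn.timeline-service.http-cross-origin.";

      static Map<String, String> strip(Map<String, String> raw) {
        Map<String, String> filterParams = new HashMap<String, String>();
        for (Map.Entry<String, String> entry : raw.entrySet()) {
          // Drop the configuration prefix; keep only the filter-level name.
          filterParams.put(entry.getKey().substring(PREFIX.length()),
              entry.getValue());
        }
        return filterParams;
      }

      public static void main(String[] args) {
        Map<String, String> raw = new HashMap<String, String>();
        raw.put(PREFIX + "allowed-origins", "*");
        System.out.println(strip(raw)); // prints {allowed-origins=*}
      }
    }
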
@ -42,11 +42,8 @@ public class TestCrossOriginFilterInitializer {
CrossOriginFilterInitializer.getFilterParameters(conf);

// retrieve values
String rootvalue =
filterParameters.get(CrossOriginFilterInitializer.PREFIX + "rootparam");
String nestedvalue =
filterParameters.get(CrossOriginFilterInitializer.PREFIX
+ "nested.param");
String rootvalue = filterParameters.get("rootparam");
String nestedvalue = filterParameters.get("nested.param");
String outofscopeparam = filterParameters.get("outofscopeparam");

// verify expected values are in place