Merge branch 'trunk' into HDFS-7240

commit 3231ead84b

@@ -5,7 +5,7 @@ Requirements:

* Unix System
* JDK 1.8+
* Maven 3.0 or later
* Maven 3.3 or later
* Findbugs 1.3.9 (if running findbugs)
* ProtocolBuffer 2.5.0
* CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
@@ -631,9 +631,9 @@ Azure Data Lake Store - Java client SDK 2.0.11
JCodings 1.0.8
Joni 2.1.2
Mockito 1.8.5
JUL to SLF4J bridge 1.7.10
SLF4J API Module 1.7.10
SLF4J LOG4J-12 Binding 1.7.10
JUL to SLF4J bridge 1.7.25
SLF4J API Module 1.7.25
SLF4J LOG4J-12 Binding 1.7.25
--------------------------------------------------------------------------------

The MIT License (MIT)

@@ -736,7 +736,7 @@ hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3-LICENSE

The binary distribution of this product bundles these dependencies under the
following license:
HSQLDB Database 2.0.0
HSQLDB Database 2.3.4
--------------------------------------------------------------------------------
(HSQL License)
"COPYRIGHTS AND LICENSES (based on BSD License)

@@ -1711,7 +1711,6 @@ Hamcrest Core 1.3
ASM Core 5.0.4
ASM Commons 5.0.2
ASM Tree 5.0.2
xmlenc Library 0.52
--------------------------------------------------------------------------------
(3-clause BSD)
Redistribution and use in source and binary forms, with or without
@@ -14,6 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# you must be this high to ride the ride
if [[ -z "${BASH_VERSINFO[0]}" ]] \
   || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \
   || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then
  echo "bash v3.2+ is required. Sorry."
  exit 1
fi

set -o pipefail

## @description Print a message to stderr

@@ -39,6 +47,7 @@ function yetus_abs
declare obj=$1
declare dir
declare fn
declare dirret

if [[ ! -e ${obj} ]]; then
return 1

@@ -51,7 +60,8 @@ function yetus_abs
fi

dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
if [[ $? = 0 ]]; then
dirret=$?
if [[ ${dirret} = 0 ]]; then
echo "${dir}${fn}"
return 0
fi

@@ -63,7 +73,7 @@ WANTED="$1"
shift
ARGV=("$@")

HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.3.0}
HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.4.0}
BIN=$(yetus_abs "${BASH_SOURCE-$0}")
BINDIR=$(dirname "${BIN}")

@@ -85,7 +95,8 @@ if [[ ! -d "${HADOOP_PATCHPROCESS}" ]]; then
fi

mytmpdir=$(yetus_abs "${HADOOP_PATCHPROCESS}")
if [[ $? != 0 ]]; then
ret=$?
if [[ ${ret} != 0 ]]; then
yetus_error "yetus-dl: Unable to cwd to ${HADOOP_PATCHPROCESS}"
exit 1
fi

@@ -108,15 +119,13 @@ TARBALL="yetus-${HADOOP_YETUS_VERSION}-bin.tar"
GPGBIN=$(command -v gpg)
CURLBIN=$(command -v curl)

pushd "${HADOOP_PATCHPROCESS}" >/dev/null
if [[ $? != 0 ]]; then
if ! pushd "${HADOOP_PATCHPROCESS}" >/dev/null; then
yetus_error "ERROR: yetus-dl: Cannot pushd to ${HADOOP_PATCHPROCESS}"
exit 1
fi

if [[ -n "${CURLBIN}" ]]; then
"${CURLBIN}" -f -s -L -O "${BASEURL}/${TARBALL}.gz"
if [[ $? != 0 ]]; then
if ! "${CURLBIN}" -f -s -L -O "${BASEURL}/${TARBALL}.gz"; then
yetus_error "ERROR: yetus-dl: unable to download ${BASEURL}/${TARBALL}.gz"
exit 1
fi

@@ -126,40 +135,33 @@ else
fi

if [[ -n "${GPGBIN}" ]]; then
mkdir -p .gpg
if [[ $? != 0 ]]; then
if ! mkdir -p .gpg; then
yetus_error "ERROR: yetus-dl: Unable to create ${HADOOP_PATCHPROCESS}/.gpg"
exit 1
fi
chmod -R 700 .gpg
if [[ $? != 0 ]]; then
if ! chmod -R 700 .gpg; then
yetus_error "ERROR: yetus-dl: Unable to chmod ${HADOOP_PATCHPROCESS}/.gpg"
exit 1
fi
"${CURLBIN}" -s -L -o KEYS_YETUS https://dist.apache.org/repos/dist/release/yetus/KEYS
if [[ $? != 0 ]]; then
if ! "${CURLBIN}" -s -L -o KEYS_YETUS https://dist.apache.org/repos/dist/release/yetus/KEYS; then
yetus_error "ERROR: yetus-dl: unable to fetch https://dist.apache.org/repos/dist/release/yetus/KEYS"
exit 1
fi
"${CURLBIN}" -s -L -O "${BASEURL}/${TARBALL}.gz.asc"
if [[ $? != 0 ]]; then
if ! "${CURLBIN}" -s -L -O "${BASEURL}/${TARBALL}.gz.asc"; then
yetus_error "ERROR: yetus-dl: unable to fetch ${BASEURL}/${TARBALL}.gz.asc"
exit 1
fi
"${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --import "${HADOOP_PATCHPROCESS}/KEYS_YETUS" >/dev/null 2>&1
if [[ $? != 0 ]]; then
if ! "${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --import "${HADOOP_PATCHPROCESS}/KEYS_YETUS" >/dev/null 2>&1; then
yetus_error "ERROR: yetus-dl: gpg unable to import ${HADOOP_PATCHPROCESS}/KEYS_YETUS"
exit 1
fi
"${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --verify "${TARBALL}.gz.asc" >/dev/null 2>&1
if [[ $? != 0 ]]; then
if ! "${GPGBIN}" --homedir "${HADOOP_PATCHPROCESS}/.gpg" --verify "${TARBALL}.gz.asc" >/dev/null 2>&1; then
yetus_error "ERROR: yetus-dl: gpg verify of tarball in ${HADOOP_PATCHPROCESS} failed"
exit 1
fi
fi

gunzip -c "${TARBALL}.gz" | tar xpf -
if [[ $? != 0 ]]; then
if ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then
yetus_error "ERROR: ${TARBALL}.gz is corrupt. Investigate and then remove ${HADOOP_PATCHPROCESS} to try again."
exit 1
fi
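The recurring edit in the shell hunks above swaps the old "run the command, then test $?" idiom for a direct "if ! command; then" test, which avoids the window in which another statement can clobber the exit status. A minimal sketch of the pattern, using a hypothetical placeholder command:

# Before: the exit status is checked after the fact and can be lost
# if any other command runs in between.
some_command        # hypothetical placeholder
if [[ $? != 0 ]]; then
  echo "ERROR: some_command failed" 1>&2
  exit 1
fi

# After: test the command's exit status directly, as the script now does.
if ! some_command; then
  echo "ERROR: some_command failed" 1>&2
  exit 1
fi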
@@ -81,13 +81,17 @@ RUN apt-get -q install --no-install-recommends -y oracle-java8-installer
####
# Apps that require Java
###
RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
ant \
maven
RUN apt-get -q update && apt-get -q install --no-install-recommends -y ant

# Fixing the Apache commons / Maven dependency problem under Ubuntu:
# See http://wiki.apache.org/commons/VfsProblems
RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .
######
# Install Apache Maven
######
RUN mkdir -p /opt/maven && \
curl -L -s -S \
http://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
-o /opt/maven.tar.gz && \
tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
ENV MAVEN_HOME /opt/maven

######
# Install findbugs
@@ -145,6 +145,11 @@
<artifactId>hadoop-common</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>

@@ -218,10 +218,6 @@
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Add optional runtime dependency on the in-development timeline server module
@@ -15,8 +15,9 @@

import com.google.common.annotations.VisibleForTesting;

import java.nio.charset.Charset;
import java.security.SecureRandom;
import java.util.Random;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -32,7 +33,7 @@ public class RandomSignerSecretProvider extends RolloverSignerSecretProvider {

public RandomSignerSecretProvider() {
super();
rand = new Random();
rand = new SecureRandom();
}

/**

@@ -48,6 +49,8 @@ public RandomSignerSecretProvider(long seed) {

@Override
protected byte[] generateNewSecret() {
return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
byte[] secret = new byte[32]; // 32 bytes = 256 bits
rand.nextBytes(secret);
return secret;
}
}
@@ -16,6 +16,7 @@
import com.google.common.annotations.VisibleForTesting;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.security.SecureRandom;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

@@ -149,7 +150,7 @@ public class ZKSignerSecretProvider extends RolloverSignerSecretProvider {

public ZKSignerSecretProvider() {
super();
rand = new Random();
rand = new SecureRandom();
}

/**

@@ -342,8 +343,11 @@ private synchronized void pullFromZK(boolean isInit) {
}
}

private byte[] generateRandomSecret() {
return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
@VisibleForTesting
protected byte[] generateRandomSecret() {
byte[] secret = new byte[32]; // 32 bytes = 256 bits
rand.nextBytes(secret);
return secret;
}

/**
@@ -14,22 +14,37 @@
package org.apache.hadoop.security.authentication.util;

import java.util.Random;

import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.Assert;
import org.junit.Test;

import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;

public class TestRandomSignerSecretProvider {

// rollover every 50 msec
private final int timeout = 100;
private final long rolloverFrequency = timeout / 2;

{
LogManager.getLogger(
RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
}

@Test
public void testGetAndRollSecrets() throws Exception {
long rolloverFrequency = 15 * 1000; // rollover every 15 sec
// use the same seed so we can predict the RNG
// Use the same seed and a "plain" Random so we can predict the RNG
long seed = System.currentTimeMillis();
Random rand = new Random(seed);
byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
RandomSignerSecretProvider secretProvider =
new RandomSignerSecretProvider(seed);
byte[] secret1 = generateNewSecret(rand);
byte[] secret2 = generateNewSecret(rand);
byte[] secret3 = generateNewSecret(rand);
MockRandomSignerSecretProvider secretProvider =
spy(new MockRandomSignerSecretProvider(seed));
try {
secretProvider.init(null, null, rolloverFrequency);

@@ -39,7 +54,8 @@ public void testGetAndRollSecrets() throws Exception {
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret1, allSecrets[0]);
Assert.assertNull(allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
verify(secretProvider, timeout(timeout).atLeastOnce()).rollSecret();
secretProvider.realRollSecret();

currentSecret = secretProvider.getCurrentSecret();
allSecrets = secretProvider.getAllSecrets();

@@ -47,7 +63,8 @@ public void testGetAndRollSecrets() throws Exception {
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret2, allSecrets[0]);
Assert.assertArrayEquals(secret1, allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
verify(secretProvider, timeout(timeout).atLeast(2)).rollSecret();
secretProvider.realRollSecret();

currentSecret = secretProvider.getCurrentSecret();
allSecrets = secretProvider.getAllSecrets();

@@ -55,9 +72,40 @@ public void testGetAndRollSecrets() throws Exception {
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret3, allSecrets[0]);
Assert.assertArrayEquals(secret2, allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
verify(secretProvider, timeout(timeout).atLeast(3)).rollSecret();
secretProvider.realRollSecret();
} finally {
secretProvider.destroy();
}
}

/**
* A hack to test RandomSignerSecretProvider.
* We want to test that RandomSignerSecretProvider.rollSecret() is
* periodically called at the expected frequency, but we want to exclude the
* race-condition and not take a long time to run the test.
*/
private class MockRandomSignerSecretProvider
extends RandomSignerSecretProvider {
MockRandomSignerSecretProvider(long seed) {
super(seed);
}
@Override
protected synchronized void rollSecret() {
// this is a no-op: simply used for Mockito to verify that rollSecret()
// is periodically called at the expected frequency
}

public void realRollSecret() {
// the test code manually calls RandomSignerSecretProvider.rollSecret()
// to update the state
super.rollSecret();
}
}

private byte[] generateNewSecret(Random rand) {
byte[] secret = new byte[32];
rand.nextBytes(secret);
return secret;
}
}
@@ -13,13 +13,11 @@
*/
package org.apache.hadoop.security.authentication.util;

import java.util.Arrays;
import java.nio.charset.Charset;
import java.util.Properties;
import java.util.Random;
import javax.servlet.ServletContext;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.curator.test.TestingServer;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;

@@ -37,13 +35,13 @@ public class TestZKSignerSecretProvider {

private TestingServer zkServer;

// rollover every 2 sec
// rollover every 50 msec
private final int timeout = 100;
private final long rolloverFrequency = timeout / 2;

static final Log LOG = LogFactory.getLog(TestZKSignerSecretProvider.class);
{
LogManager.getLogger( RolloverSignerSecretProvider.LOG.getName() ).setLevel(Level.DEBUG);
LogManager.getLogger(
RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
}

@Before

@@ -63,12 +61,12 @@ public void teardown() throws Exception {
// Test just one ZKSignerSecretProvider to verify that it works in the
// simplest case
public void testOne() throws Exception {
// use the same seed so we can predict the RNG
// Use the same seed and a "plain" Random so we can predict the RNG
long seed = System.currentTimeMillis();
Random rand = new Random(seed);
byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
byte[] secret2 = generateNewSecret(rand);
byte[] secret1 = generateNewSecret(rand);
byte[] secret3 = generateNewSecret(rand);
MockZKSignerSecretProvider secretProvider =
spy(new MockZKSignerSecretProvider(seed));
Properties config = new Properties();

@@ -115,7 +113,7 @@ public void testOne() throws Exception {
* A hack to test ZKSignerSecretProvider.
* We want to test that ZKSignerSecretProvider.rollSecret() is periodically
* called at the expected frequency, but we want to exclude the
* race-condition.
* race-condition and not take a long time to run the test.
*/
private class MockZKSignerSecretProvider extends ZKSignerSecretProvider {
MockZKSignerSecretProvider(long seed) {

@@ -134,6 +132,116 @@ public void realRollSecret() {
}
}

@Test
// HADOOP-14246 increased the length of the secret from 160 bits to 256 bits.
// This test verifies that the upgrade goes smoothly.
public void testUpgradeChangeSecretLength() throws Exception {
// Use the same seed and a "plain" Random so we can predict the RNG
long seed = System.currentTimeMillis();
Random rand = new Random(seed);
byte[] secret2 = Long.toString(rand.nextLong())
.getBytes(Charset.forName("UTF-8"));
byte[] secret1 = Long.toString(rand.nextLong())
.getBytes(Charset.forName("UTF-8"));
byte[] secret3 = Long.toString(rand.nextLong())
.getBytes(Charset.forName("UTF-8"));
rand = new Random(seed);
// Secrets 4 and 5 get thrown away by ZK when the new secret provider tries
// to init
byte[] secret4 = generateNewSecret(rand);
byte[] secret5 = generateNewSecret(rand);
byte[] secret6 = generateNewSecret(rand);
byte[] secret7 = generateNewSecret(rand);
// Initialize the znode data with the old secret length
MockZKSignerSecretProvider oldSecretProvider =
spy(new OldMockZKSignerSecretProvider(seed));
Properties config = new Properties();
config.setProperty(
ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
zkServer.getConnectString());
config.setProperty(ZKSignerSecretProvider.ZOOKEEPER_PATH,
"/secret");
try {
oldSecretProvider.init(config, getDummyServletContext(),
rolloverFrequency);

byte[] currentSecret = oldSecretProvider.getCurrentSecret();
byte[][] allSecrets = oldSecretProvider.getAllSecrets();
Assert.assertArrayEquals(secret1, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret1, allSecrets[0]);
Assert.assertNull(allSecrets[1]);
oldSecretProvider.realRollSecret();

currentSecret = oldSecretProvider.getCurrentSecret();
allSecrets = oldSecretProvider.getAllSecrets();
Assert.assertArrayEquals(secret2, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret2, allSecrets[0]);
Assert.assertArrayEquals(secret1, allSecrets[1]);
} finally {
oldSecretProvider.destroy();
}
// Now use a ZKSignerSecretProvider with the newer length
MockZKSignerSecretProvider newSecretProvider =
spy(new MockZKSignerSecretProvider(seed));
try {
newSecretProvider.init(config, getDummyServletContext(),
rolloverFrequency);

byte[] currentSecret = newSecretProvider.getCurrentSecret();
byte[][] allSecrets = newSecretProvider.getAllSecrets();
Assert.assertArrayEquals(secret2, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret2, allSecrets[0]);
Assert.assertArrayEquals(secret1, allSecrets[1]);
newSecretProvider.realRollSecret();

currentSecret = newSecretProvider.getCurrentSecret();
allSecrets = newSecretProvider.getAllSecrets();
Assert.assertArrayEquals(secret3, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret3, allSecrets[0]);
Assert.assertArrayEquals(secret2, allSecrets[1]);
newSecretProvider.realRollSecret();

currentSecret = newSecretProvider.getCurrentSecret();
allSecrets = newSecretProvider.getAllSecrets();
Assert.assertArrayEquals(secret6, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret6, allSecrets[0]);
Assert.assertArrayEquals(secret3, allSecrets[1]);
newSecretProvider.realRollSecret();

currentSecret = newSecretProvider.getCurrentSecret();
allSecrets = newSecretProvider.getAllSecrets();
Assert.assertArrayEquals(secret7, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret7, allSecrets[0]);
Assert.assertArrayEquals(secret6, allSecrets[1]);
} finally {
newSecretProvider.destroy();
}
}

/**
* A version of {@link MockZKSignerSecretProvider} that uses the old way of
* generating secrets (160 bit long).
*/
private class OldMockZKSignerSecretProvider
extends MockZKSignerSecretProvider {
private Random rand;
OldMockZKSignerSecretProvider(long seed) {
super(seed);
rand = new Random(seed);
}

@Override
protected byte[] generateRandomSecret() {
return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
}
}

@Test
public void testMultiple1() throws Exception {
testMultiple(1);

@@ -151,19 +259,19 @@ public void testMultiple2() throws Exception {
* @throws Exception
*/
public void testMultiple(int order) throws Exception {
// Use the same seed and a "plain" Random so we can predict the RNG
long seedA = System.currentTimeMillis();
Random rand = new Random(seedA);
byte[] secretA2 = Long.toString(rand.nextLong()).getBytes();
byte[] secretA1 = Long.toString(rand.nextLong()).getBytes();
byte[] secretA3 = Long.toString(rand.nextLong()).getBytes();
byte[] secretA4 = Long.toString(rand.nextLong()).getBytes();
// use the same seed so we can predict the RNG
byte[] secretA2 = generateNewSecret(rand);
byte[] secretA1 = generateNewSecret(rand);
byte[] secretA3 = generateNewSecret(rand);
byte[] secretA4 = generateNewSecret(rand);
long seedB = System.currentTimeMillis() + rand.nextLong();
rand = new Random(seedB);
byte[] secretB2 = Long.toString(rand.nextLong()).getBytes();
byte[] secretB1 = Long.toString(rand.nextLong()).getBytes();
byte[] secretB3 = Long.toString(rand.nextLong()).getBytes();
byte[] secretB4 = Long.toString(rand.nextLong()).getBytes();
byte[] secretB2 = generateNewSecret(rand);
byte[] secretB1 = generateNewSecret(rand);
byte[] secretB3 = generateNewSecret(rand);
byte[] secretB4 = generateNewSecret(rand);
MockZKSignerSecretProvider secretProviderA =
spy(new MockZKSignerSecretProvider(seedA));
MockZKSignerSecretProvider secretProviderB =

@@ -258,4 +366,10 @@ private ServletContext getDummyServletContext() {
.thenReturn(null);
return servletContext;
}

private byte[] generateNewSecret(Random rand) {
byte[] secret = new byte[32];
rand.nextBytes(secret);
return secret;
}
}
@@ -60,11 +60,6 @@
<artifactId>commons-math3</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
@@ -67,13 +67,11 @@ function hadoopcmd_case
hadoop_error ""
#try to locate hdfs and if present, delegate to it.
if [[ -f "${HADOOP_HDFS_HOME}/bin/hdfs" ]]; then
# shellcheck disable=SC2086
exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
--config "${HADOOP_CONF_DIR}" "${subcmd}" "$@"
--config "${HADOOP_CONF_DIR}" "${subcmd}" "$@"
elif [[ -f "${HADOOP_HOME}/bin/hdfs" ]]; then
# shellcheck disable=SC2086
exec "${HADOOP_HOME}/bin/hdfs" \
--config "${HADOOP_CONF_DIR}" "${subcmd}" "$@"
--config "${HADOOP_CONF_DIR}" "${subcmd}" "$@"
else
hadoop_error "HADOOP_HDFS_HOME not found!"
exit 1

@@ -174,9 +172,9 @@ else
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
else
echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1

@@ -201,7 +199,7 @@ if hadoop_need_reexec hadoop "${HADOOP_SUBCMD}"; then
exit $?
fi

hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

HADOOP_SUBCMD_ARGS=("$@")

@@ -221,60 +219,5 @@ fi

hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"

hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

hadoop_verify_secure_prereq
hadoop_setup_secure_service
priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
else
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
fi

if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
# shellcheck disable=SC2034
HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
# shellcheck disable=SC2034
HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
else
# shellcheck disable=SC2034
HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
fi
fi

hadoop_finalize

if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then
if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
hadoop_secure_daemon_handler \
"${HADOOP_DAEMON_MODE}" \
"${HADOOP_SUBCMD}" \
"${HADOOP_CLASSNAME}" \
"${daemon_pidfile}" \
"${daemon_outfile}" \
"${priv_pidfile}" \
"${priv_outfile}" \
"${priv_errfile}" \
"${HADOOP_SUBCMD_ARGS[@]}"
else
hadoop_daemon_handler \
"${HADOOP_DAEMON_MODE}" \
"${HADOOP_SUBCMD}" \
"${HADOOP_CLASSNAME}" \
"${daemon_pidfile}" \
"${daemon_outfile}" \
"${HADOOP_SUBCMD_ARGS[@]}"
fi
exit $?
else
# shellcheck disable=SC2086
hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}"
fi
# everything is in globals at this point, so call the generic handler
hadoop_generic_java_subcmd_handler
@@ -38,8 +38,10 @@
# settings that might be different between daemons & interactive

# you must be this high to ride the ride
if [[ -z "${BASH_VERSINFO}" ]] || [[ "${BASH_VERSINFO}" -lt 3 ]]; then
echo "Hadoop requires bash v3 or better. Sorry."
if [[ -z "${BASH_VERSINFO[0]}" ]] \
   || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \
   || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then
echo "bash v3.2+ is required. Sorry."
exit 1
fi

@@ -55,8 +57,10 @@ fi
# get our functions defined for usage later
if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
[[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
. "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
. "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
else
echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2

@@ -68,8 +72,10 @@ hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME
# allow overrides of the above and pre-defines of the below
if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
[[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example
. "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example
. "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh"
fi
@@ -115,9 +115,9 @@ function hadoop_verify_entry
[[ ${!1} =~ \ ${2}\ ]]
}

## @description Check if we are running with privilege
## @description Check if we are running with priv
## @description by default, this implementation looks for
## @description EUID=0. For OSes that have true privilege
## @description EUID=0. For OSes that have true priv
## @description separation, this should be something more complex
## @audience private
## @stability evolving

@@ -144,16 +144,13 @@ function hadoop_su
{
declare user=$1
shift
declare idret

if hadoop_privilege_check; then
id -u "${user}" >/dev/null 2>&1
idret=$?
if [[ ${idret} != 0 ]]; then
if hadoop_verify_user_resolves user; then
su -l "${user}" -- "$@"
else
hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting."
return 1
else
su -l "${user}" -- "$@"
fi
else
"$@"

@@ -194,15 +191,23 @@ function hadoop_uservar_su
declare uprogram
declare ucommand
declare uvar
declare svar

if hadoop_privilege_check; then
uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)

svar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER)

if [[ -n "${!uvar}" ]]; then
hadoop_su "${!uvar}" "$@"
elif [[ -n "${!svar}" ]]; then
## if we are here, then SECURE_USER with no USER defined
## we are already privileged, so just run the command and hope
## for the best
"$@"
else
hadoop_error "ERROR: Attempting to launch ${program} ${command} as root"
hadoop_error "ERROR: but there is no ${uvar} defined. Aborting launch."
hadoop_error "ERROR: Attempting to operate on ${program} ${command} as root"
hadoop_error "ERROR: but there is no ${uvar} defined. Aborting operation."
return 1
fi
else

@@ -477,9 +482,11 @@ function hadoop_bootstrap
# by default, we have not been self-re-execed
HADOOP_REEXECED_CMD=false

# shellcheck disable=SC2034
HADOOP_SUBCMD_SECURESERVICE=false

# This is the default we claim in hadoop-env.sh
JSVC_HOME=${JSVC_HOME:-"/usr/bin"}

# usage output set to zero
hadoop_reset_usage

@@ -533,7 +540,7 @@ function hadoop_exec_hadoopenv
if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
export HADOOP_ENV_PROCESSED=true
# shellcheck disable=SC1090
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
fi

@@ -789,10 +796,8 @@ function hadoop_populate_workers_file
local workersfile=$1
shift
if [[ -f "${workersfile}" ]]; then
# shellcheck disable=2034
HADOOP_WORKERS="${workersfile}"
elif [[ -f "${HADOOP_CONF_DIR}/${workersfile}" ]]; then
# shellcheck disable=2034
HADOOP_WORKERS="${HADOOP_CONF_DIR}/${workersfile}"
else
hadoop_error "ERROR: Cannot find hosts file \"${workersfile}\""

@@ -2128,16 +2133,47 @@ function hadoop_secure_daemon_handler
esac
}

## @description Get the environment variable used to validate users
## @description autodetect whether this is a priv subcmd
## @description by whether or not a priv user var exists
## @description and if HADOOP_SECURE_CLASSNAME is defined
## @audience public
## @stability stable
## @replaceable yes
## @param command
## @param subcommand
## @return string
function hadoop_get_verify_uservar
## @return 1 = not priv
## @return 0 = priv
function hadoop_detect_priv_subcmd
{
declare program=$1
declare command=$2

if [[ -z "${HADOOP_SECURE_CLASSNAME}" ]]; then
hadoop_debug "No secure classname defined."
return 1
fi

uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER)
if [[ -z "${!uvar}" ]]; then
hadoop_debug "No secure user defined."
return 1
fi
return 0
}

## @description Build custom subcommand var
## @audience public
## @stability stable
## @replaceable yes
## @param command
## @param subcommand
## @param customid
## @return string
function hadoop_build_custom_subcmd_var
{
declare program=$1
declare command=$2
declare custom=$3
declare uprogram
declare ucommand

@@ -2150,7 +2186,25 @@ function hadoop_get_verify_uservar
ucommand=${command^^}
fi

echo "${uprogram}_${ucommand}_USER"
echo "${uprogram}_${ucommand}_${custom}"
}

## @description Verify that username in a var converts to user id
## @audience public
## @stability stable
## @replaceable yes
## @param userstring
## @return 0 for success
## @return 1 for failure
function hadoop_verify_user_resolves
{
declare userstr=$1

if [[ -z ${userstr} || -z ${!userstr} ]] ; then
return 1
fi

id -u "${!userstr}" >/dev/null 2>&1
}

## @description Verify that ${USER} is allowed to execute the

@@ -2162,13 +2216,13 @@ function hadoop_get_verify_uservar
## @param subcommand
## @return return 0 on success
## @return exit 1 on failure
function hadoop_verify_user
function hadoop_verify_user_perm
{
declare program=$1
declare command=$2
declare uvar

uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)

if [[ -n ${!uvar} ]]; then
if [[ ${!uvar} != "${USER}" ]]; then

@@ -2204,7 +2258,7 @@ function hadoop_need_reexec
# otherwise no, don't re-exec and let the system deal with it.

if hadoop_privilege_check; then
uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
if [[ -n ${!uvar} ]]; then
if [[ ${!uvar} != "${USER}" ]]; then
return 0

@@ -2217,7 +2271,7 @@ function hadoop_need_reexec
## @description Add custom (program)_(command)_OPTS to HADOOP_OPTS.
## @description Also handles the deprecated cases from pre-3.x.
## @audience public
## @stability stable
## @stability evolving
## @replaceable yes
## @param program
## @param subcommand

@@ -2239,6 +2293,10 @@ function hadoop_subcommand_opts
# case the contents of vars. This is faster than
# calling tr.

## We don't call hadoop_build_custom_subcmd_var here
## since we need to construct this for the deprecation
## cases. For Hadoop 4.x, this needs to get cleaned up.

if [[ -z "${BASH_VERSINFO[0]}" ]] \
   || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')

@@ -2288,23 +2346,10 @@ function hadoop_subcommand_secure_opts
return 1
fi

# bash 4 and up have built-in ways to upper and lower
# case the contents of vars. This is faster than
# calling tr.

if [[ -z "${BASH_VERSINFO[0]}" ]] \
   || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
else
uprogram=${program^^}
ucommand=${command^^}
fi

# HDFS_DATANODE_SECURE_EXTRA_OPTS
# HDFS_NFS3_SECURE_EXTRA_OPTS
# ...
uvar="${uprogram}_${ucommand}_SECURE_EXTRA_OPTS"
uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_EXTRA_OPTS)

if [[ -n ${!uvar} ]]; then
hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"

@@ -2353,7 +2398,6 @@ function hadoop_parse_args
hadoop_debug "hadoop_parse_args: processing $1"
case $1 in
--buildpaths)
# shellcheck disable=SC2034
HADOOP_ENABLE_BUILD_PATHS=true
shift
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))

@@ -2364,7 +2408,6 @@ function hadoop_parse_args
shift
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
if [[ -d "${confdir}" ]]; then
# shellcheck disable=SC2034
HADOOP_CONF_DIR="${confdir}"
elif [[ -z "${confdir}" ]]; then
hadoop_error "ERROR: No parameter provided for --config "

@@ -2387,7 +2430,6 @@ function hadoop_parse_args
;;
--debug)
shift
# shellcheck disable=SC2034
HADOOP_SHELL_SCRIPT_DEBUG=true
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
;;

@@ -2396,7 +2438,6 @@ function hadoop_parse_args
;;
--hostnames)
shift
# shellcheck disable=SC2034
HADOOP_WORKER_NAMES="$1"
shift
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))

@@ -2460,3 +2501,99 @@ function hadoop_sed_escape
{
sed -e 's/[\/&]/\\&/g' <<< "$1"
}

## @description Handle subcommands from main program entries
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_generic_java_subcmd_handler
{
declare priv_outfile
declare priv_errfile
declare priv_pidfile
declare daemon_outfile
declare daemon_pidfile
declare secureuser

# The default/expected way to determine if a daemon is going to run in secure
# mode is defined by hadoop_detect_priv_subcmd. If this returns true
# then setup the secure user var and tell the world we're in secure mode

if hadoop_detect_priv_subcmd "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"; then
HADOOP_SUBCMD_SECURESERVICE=true
secureuser=$(hadoop_build_custom_subcmd_var "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" SECURE_USER)

if ! hadoop_verify_user_resolves "${secureuser}"; then
hadoop_error "ERROR: User defined in ${secureuser} (${!secureuser}) does not exist. Aborting."
exit 1
fi

HADOOP_SECURE_USER="${!secureuser}"
fi

# check if we're running in secure mode.
# breaking this up from the above lets 3rd parties
# do things a bit different
# secure services require some extra setup
# if yes, then we need to define all of the priv and daemon stuff
# if not, then we just need to define daemon stuff.
# note the daemon vars are purposefully different between the two

if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then

hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

hadoop_verify_secure_prereq
hadoop_setup_secure_service
priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
else
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
fi

# are we actually in daemon mode?
# if yes, use the daemon logger and the appropriate log file.
if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
else
HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
fi
fi

# finish defining the environment: system properties, env vars, class paths, etc.
hadoop_finalize

# do the hard work of launching a daemon or just executing our interactive
# java class
if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then
if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
hadoop_secure_daemon_handler \
"${HADOOP_DAEMON_MODE}" \
"${HADOOP_SUBCMD}" \
"${HADOOP_SECURE_CLASSNAME}" \
"${daemon_pidfile}" \
"${daemon_outfile}" \
"${priv_pidfile}" \
"${priv_outfile}" \
"${priv_errfile}" \
"${HADOOP_SUBCMD_ARGS[@]}"
else
hadoop_daemon_handler \
"${HADOOP_DAEMON_MODE}" \
"${HADOOP_SUBCMD}" \
"${HADOOP_CLASSNAME}" \
"${daemon_pidfile}" \
"${daemon_outfile}" \
"${HADOOP_SUBCMD_ARGS[@]}"
fi
exit $?
else
hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}"
fi
}
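For reference, the generalized helper introduced above composes an environment-variable name from the program name, the subcommand, and a custom suffix, and the callers then read that variable through bash indirect expansion. A minimal sketch of how it is used (the "hdfs datanode" values are only an illustration):

# "hdfs" + "datanode" + "SECURE_USER" -> HDFS_DATANODE_SECURE_USER
uvar=$(hadoop_build_custom_subcmd_var "hdfs" "datanode" "SECURE_USER")
echo "${uvar}"                                   # prints HDFS_DATANODE_SECURE_USER
if [[ -n "${!uvar}" ]]; then                     # indirect expansion, as in the functions above
  echo "secure user for this subcommand is ${!uvar}"
fi

The same helper now backs hadoop_verify_user_perm, hadoop_need_reexec, hadoop_detect_priv_subcmd, and the *_SECURE_EXTRA_OPTS lookup, replacing the USER-only hadoop_get_verify_uservar.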
@@ -269,7 +269,7 @@ esac
#
# When running a secure daemon, the default value of HADOOP_IDENT_STRING
# ends up being a bit bogus. Therefore, by default, the code will
# replace HADOOP_IDENT_STRING with HADOOP_SECURE_xx_USER. If one wants
# replace HADOOP_IDENT_STRING with HADOOP_xx_SECURE_USER. If one wants
# to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
# export HADOOP_SECURE_IDENT_PRESERVE="true"

@@ -325,20 +325,13 @@ esac
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
# This will replace the hadoop.id.str Java property in secure mode.
# export HADOOP_SECURE_DN_USER=hdfs
# export HDFS_DATANODE_SECURE_USER=hdfs

# Supplemental options for secure datanodes
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"

# Where datanode log files are stored in the secure data environment.
# This will replace the hadoop.log.dir Java property in secure mode.
# export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_SECURE_LOG_DIR}

# Where datanode pid files are stored in the secure data environment.
# export HADOOP_SECURE_DN_PID_DIR=${HADOOP_SECURE_PID_DIR}

###
# NFS3 Gateway specific parameters
###

@@ -361,7 +354,7 @@ esac

# On privileged gateways, user to run the gateway as after dropping privileges
# This will replace the hadoop.id.str Java property in secure mode.
# export HADOOP_PRIVILEGED_NFS_USER=nfsserver
# export HDFS_NFS3_SECURE_USER=nfsserver

###
# ZKFailoverController specific parameters
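As the hadoop-env.sh edits show, the daemon-specific settings move from the old one-off names to the <PROGRAM>_<SUBCOMMAND>_SECURE_USER form that hadoop_build_custom_subcmd_var expects. A minimal sketch of a migrated configuration (account names follow the examples in the template above):

# old style, removed by this change
# export HADOOP_SECURE_DN_USER=hdfs
# export HADOOP_PRIVILEGED_NFS_USER=nfsserver

# new style, matching the generated variable names
export HDFS_DATANODE_SECURE_USER=hdfs
export HDFS_NFS3_SECURE_USER=nfsserver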
@@ -874,7 +874,8 @@ public FsServerDefaults getServerDefaults() throws IOException {
config.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),
false,
FS_TRASH_INTERVAL_DEFAULT,
DataChecksum.Type.CRC32);
DataChecksum.Type.CRC32,
"");
}

/**
@@ -54,6 +54,8 @@ public Writable newInstance() {
private boolean encryptDataTransfer;
private long trashInterval;
private DataChecksum.Type checksumType;
private String keyProviderUri;
private byte storagepolicyId;

public FsServerDefaults() {
}

@@ -61,7 +63,17 @@ public FsServerDefaults() {
public FsServerDefaults(long blockSize, int bytesPerChecksum,
int writePacketSize, short replication, int fileBufferSize,
boolean encryptDataTransfer, long trashInterval,
DataChecksum.Type checksumType) {
DataChecksum.Type checksumType, String keyProviderUri) {
this(blockSize, bytesPerChecksum, writePacketSize, replication,
fileBufferSize, encryptDataTransfer, trashInterval, checksumType,
keyProviderUri, (byte) 0);
}

public FsServerDefaults(long blockSize, int bytesPerChecksum,
int writePacketSize, short replication, int fileBufferSize,
boolean encryptDataTransfer, long trashInterval,
DataChecksum.Type checksumType,
String keyProviderUri, byte storagepolicy) {
this.blockSize = blockSize;
this.bytesPerChecksum = bytesPerChecksum;
this.writePacketSize = writePacketSize;

@@ -70,6 +82,8 @@ public FsServerDefaults(long blockSize, int bytesPerChecksum,
this.encryptDataTransfer = encryptDataTransfer;
this.trashInterval = trashInterval;
this.checksumType = checksumType;
this.keyProviderUri = keyProviderUri;
this.storagepolicyId = storagepolicy;
}

public long getBlockSize() {

@@ -104,6 +118,18 @@ public DataChecksum.Type getChecksumType() {
return checksumType;
}

/* null means old style namenode.
* "" (empty string) means namenode is upgraded but EZ is not supported.
* some string means that value is the key provider.
*/
public String getKeyProviderUri() {
return keyProviderUri;
}

public byte getDefaultStoragePolicyId() {
return storagepolicyId;
}

// /////////////////////////////////////////
// Writable
// /////////////////////////////////////////

@@ -116,6 +142,7 @@ public void write(DataOutput out) throws IOException {
out.writeShort(replication);
out.writeInt(fileBufferSize);
WritableUtils.writeEnum(out, checksumType);
out.writeByte(storagepolicyId);
}

@Override

@@ -127,5 +154,6 @@ public void readFields(DataInput in) throws IOException {
replication = in.readShort();
fileBufferSize = in.readInt();
checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
storagepolicyId = in.readByte();
}
}
@@ -27,12 +27,6 @@
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.DataChecksum;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.znerd.xmlenc.XMLOutputter;

import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;

/** MD5 of MD5 of CRC32. */
@InterfaceAudience.LimitedPrivate({"HDFS"})

@@ -107,62 +101,6 @@ public void write(DataOutput out) throws IOException {
md5.write(out);
}

/** Write that object to xml output. */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that
) throws IOException {
xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
if (that != null) {
xml.attribute("bytesPerCRC", "" + that.bytesPerCRC);
xml.attribute("crcPerBlock", "" + that.crcPerBlock);
xml.attribute("crcType", ""+ that.getCrcType().name());
xml.attribute("md5", "" + that.md5);
}
xml.endTag();
}

/** Return the object represented in the attributes. */
public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs
) throws SAXException {
final String bytesPerCRC = attrs.getValue("bytesPerCRC");
final String crcPerBlock = attrs.getValue("crcPerBlock");
final String md5 = attrs.getValue("md5");
String crcType = attrs.getValue("crcType");
DataChecksum.Type finalCrcType;
if (bytesPerCRC == null || crcPerBlock == null || md5 == null) {
return null;
}

try {
// old versions don't support crcType.
if (crcType == null || crcType.equals("")) {
finalCrcType = DataChecksum.Type.CRC32;
} else {
finalCrcType = DataChecksum.Type.valueOf(crcType);
}

switch (finalCrcType) {
case CRC32:
return new MD5MD5CRC32GzipFileChecksum(
Integer.parseInt(bytesPerCRC),
Integer.parseInt(crcPerBlock),
new MD5Hash(md5));
case CRC32C:
return new MD5MD5CRC32CastagnoliFileChecksum(
Integer.parseInt(bytesPerCRC),
Integer.parseInt(crcPerBlock),
new MD5Hash(md5));
default:
// we should never get here since finalCrcType will
// hold a valid type or we should have got an exception.
return null;
}
} catch (Exception e) {
throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC
+ ", crcPerBlock=" + crcPerBlock + ", crcType=" + crcType
+ ", md5=" + md5, e);
}
}

@Override
public String toString() {
return getAlgorithmName() + ":" + md5;
@@ -54,6 +54,7 @@ public class FtpConfigKeys extends CommonConfigurationKeys {
public static final long FS_TRASH_INTERVAL_DEFAULT = 0;
public static final DataChecksum.Type CHECKSUM_TYPE_DEFAULT =
DataChecksum.Type.CRC32;
public static final String KEY_PROVIDER_URI_DEFAULT = "";

protected static FsServerDefaults getServerDefaults() throws IOException {
return new FsServerDefaults(

@@ -64,7 +65,8 @@ protected static FsServerDefaults getServerDefaults() throws IOException {
STREAM_BUFFER_SIZE_DEFAULT,
ENCRYPT_DATA_TRANSFER_DEFAULT,
FS_TRASH_INTERVAL_DEFAULT,
CHECKSUM_TYPE_DEFAULT);
CHECKSUM_TYPE_DEFAULT,
KEY_PROVIDER_URI_DEFAULT);
}
}
@@ -54,6 +54,8 @@ public class LocalConfigKeys extends CommonConfigurationKeys {
public static final long FS_TRASH_INTERVAL_DEFAULT = 0;
public static final DataChecksum.Type CHECKSUM_TYPE_DEFAULT =
DataChecksum.Type.CRC32;
public static final String KEY_PROVIDER_URI_DEFAULT = "";

public static FsServerDefaults getServerDefaults() throws IOException {
return new FsServerDefaults(
BLOCK_SIZE_DEFAULT,

@@ -63,7 +65,8 @@ public static FsServerDefaults getServerDefaults() throws IOException {
STREAM_BUFFER_SIZE_DEFAULT,
ENCRYPT_DATA_TRANSFER_DEFAULT,
FS_TRASH_INTERVAL_DEFAULT,
CHECKSUM_TYPE_DEFAULT);
CHECKSUM_TYPE_DEFAULT,
KEY_PROVIDER_URI_DEFAULT);
}
}
|
||||
package org.apache.hadoop.io.erasurecode;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Splitter;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.io.erasurecode.codec.ErasureCodec;
|
||||
@ -26,6 +30,8 @@
|
||||
import org.apache.hadoop.io.erasurecode.codec.XORErasureCodec;
|
||||
import org.apache.hadoop.io.erasurecode.coder.ErasureDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.coder.ErasureEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
|
||||
@ -35,6 +41,7 @@
|
||||
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.lang.reflect.InvocationTargetException;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* A codec & coder utility to help create coders conveniently.
|
||||
@ -49,41 +56,50 @@
|
||||
@InterfaceAudience.Private
|
||||
public final class CodecUtil {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(CodecUtil.class);
|
||||
|
||||
public static final String IO_ERASURECODE_CODEC = "io.erasurecode.codec.";
|
||||
|
||||
/** Erasure coder XOR codec. */
|
||||
public static final String IO_ERASURECODE_CODEC_XOR_KEY =
|
||||
"io.erasurecode.codec.xor";
|
||||
IO_ERASURECODE_CODEC + "xor";
|
||||
public static final String IO_ERASURECODE_CODEC_XOR =
|
||||
XORErasureCodec.class.getCanonicalName();
|
||||
/** Erasure coder Reed-Solomon codec. */
|
||||
public static final String IO_ERASURECODE_CODEC_RS_KEY =
|
||||
"io.erasurecode.codec.rs";
|
||||
IO_ERASURECODE_CODEC + "rs";
|
||||
public static final String IO_ERASURECODE_CODEC_RS =
|
||||
RSErasureCodec.class.getCanonicalName();
|
||||
/** Erasure coder hitch hiker XOR codec. */
|
||||
public static final String IO_ERASURECODE_CODEC_HHXOR_KEY =
|
||||
"io.erasurecode.codec.hhxor";
|
||||
IO_ERASURECODE_CODEC + "hhxor";
|
||||
public static final String IO_ERASURECODE_CODEC_HHXOR =
|
||||
HHXORErasureCodec.class.getCanonicalName();
|
||||
|
||||
/** Supported erasure codec classes. */
|
||||
|
||||
/** Raw coder factory for the RS codec. */
|
||||
public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
|
||||
"io.erasurecode.codec.rs.rawcoder";
|
||||
public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT =
|
||||
RSRawErasureCoderFactory.class.getCanonicalName();
|
||||
|
||||
/** Raw coder factory for the RS legacy codec. */
|
||||
public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY =
|
||||
"io.erasurecode.codec.rs-legacy.rawcoder";
|
||||
public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_DEFAULT =
|
||||
/** Comma separated raw codec name. The first coder is prior to the latter. */
|
||||
public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODERS_KEY =
|
||||
IO_ERASURECODE_CODEC + "rs-legacy.rawcoders";
|
||||
public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODERS_DEFAULT =
|
||||
RSRawErasureCoderFactoryLegacy.class.getCanonicalName();
|
||||
public static final String IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY =
|
||||
IO_ERASURECODE_CODEC + "rs.rawcoders";
|
||||
public static final String IO_ERASURECODE_CODEC_RS_RAWCODERS_DEFAULT =
|
||||
NativeRSRawErasureCoderFactory.class.getCanonicalName() +
|
||||
"," + RSRawErasureCoderFactory.class.getCanonicalName();
|
||||
|
||||
/** Raw coder factory for the XOR codec. */
|
||||
public static final String IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY =
|
||||
"io.erasurecode.codec.xor.rawcoder";
|
||||
public static final String IO_ERASURECODE_CODEC_XOR_RAWCODER_DEFAULT =
|
||||
XORRawErasureCoderFactory.class.getCanonicalName();
|
||||
public static final String IO_ERASURECODE_CODEC_XOR_RAWCODERS_KEY =
|
||||
IO_ERASURECODE_CODEC + "xor.rawcoders";
|
||||
public static final String IO_ERASURECODE_CODEC_XOR_RAWCODERS_DEFAULT =
|
||||
NativeXORRawErasureCoderFactory.class.getCanonicalName() +
|
||||
"," + XORRawErasureCoderFactory.class.getCanonicalName();
|
||||
|
||||
// Default coders for each codec name.
|
||||
public static final Map<String, String> DEFAULT_CODERS_MAP = ImmutableMap.of(
|
||||
"rs", IO_ERASURECODE_CODEC_RS_RAWCODERS_DEFAULT,
|
||||
"rs-legacy", IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODERS_DEFAULT,
|
||||
"xor", IO_ERASURECODE_CODEC_XOR_RAWCODERS_DEFAULT
|
||||
);
|
||||
|
||||
private CodecUtil() { }
|
||||
|
||||
@ -133,12 +149,7 @@ public static RawErasureEncoder createRawEncoder(
|
||||
Preconditions.checkNotNull(conf);
|
||||
Preconditions.checkNotNull(codec);
|
||||
|
||||
String rawCoderFactoryKey = getRawCoderFactNameFromCodec(conf, codec);
|
||||
|
||||
RawErasureCoderFactory fact = createRawCoderFactory(conf,
|
||||
rawCoderFactoryKey);
|
||||
|
||||
return fact.createEncoder(coderOptions);
|
||||
return createRawEncoderWithFallback(conf, codec, coderOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -153,12 +164,7 @@ public static RawErasureDecoder createRawDecoder(
|
||||
Preconditions.checkNotNull(conf);
|
||||
Preconditions.checkNotNull(codec);
|
||||
|
||||
String rawCoderFactoryKey = getRawCoderFactNameFromCodec(conf, codec);
|
||||
|
||||
RawErasureCoderFactory fact = createRawCoderFactory(conf,
|
||||
rawCoderFactoryKey);
|
||||
|
||||
return fact.createDecoder(coderOptions);
|
||||
return createRawDecoderWithFallback(conf, codec, coderOptions);
|
||||
}
|
||||
|
||||
private static RawErasureCoderFactory createRawCoderFactory(
|
||||
@ -180,31 +186,52 @@ private static RawErasureCoderFactory createRawCoderFactory(
|
||||
return fact;
|
||||
}
|
||||
|
||||
private static String getRawCoderFactNameFromCodec(Configuration conf,
|
||||
String codec) {
|
||||
switch (codec) {
|
||||
case ErasureCodeConstants.RS_CODEC_NAME:
|
||||
return conf.get(
|
||||
IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
|
||||
IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT);
|
||||
case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
|
||||
return conf.get(
|
||||
IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY,
|
||||
IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_DEFAULT);
|
||||
case ErasureCodeConstants.XOR_CODEC_NAME:
|
||||
return conf.get(
|
||||
IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY,
|
||||
IO_ERASURECODE_CODEC_XOR_RAWCODER_DEFAULT);
|
||||
default:
|
||||
// For custom codec, we throw exception if the factory is not configured
|
||||
String rawCoderKey = "io.erasurecode.codec." + codec + ".rawcoder";
|
||||
String factName = conf.get(rawCoderKey);
|
||||
if (factName == null) {
|
||||
throw new IllegalArgumentException("Raw coder factory not configured " +
|
||||
"for custom codec " + codec);
|
||||
// Return comma separated coder names
|
||||
private static String getRawCoders(Configuration conf, String codec) {
|
||||
return conf.get(
|
||||
IO_ERASURECODE_CODEC + codec + ".rawcoders",
|
||||
DEFAULT_CODERS_MAP.getOrDefault(codec, codec)
|
||||
);
|
||||
}
|
||||
|
||||
private static RawErasureEncoder createRawEncoderWithFallback(
|
||||
Configuration conf, String codec, ErasureCoderOptions coderOptions) {
|
||||
String coders = getRawCoders(conf, codec);
|
||||
for (String factName : Splitter.on(",").split(coders)) {
|
||||
try {
|
||||
if (factName != null) {
|
||||
RawErasureCoderFactory fact = createRawCoderFactory(conf,
|
||||
factName);
|
||||
return fact.createEncoder(coderOptions);
|
||||
}
|
||||
} catch (LinkageError | Exception e) {
|
||||
// Fallback to next coder if possible
|
||||
LOG.warn("Failed to create raw erasure encoder " + factName +
|
||||
", fallback to next codec if possible", e);
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("Fail to create raw erasure " +
|
||||
"encoder with given codec: " + codec);
|
||||
}
|
||||
|
||||
private static RawErasureDecoder createRawDecoderWithFallback(
|
||||
Configuration conf, String codec, ErasureCoderOptions coderOptions) {
|
||||
String coders = getRawCoders(conf, codec);
|
||||
for (String factName : Splitter.on(",").split(coders)) {
|
||||
try {
|
||||
if (factName != null) {
|
||||
RawErasureCoderFactory fact = createRawCoderFactory(conf,
|
||||
factName);
|
||||
return fact.createDecoder(coderOptions);
|
||||
}
|
||||
} catch (LinkageError | Exception e) {
|
||||
// Fallback to next coder if possible
|
||||
LOG.warn("Failed to create raw erasure decoder " + factName +
|
||||
", fallback to next codec if possible", e);
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("Fail to create raw erasure " +
|
||||
"encoder with given codec: " + codec);
|
||||
}
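A minimal sketch of how a client exercises this fallback path; the (6, 3) schema and the explicit factory ordering are illustrative choices rather than values required by this change, and it assumes the hadoop-common erasure coding classes are on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class RsCoderFallbackSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Try the native factory first and fall back to the pure-Java factory if
    // it cannot be instantiated (for example, when ISA-L is not available).
    conf.set("io.erasurecode.codec.rs.rawcoders",
        "org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory,"
            + "org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory");

    // A (6, 3) Reed-Solomon layout: 6 data units and 3 parity units.
    ErasureCoderOptions options = new ErasureCoderOptions(6, 3);

    RawErasureEncoder encoder = CodecUtil.createRawEncoder(
        conf, ErasureCodeConstants.RS_CODEC_NAME, options);
    RawErasureDecoder decoder = CodecUtil.createRawDecoder(
        conf, ErasureCodeConstants.RS_CODEC_NAME, options);

    System.out.println("encoder: " + encoder.getClass().getName());
    System.out.println("decoder: " + decoder.getClass().getName());
  }
}
```

If every factory in the list fails, the IllegalArgumentException above is the terminal error the caller sees.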
|
||||
|
||||
private static ErasureCodec createCodec(Configuration conf,
|
||||
|
@ -143,6 +143,7 @@ MutableCounterLong newCounter(MetricsInfo info, long iVal) {
|
||||
public MutableGaugeInt newGauge(String name, String desc, int iVal) {
|
||||
return newGauge(Interns.info(name, desc), iVal);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a mutable integer gauge
|
||||
* @param info metadata of the metric
|
||||
@ -180,6 +181,30 @@ public synchronized MutableGaugeLong newGauge(MetricsInfo info, long iVal) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a mutable float gauge
|
||||
* @param name of the metric
|
||||
* @param desc metric description
|
||||
* @param iVal initial value
|
||||
* @return a new gauge object
|
||||
*/
|
||||
public MutableGaugeFloat newGauge(String name, String desc, float iVal) {
|
||||
return newGauge(Interns.info(name, desc), iVal);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a mutable float gauge
|
||||
* @param info metadata of the metric
|
||||
* @param iVal initial value
|
||||
* @return a new gauge object
|
||||
*/
|
||||
public synchronized MutableGaugeFloat newGauge(MetricsInfo info, float iVal) {
|
||||
checkMetricName(info.name());
|
||||
MutableGaugeFloat ret = new MutableGaugeFloat(info, iVal);
|
||||
metricsMap.put(info.name(), ret);
|
||||
return ret;
|
||||
}
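A short sketch of registering and updating one of the new float gauges through a MetricsRegistry; the source and metric names are illustrative.

```java
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableGaugeFloat;

public class FloatGaugeSketch {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("ExampleSource");

    // Float-valued gauge, registered alongside the existing int/long variants.
    MutableGaugeFloat hitRatio =
        registry.newGauge("CacheHitRatio", "Cache hit ratio", 0.0f);

    hitRatio.set(0.87f);   // overwrite the current value
    hitRatio.incr();       // incr()/decr() step by 1.0f

    System.out.println("current value: " + hitRatio.value());
  }
}
```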
|
||||
|
||||
/**
|
||||
* Create a mutable metric that estimates quantiles of a stream of values
|
||||
* @param name of the metric
|
||||
@ -420,4 +445,5 @@ public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) {
|
||||
.add("info", metricsInfo).add("tags", tags()).add("metrics", metrics())
|
||||
.toString();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -0,0 +1,80 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.metrics2.lib;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.hadoop.metrics2.MetricsInfo;
|
||||
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
|
||||
|
||||
/**
|
||||
* A mutable float gauge.
|
||||
*/
|
||||
public class MutableGaugeFloat extends MutableGauge {
|
||||
|
||||
private AtomicInteger value = new AtomicInteger();
|
||||
|
||||
MutableGaugeFloat(MetricsInfo info, float initValue) {
|
||||
super(info);
|
||||
this.value.set(Float.floatToIntBits(initValue));
|
||||
}
|
||||
|
||||
public float value() {
|
||||
return Float.intBitsToFloat(value.get());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void incr() {
|
||||
incr(1.0f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void decr() {
|
||||
incr(-1.0f);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void snapshot(MetricsRecordBuilder builder, boolean all) {
|
||||
if (all || changed()) {
|
||||
builder.addGauge(info(), value());
|
||||
clearChanged();
|
||||
}
|
||||
}
|
||||
|
||||
public void set(float value) {
|
||||
this.value.set(Float.floatToIntBits(value));
|
||||
setChanged();
|
||||
}
|
||||
|
||||
private final boolean compareAndSet(float expect, float update) {
|
||||
return value.compareAndSet(Float.floatToIntBits(expect),
|
||||
Float.floatToIntBits(update));
|
||||
}
|
||||
|
||||
private void incr(float delta) {
|
||||
while (true) {
|
||||
float current = value.get();
|
||||
float next = current + delta;
|
||||
if (compareAndSet(current, next)) {
|
||||
setChanged();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
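The gauge stores the float as its raw int bits inside an AtomicInteger because the JDK has no AtomicFloat, so compare-and-set operates on the exact bit pattern that was read. A standalone sketch of the same retry loop (class name is illustrative):

```java
import java.util.concurrent.atomic.AtomicInteger;

/** Minimal illustration of the float-over-int-bits CAS pattern used above. */
class AtomicFloatSketch {
  private final AtomicInteger bits = new AtomicInteger(Float.floatToIntBits(0f));

  float get() {
    return Float.intBitsToFloat(bits.get());
  }

  void add(float delta) {
    while (true) {
      int current = bits.get();
      float next = Float.intBitsToFloat(current) + delta;
      // CAS against the bit pattern we read; retry if another thread raced us.
      if (bits.compareAndSet(current, Float.floatToIntBits(next))) {
        return;
      }
    }
  }
}
```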
|
@ -60,6 +60,9 @@ MutableMetric newForField(Field field, Metric annotation,
|
||||
if (cls == MutableGaugeLong.class) {
|
||||
return registry.newGauge(info, 0L);
|
||||
}
|
||||
if (cls == MutableGaugeFloat.class) {
|
||||
return registry.newGauge(info, 0f);
|
||||
}
|
||||
if (cls == MutableRate.class) {
|
||||
return registry.newRate(info.name(), info.description(),
|
||||
annotation.always());
|
||||
|
@ -53,6 +53,8 @@
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
************************************************************************
|
||||
@ -228,6 +230,12 @@ public final void check(final String[] host, final String[] cns,
|
||||
|
||||
abstract class AbstractVerifier implements SSLHostnameVerifier {
|
||||
|
||||
/**
|
||||
* Logs through the SSLFactory logger, as SSLFactory is the only consumer of
* this verifier class.
|
||||
*/
|
||||
static final Logger LOG = LoggerFactory.getLogger(SSLFactory.class);
|
||||
|
||||
/**
|
||||
* This contains a list of 2nd-level domains that aren't allowed to
|
||||
* have wildcards when combined with country-codes.
|
||||
@ -354,13 +362,24 @@ public void check(String[] host, X509Certificate cert)
|
||||
throws SSLException {
|
||||
String[] cns = Certificates.getCNs(cert);
|
||||
String[] subjectAlts = Certificates.getDNSSubjectAlts(cert);
|
||||
check(host, cns, subjectAlts);
|
||||
try {
|
||||
check(host, cns, subjectAlts);
|
||||
} catch (SSLException e) {
|
||||
LOG.error("Host check error {}", e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
public void check(final String[] hosts, final String[] cns,
|
||||
final String[] subjectAlts, final boolean ie6,
|
||||
final boolean strictWithSubDomains)
|
||||
throws SSLException {
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("Hosts:{}, CNs:{} subjectAlts:{}, ie6:{}, " +
|
||||
"strictWithSubDomains{}", Arrays.toString(hosts),
|
||||
Arrays.toString(cns), Arrays.toString(subjectAlts), ie6,
|
||||
strictWithSubDomains);
|
||||
}
|
||||
// Build up lists of allowed hosts for logging/debugging purposes.
|
||||
StringBuffer buf = new StringBuffer(32);
|
||||
buf.append('<');
|
||||
|
@ -26,7 +26,6 @@
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
|
||||
/**
|
||||
* Utils for KMS.
|
||||
@ -51,21 +50,20 @@ private KMSUtil() { /* Hidden constructor */ }
|
||||
public static KeyProvider createKeyProvider(final Configuration conf,
|
||||
final String configKeyName) throws IOException {
|
||||
LOG.debug("Creating key provider with config key {}", configKeyName);
|
||||
final String providerUriStr = conf.getTrimmed(configKeyName, "");
|
||||
final String providerUriStr = conf.getTrimmed(configKeyName);
|
||||
// No provider set in conf
|
||||
if (providerUriStr.isEmpty()) {
|
||||
if (providerUriStr == null || providerUriStr.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
final URI providerUri;
|
||||
try {
|
||||
providerUri = new URI(providerUriStr);
|
||||
} catch (URISyntaxException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
return createKeyProviderFromUri(conf, URI.create(providerUriStr));
|
||||
}
|
||||
|
||||
public static KeyProvider createKeyProviderFromUri(final Configuration conf,
|
||||
final URI providerUri) throws IOException {
|
||||
KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf);
|
||||
if (keyProvider == null) {
|
||||
throw new IOException("Could not instantiate KeyProvider from " +
|
||||
configKeyName + " setting of '" + providerUriStr + "'");
|
||||
throw new IOException("Could not instantiate KeyProvider for uri: " +
|
||||
providerUri);
|
||||
}
|
||||
if (keyProvider.isTransient()) {
|
||||
throw new IOException("KeyProvider " + keyProvider.toString()
|
||||
|
@ -76,14 +76,6 @@ public int compare(Entry left, Entry right) {
|
||||
return l > r? 1: l < r? -1: 0;
|
||||
}
|
||||
};
|
||||
|
||||
/** A clock for measuring time so that it can be mocked in unit tests. */
|
||||
static class Clock {
|
||||
/** @return the current time. */
|
||||
long currentTime() {
|
||||
return System.nanoTime();
|
||||
}
|
||||
}
|
||||
|
||||
private static int updateRecommendedLength(int recommendedLength,
|
||||
int sizeLimit) {
|
||||
@ -102,7 +94,7 @@ private static int updateRecommendedLength(int recommendedLength,
|
||||
private final long creationExpirationPeriod;
|
||||
private final long accessExpirationPeriod;
|
||||
private final int sizeLimit;
|
||||
private final Clock clock;
|
||||
private final Timer timer;
|
||||
|
||||
/**
|
||||
* @param recommendedLength Recommended size of the internal array.
|
||||
@ -120,7 +112,7 @@ public LightWeightCache(final int recommendedLength,
|
||||
final long creationExpirationPeriod,
|
||||
final long accessExpirationPeriod) {
|
||||
this(recommendedLength, sizeLimit,
|
||||
creationExpirationPeriod, accessExpirationPeriod, new Clock());
|
||||
creationExpirationPeriod, accessExpirationPeriod, new Timer());
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
@ -128,7 +120,7 @@ public LightWeightCache(final int recommendedLength,
|
||||
final int sizeLimit,
|
||||
final long creationExpirationPeriod,
|
||||
final long accessExpirationPeriod,
|
||||
final Clock clock) {
|
||||
final Timer timer) {
|
||||
super(updateRecommendedLength(recommendedLength, sizeLimit));
|
||||
|
||||
this.sizeLimit = sizeLimit;
|
||||
@ -147,11 +139,11 @@ public LightWeightCache(final int recommendedLength,
|
||||
|
||||
this.queue = new PriorityQueue<Entry>(
|
||||
sizeLimit > 0? sizeLimit + 1: 1 << 10, expirationTimeComparator);
|
||||
this.clock = clock;
|
||||
this.timer = timer;
|
||||
}
|
||||
|
||||
void setExpirationTime(final Entry e, final long expirationPeriod) {
|
||||
e.setExpirationTime(clock.currentTime() + expirationPeriod);
|
||||
e.setExpirationTime(timer.monotonicNowNanos() + expirationPeriod);
|
||||
}
|
||||
|
||||
boolean isExpired(final Entry e, final long now) {
|
||||
@ -168,7 +160,7 @@ private E evict() {
|
||||
|
||||
/** Evict expired entries. */
|
||||
private void evictExpiredEntries() {
|
||||
final long now = clock.currentTime();
|
||||
final long now = timer.monotonicNowNanos();
|
||||
for(int i = 0; i < EVICTION_LIMIT; i++) {
|
||||
final Entry peeked = queue.peek();
|
||||
if (peeked == null || !isExpired(peeked, now)) {
|
||||
|
@ -65,6 +65,16 @@ public static long monotonicNow() {
|
||||
return System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
|
||||
}
|
||||
|
||||
/**
|
||||
* Same as {@link #monotonicNow()} but returns its result in nanoseconds.
|
||||
* Note that this is subject to the same resolution constraints as
|
||||
* {@link System#nanoTime()}.
|
||||
* @return a monotonic clock that counts in nanoseconds.
|
||||
*/
|
||||
public static long monotonicNowNanos() {
|
||||
return System.nanoTime();
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert time in millisecond to human readable format.
|
||||
* @return a human readable string for the input time
|
||||
|
@ -48,4 +48,14 @@ public long now() {
|
||||
* @return a monotonic clock that counts in milliseconds.
|
||||
*/
|
||||
public long monotonicNow() { return Time.monotonicNow(); }
|
||||
|
||||
/**
|
||||
* Same as {@link #monotonicNow()} but returns its result in nanoseconds.
|
||||
* Note that this is subject to the same resolution constraints as
|
||||
* {@link System#nanoTime()}.
|
||||
* @return a monotonic clock that counts in nanoseconds.
|
||||
*/
|
||||
public long monotonicNowNanos() {
|
||||
return Time.monotonicNowNanos();
|
||||
}
|
||||
}
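A sketch of the intended use of the injectable clock: production code measures elapsed nanoseconds through a Timer instance, while tests substitute a FakeTimer. The RequestTracker class below is purely illustrative.

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.Timer;

/** Hypothetical component that times work through an injectable Timer. */
class RequestTracker {
  private final Timer timer;

  RequestTracker(Timer timer) {
    this.timer = timer;  // new Timer() in production, a FakeTimer in tests
  }

  /** Runs the task and returns its duration in microseconds. */
  long timedRunMicros(Runnable work) {
    final long start = timer.monotonicNowNanos();
    work.run();
    // Resolution is bounded by System.nanoTime(), as noted in the javadoc.
    return TimeUnit.NANOSECONDS.toMicros(timer.monotonicNowNanos() - start);
  }
}
```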
|
||||
|
@ -31,8 +31,8 @@
|
||||
/**
|
||||
* This class returns build information about Hadoop components.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Stable
|
||||
public class VersionInfo {
|
||||
private static final Log LOG = LogFactory.getLog(VersionInfo.class);
|
||||
|
||||
@ -102,8 +102,8 @@ public static String getVersion() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the Git commit hash of the repository when compiled.
|
||||
* @return the commit hash, eg. "18f64065d5db6208daf50b02c1b5ed4ee3ce547a"
|
||||
*/
|
||||
public static String getRevision() {
|
||||
return COMMON_VERSION_INFO._getRevision();
|
||||
@ -124,7 +124,7 @@ public static String getBranch() {
|
||||
public static String getDate() {
|
||||
return COMMON_VERSION_INFO._getDate();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* The user that compiled Hadoop.
|
||||
* @return the username of the user
|
||||
@ -132,25 +132,27 @@ public static String getDate() {
|
||||
public static String getUser() {
|
||||
return COMMON_VERSION_INFO._getUser();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get the URL for the Hadoop repository.
|
||||
* @return the URL of the Hadoop repository
|
||||
*/
|
||||
public static String getUrl() {
|
||||
return COMMON_VERSION_INFO._getUrl();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the checksum of the source files from which Hadoop was built.
|
||||
* @return the checksum of the source files
|
||||
*/
|
||||
public static String getSrcChecksum() {
|
||||
return COMMON_VERSION_INFO._getSrcChecksum();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the buildVersion which includes version,
|
||||
* revision, user and date.
|
||||
* @return the buildVersion
|
||||
*/
|
||||
public static String getBuildVersion(){
|
||||
return COMMON_VERSION_INFO._getBuildVersion();
|
||||
@ -158,6 +160,7 @@ public static String getBuildVersion(){
|
||||
|
||||
/**
|
||||
* Returns the protoc version used for the build.
|
||||
* @return the protoc version
|
||||
*/
|
||||
public static String getProtocVersion(){
|
||||
return COMMON_VERSION_INFO._getProtocVersion();
|
||||
|
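With the class now Public/Stable, downstream projects can read build metadata directly; a small sketch (the printed values depend entirely on the build):

```java
import org.apache.hadoop.util.VersionInfo;

public class PrintHadoopBuildInfo {
  public static void main(String[] args) {
    System.out.println("Version:  " + VersionInfo.getVersion());
    System.out.println("Revision: " + VersionInfo.getRevision());  // Git commit hash
    System.out.println("Branch:   " + VersionInfo.getBranch());
    System.out.println("Built by: " + VersionInfo.getUser() + " on " + VersionInfo.getDate());
    System.out.println("Checksum: " + VersionInfo.getSrcChecksum());
    System.out.println("Protoc:   " + VersionInfo.getProtocVersion());
  }
}
```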
@ -657,34 +657,33 @@
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.erasurecode.codec.rs.rawcoders</name>
<value>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory,org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory</value>
<description>
Comma separated raw coder implementations for the rs codec. Factories are
tried in the order listed; if an earlier factory fails to create a raw coder,
the next one in the list is used as a fallback.
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.erasurecode.codec.rs-legacy.rawcoders</name>
<value>org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy</value>
<description>
Comma separated raw coder implementations for the rs-legacy codec. Factories
are tried in the order listed; if an earlier factory fails to create a raw
coder, the next one in the list is used as a fallback.
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>io.erasurecode.codec.xor.rawcoders</name>
<value>org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory,org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory</value>
<description>
Comma separated raw coder implementations for the xor codec. Factories are
tried in the order listed; if an earlier factory fails to create a raw coder,
the next one in the list is used as a fallback.
</description>
|
||||
</property>
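Clients can also override the coder list per job instead of editing core-site.xml; for example, forcing the pure-Java RS factory on a host without ISA-L is a one-line configuration change. The wrapper class below is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class ForcePureJavaRsCoder {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A single entry means no native attempt and no further fallback.
    conf.set("io.erasurecode.codec.rs.rawcoders",
        "org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory");

    RawErasureEncoder encoder = CodecUtil.createRawEncoder(
        conf, ErasureCodeConstants.RS_CODEC_NAME, new ErasureCoderOptions(6, 3));
    System.out.println("Using " + encoder.getClass().getSimpleName());
  }
}
```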
|
||||
|
||||
<!-- file system properties -->
|
||||
|
||||
<property>
|
||||
<name>fs.defaultFS</name>
|
||||
@ -956,13 +955,8 @@
|
||||
configuration of AWS access key ID and secret access key in
|
||||
environment variables named AWS_ACCESS_KEY_ID and
|
||||
AWS_SECRET_ACCESS_KEY, as documented in the AWS SDK.
|
||||
3. org.apache.hadoop.fs.s3a.SharedInstanceProfileCredentialsProvider:
|
||||
a shared instance of
|
||||
com.amazonaws.auth.InstanceProfileCredentialsProvider from the AWS
|
||||
SDK, which supports use of instance profile credentials if running
|
||||
in an EC2 VM. Using this shared instance potentially reduces load
|
||||
on the EC2 instance metadata service for multi-threaded
|
||||
applications.
|
||||
3. com.amazonaws.auth.InstanceProfileCredentialsProvider: supports use
|
||||
of instance profile credentials if running in an EC2 VM.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
@ -2202,6 +2196,8 @@
|
||||
<description>
|
||||
The KeyProvider to use when managing zone keys, and interacting with
|
||||
encryption keys when reading and writing to an encryption zone.
|
||||
For hdfs clients, the provider path will be same as namenode's
|
||||
provider path.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
@ -2454,6 +2450,7 @@
|
||||
|
||||
|
||||
<!-- Azure Data Lake File System Configurations -->
|
||||
|
||||
<property>
|
||||
<name>fs.adl.impl</name>
|
||||
<value>org.apache.hadoop.fs.adl.AdlFileSystem</value>
|
||||
@ -2463,6 +2460,68 @@
|
||||
<name>fs.AbstractFileSystem.adl.impl</name>
|
||||
<value>org.apache.hadoop.fs.adl.Adl</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>adl.feature.ownerandgroup.enableupn</name>
|
||||
<value>false</value>
|
||||
<description>
|
||||
When true : User and Group in FileStatus/AclStatus response is
|
||||
represented as user friendly name as per Azure AD profile.
|
||||
|
||||
When false (default) : User and Group in FileStatus/AclStatus
|
||||
response is represented by the unique identifier from Azure AD
|
||||
profile (Object ID as GUID).
|
||||
|
||||
For optimal performance, false is recommended.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.adl.oauth2.access.token.provider.type</name>
|
||||
<value>ClientCredential</value>
|
||||
<description>
|
||||
Defines Azure Active Directory OAuth2 access token provider type.
|
||||
Supported types are ClientCredential, RefreshToken, and Custom.
|
||||
The ClientCredential type requires property fs.adl.oauth2.client.id,
|
||||
fs.adl.oauth2.credential, and fs.adl.oauth2.refresh.url.
|
||||
The RefreshToken type requires property fs.adl.oauth2.client.id and
|
||||
fs.adl.oauth2.refresh.token.
|
||||
The Custom type requires property fs.adl.oauth2.access.token.provider.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.adl.oauth2.client.id</name>
|
||||
<value></value>
|
||||
<description>The OAuth2 client id.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.adl.oauth2.credential</name>
|
||||
<value></value>
|
||||
<description>The OAuth2 access key.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.adl.oauth2.refresh.url</name>
|
||||
<value></value>
|
||||
<description>The OAuth2 token endpoint.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.adl.oauth2.refresh.token</name>
|
||||
<value></value>
|
||||
<description>The OAuth2 refresh token.</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.adl.oauth2.access.token.provider</name>
|
||||
<value></value>
|
||||
<description>
|
||||
The class name of the OAuth2 access token provider.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<!-- Azure Data Lake File System Configurations Ends Here-->
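The three ClientCredential properties can also be supplied programmatically when constructing the file system. The tenant URL, client id, key, and account name below are placeholders (the ADLS account URI format is an assumption), and real deployments should keep the secret in a credential provider rather than in code.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AdlClientCredentialSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.adl.oauth2.access.token.provider.type", "ClientCredential");
    // Placeholder credentials only.
    conf.set("fs.adl.oauth2.client.id", "00000000-0000-0000-0000-000000000000");
    conf.set("fs.adl.oauth2.credential", "APPLICATION-KEY");
    conf.set("fs.adl.oauth2.refresh.url",
        "https://login.microsoftonline.com/TENANT-ID/oauth2/token");

    // Placeholder store name; substitute the real account.
    FileSystem fs = FileSystem.get(
        URI.create("adl://myaccount.azuredatalakestore.net/"), conf);
    System.out.println(fs.exists(new Path("/")));
  }
}
```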
|
||||
|
||||
<property>
|
||||
|
@ -168,9 +168,9 @@ Some products such as Apache Oozie which access the services of Hadoop on behalf
|
||||
|
||||
Because the DataNode data transfer protocol does not use the Hadoop RPC framework, DataNodes must authenticate themselves using privileged ports which are specified by `dfs.datanode.address` and `dfs.datanode.http.address`. This authentication is based on the assumption that the attacker won't be able to get root privileges on DataNode hosts.
|
||||
|
||||
When you execute the `hdfs datanode` command as root, the server process binds privileged ports at first, then drops privilege and runs as the user account specified by `HDFS_DATANODE_SECURE_USER`. This startup process uses [the jsvc program](https://commons.apache.org/proper/commons-daemon/jsvc.html "Link to Apache Commons Jsvc") installed to `JSVC_HOME`. You must specify `HDFS_DATANODE_SECURE_USER` and `JSVC_HOME` as environment variables on start up (in `hadoop-env.sh`).
|
||||
|
||||
As of version 2.6.0, SASL can be used to authenticate the data transfer protocol. In this configuration, it is no longer required for secured clusters to start the DataNode as root using `jsvc` and bind to privileged ports. To enable SASL on data transfer protocol, set `dfs.data.transfer.protection` in hdfs-site.xml, set a non-privileged port for `dfs.datanode.address`, set `dfs.http.policy` to `HTTPS_ONLY` and make sure the `HDFS_DATANODE_SECURE_USER` environment variable is not defined. Note that it is not possible to use SASL on data transfer protocol if `dfs.datanode.address` is set to a privileged port. This is required for backwards-compatibility reasons.
|
||||
|
||||
In order to migrate an existing cluster that used root authentication to start using SASL instead, first ensure that version 2.6.0 or later has been deployed to all cluster nodes as well as any external applications that need to connect to the cluster. Only versions 2.6.0 and later of the HDFS client can connect to a DataNode that uses SASL for authentication of data transfer protocol, so it is vital that all callers have the correct version before migrating. After version 2.6.0 or later has been deployed everywhere, update configuration of any external applications to enable SASL. If an HDFS client is enabled for SASL, then it can connect successfully to a DataNode running with either root authentication or SASL authentication. Changing configuration for all clients guarantees that subsequent configuration changes on DataNodes will not disrupt the applications. Finally, each individual DataNode can be migrated by changing its configuration and restarting. It is acceptable to have a mix of some DataNodes running with root authentication and some DataNodes running with SASL authentication temporarily during this migration period, because an HDFS client enabled for SASL can connect to both.
|
||||
|
||||
@ -293,7 +293,7 @@ The following settings allow configuring SSL access to the NameNode web UI (opti
|
||||
| `dfs.encrypt.data.transfer.algorithm` | | optionally set to `3des` or `rc4` when using data encryption to control encryption algorithm |
|
||||
| `dfs.encrypt.data.transfer.cipher.suites` | | optionally set to `AES/CTR/NoPadding` to activate AES encryption when using data encryption |
|
||||
| `dfs.encrypt.data.transfer.cipher.key.bitlength` | | optionally set to `128`, `192` or `256` to control key bit length when using AES with data encryption |
|
||||
| `dfs.data.transfer.protection` | | `authentication` : authentication only; `integrity` : integrity check in addition to authentication; `privacy` : data encryption in addition to integrity This property is unspecified by default. Setting this property enables SASL for authentication of data transfer protocol. If this is enabled, then `dfs.datanode.address` must use a non-privileged port, `dfs.http.policy` must be set to `HTTPS_ONLY` and the `HDFS_DATANODE_SECURE_USER` environment variable must be undefined when starting the DataNode process. |
|
||||
|
||||
### WebHDFS
|
||||
|
||||
@ -413,7 +413,7 @@ Set the environment variable `HADOOP_JAAS_DEBUG` to `true`.
|
||||
export HADOOP_JAAS_DEBUG=true
|
||||
```
|
||||
|
||||
Edit the `log4j.properties` file to log Hadoop's security package at `DEBUG` level.
|
||||
|
||||
```
|
||||
log4j.logger.org.apache.hadoop.security=DEBUG
|
||||
@ -434,19 +434,19 @@ It contains a series of probes for the JVM's configuration and the environment,
|
||||
dumps out some system files (`/etc/krb5.conf`, `/etc/ntp.conf`), prints
|
||||
out some system state and then attempts to log in to Kerberos as the current user,
|
||||
or a specific principal in a named keytab.
|
||||
|
||||
|
||||
The output of the command can be used for local diagnostics, or forwarded to
|
||||
whoever supports the cluster.
|
||||
|
||||
The `KDiag` command has its own entry point; it is currently not hooked up
|
||||
to the end-user CLI.
|
||||
|
||||
It is invoked simply by passing its full classname to one of the `bin/hadoop`,
|
||||
`bin/hdfs` or `bin/yarn` commands. Accordingly, it will display the kerberos client
|
||||
state of the command used to invoke it.
|
||||
|
||||
```
|
||||
hadoop org.apache.hadoop.security.KDiag
|
||||
hdfs org.apache.hadoop.security.KDiag
|
||||
yarn org.apache.hadoop.security.KDiag
|
||||
```
|
||||
@ -557,8 +557,8 @@ hdfs org.apache.hadoop.security.KDiag --resource hbase-default.xml --resource hb
|
||||
yarn org.apache.hadoop.security.KDiag --resource yarn-default.xml --resource yarn-site.xml
|
||||
```
|
||||
|
||||
For extra logging during the operation, set the logging and `HADOOP_JAAS_DEBUG`
|
||||
environment variable to the values listed in "Troubleshooting". The JVM
|
||||
options are automatically set in KDiag.
|
||||
|
||||
#### `--secure`: Fail if the command is not executed on a secure cluster.
|
||||
@ -589,7 +589,7 @@ hdfs org.apache.hadoop.security.KDiag \
|
||||
--keylen 1024 \
|
||||
--keytab zk.service.keytab --principal zookeeper/devix.example.org@REALM
|
||||
```
|
||||
|
||||
|
||||
This attempts to perform all diagnostics without failing early, load in
|
||||
the HDFS and YARN XML resources, require a minimum key length of 1024 bytes,
|
||||
and log in as the principal `zookeeper/devix.example.org@REALM`, whose key must be in
|
||||
|
@ -32,6 +32,8 @@ HADOOP_CLIENT_OPTS="-Xmx1g -Dhadoop.socks.server=localhost:4000" hadoop fs -ls /
|
||||
|
||||
will increase the memory and send this command via a SOCKS proxy server.
|
||||
|
||||
NOTE: If 'YARN_CLIENT_OPTS' is defined, it will replace 'HADOOP_CLIENT_OPTS' when commands are run with 'yarn'.
|
||||
|
||||
### `(command)_(subcommand)_OPTS`
|
||||
|
||||
It is also possible to set options on a per subcommand basis. This allows for one to create special options for particular cases. The first part of the pattern is the command being used, but all uppercase. The second part of the command is the subcommand being used. Then finally followed by the string `_OPT`.
|
||||
@ -103,13 +105,15 @@ In addition, daemons that run in an extra security mode also support `(command)_
|
||||
|
||||
Apache Hadoop provides a way to do a user check per-subcommand. While this method is easily circumvented and should not be considered a security-feature, it does provide a mechanism by which to prevent accidents. For example, setting `HDFS_NAMENODE_USER=hdfs` will make the `hdfs namenode` and `hdfs --daemon start namenode` commands verify that the user running the commands are the hdfs user by checking the `USER` environment variable. This also works for non-daemons. Setting `HADOOP_DISTCP_USER=jane` will verify that `USER` is set to `jane` before being allowed to execute the `hadoop distcp` command.
|
||||
|
||||
If a \_USER environment variable exists and commands are run with a privilege (e.g., as root; see hadoop_privilege_check in the API documentation), execution will switch to the specified user first. For commands that support user account switching for security reasons and therefore have a SECURE\_USER variable (see more below), the base \_USER variable needs to be the user that is expected to be used to switch to the SECURE\_USER account. For example:
|
||||
|
||||
```bash
|
||||
HDFS_DATANODE_USER=root
|
||||
HDFS_DATANODE_SECURE_USER=hdfs
|
||||
```
|
||||
|
||||
will force 'hdfs --daemon start datanode' to be root, but will eventually switch to the hdfs user after the privileged work has been completed.
|
||||
|
||||
Be aware that if the \-\-workers flag is used, the user switch happens *after* ssh is invoked. The multi-daemon start and stop commands in sbin will, however, switch (if appropriate) prior and will therefore use the keys of the specified \_USER.
|
||||
|
||||
## Developer and Advanced Administrator Environment
|
||||
@ -172,7 +176,7 @@ which will result in the output of:
|
||||
world I see you
|
||||
```
|
||||
|
||||
It is also possible to add the new subcommands to the usage output. The `hadoop_add_subcommand` function adds text to the usage output. Utilizing the standard HADOOP\_SHELL\_EXECNAME variable, we can limit which command gets our new function.
|
||||
|
||||
```bash
|
||||
if [[ "${HADOOP_SHELL_EXECNAME}" = "yarn" ]]; then
|
||||
@ -191,12 +195,16 @@ function hdfs_subcommand_fetchdt
|
||||
|
||||
... will replace the existing `hdfs fetchdt` subcommand with a custom one.
|
||||
|
||||
Some key environment variables for Dynamic Subcommands:
|
||||
|
||||
* HADOOP\_CLASSNAME
|
||||
|
||||
This is the name of the Java class to use when program execution continues.
|
||||
|
||||
* HADOOP\_PRIV\_CLASSNAME
|
||||
|
||||
This is the name of the Java class to use when a daemon is expected to be run in a privileged mode. (See more below.)
|
||||
|
||||
* HADOOP\_SHELL\_EXECNAME
|
||||
|
||||
This is the name of the script that is being executed. It will be one of hadoop, hdfs, mapred, or yarn.
|
||||
@ -209,13 +217,13 @@ This is the subcommand that was passed on the command line.
|
||||
|
||||
This array contains the argument list after the Apache Hadoop common argument processing has taken place and is the same list that is passed to the subcommand function as arguments. For example, if `hadoop --debug subcmd 1 2 3` has been executed on the command line, then `${HADOOP_SUBCMD_ARGS[0]}` will be 1 and `hadoop_subcommand_subcmd` will also have $1 equal to 1. This array list MAY be modified by subcommand functions to add or delete values from the argument list for further processing.
|
||||
|
||||
* HADOOP\_SECURE\_CLASSNAME
|
||||
|
||||
If this subcommand runs a service that supports the secure mode, this variable should be set to the classname of the secure version.
|
||||
|
||||
* HADOOP\_SUBCMD\_SECURESERVICE
|
||||
|
||||
Setting this to true will force the subcommand to run in secure mode regardless of hadoop\_detect\_priv\_subcmd. It is expected that HADOOP\_SECURE\_USER will be set to the user that will be executing the final process. See more about secure mode.
|
||||
|
||||
* HADOOP\_SUBCMD\_SUPPORTDAEMONIZATION
|
||||
|
||||
@ -226,3 +234,12 @@ If this command can be executed as a daemon, set this to true.
|
||||
This is the full content of the command line, prior to any parsing done. It will contain flags such as `--debug`. It MAY NOT be manipulated.
|
||||
|
||||
The Apache Hadoop runtime facilities require functions exit if no further processing is required. For example, in the hello example above, Java and other facilities were not required so a simple `exit $?` was sufficient. However, if the function were to utilize `HADOOP_CLASSNAME`, then program execution must continue so that Java with the Apache Hadoop-specific parameters will be launched against the given Java class. Another example would be in the case of an unrecoverable error. It is the function's responsibility to print an appropriate message (preferably using the hadoop_error API call) and exit appropriately.
|
||||
|
||||
### Running with Privilege (Secure Mode)
|
||||
|
||||
Some daemons, such as the DataNode and the NFS gateway, may be run in a privileged mode. This means that they are expected to be launched as root and (by default) switched to another userid via jsvc. This allows for these daemons to grab a low, privileged port and then drop superuser privileges during normal execution. Running with privilege is also possible for 3rd parties utilizing Dynamic Subcommands. If the following are true:
|
||||
|
||||
* (command)\_(subcommand)\_SECURE\_USER environment variable is defined and points to a valid username
|
||||
* HADOOP\_SECURE\_CLASSNAME is defined and points to a valid Java class
|
||||
|
||||
then the shell scripts will attempt to run the class as a command with privilege as it would the built-ins. In general, users are expected to define the \_SECURE\_USER variable and developers define the \_CLASSNAME in their shell script bootstrap.
|
||||
|
@ -105,7 +105,7 @@ public void initializeMemberVariables() {
|
||||
// ADL properties are in a different subtree
|
||||
// - org.apache.hadoop.hdfs.web.ADLConfKeys
|
||||
xmlPrefixToSkipCompare.add("adl.");
|
||||
xmlPropsToSkipCompare.add("fs.adl.impl");
|
||||
xmlPrefixToSkipCompare.add("fs.adl.");
|
||||
xmlPropsToSkipCompare.add("fs.AbstractFileSystem.adl.impl");
|
||||
|
||||
// Azure properties are in a different class
|
||||
|
@ -126,4 +126,46 @@ public void testMkdirSlashHandling() throws Throwable {
|
||||
assertPathExists("check path existence without trailing slash failed",
|
||||
path("testmkdir/b"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMkdirsPopulatingAllNonexistentAncestors() throws IOException {
|
||||
describe("Verify mkdir will populate all its non-existent ancestors");
|
||||
final FileSystem fs = getFileSystem();
|
||||
|
||||
final Path parent = path("testMkdirsPopulatingAllNonexistentAncestors");
|
||||
assertTrue(fs.mkdirs(parent));
|
||||
assertPathExists(parent + " should exist before making nested dir", parent);
|
||||
|
||||
Path nested = path(parent + "/a/b/c/d/e/f/g/h/i/j/k/L");
|
||||
assertTrue(fs.mkdirs(nested));
|
||||
while (nested != null && !nested.equals(parent) && !nested.isRoot()) {
|
||||
assertPathExists(nested + " nested dir should exist", nested);
|
||||
nested = nested.getParent();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMkdirsDoesNotRemoveParentDirectories() throws IOException {
|
||||
describe("Verify mkdir will make its parent existent");
|
||||
final FileSystem fs = getFileSystem();
|
||||
|
||||
final Path parent = path("testMkdirsDoesNotRemoveParentDirectories");
|
||||
assertTrue(fs.mkdirs(parent));
|
||||
|
||||
Path p = parent;
|
||||
for (int i = 0; i < 10; i++) {
|
||||
assertTrue(fs.mkdirs(p));
|
||||
assertPathExists(p + " should exist after mkdir(" + p + ")", p);
|
||||
p = path(p + "/dir-" + i);
|
||||
}
|
||||
|
||||
// After mkdirs(sub-directory), its parent directory still exists
|
||||
p = p.getParent();
|
||||
while (p != null && !p.equals(parent) && !p.isRoot()) {
|
||||
assertPathExists("Path " + p + " should exist", p);
|
||||
assertIsDirectory(p);
|
||||
p = p.getParent();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -18,12 +18,16 @@
|
||||
package org.apache.hadoop.io.erasurecode;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoderLegacy;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoderLegacy;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawEncoder;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
@ -70,20 +74,73 @@ public void testDedicatedRawCoderKey() {
|
||||
numDataUnit, numParityUnit);
|
||||
|
||||
String dummyFactName = "DummyNoneExistingFactory";
|
||||
// set the dummy factory to rs-legacy and create a raw coder
|
||||
// with rs, which is OK as the raw coder key is not used
|
||||
// set the dummy factory to raw coders then fail to create any rs raw coder.
|
||||
conf.set(CodecUtil.
|
||||
IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY, dummyFactName);
|
||||
RawErasureEncoder encoder = CodecUtil.createRawEncoder(conf,
|
||||
ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(encoder instanceof RSRawEncoder);
|
||||
IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, dummyFactName);
|
||||
try {
|
||||
CodecUtil.createRawEncoder(conf,
|
||||
ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
|
||||
Assert.fail();
|
||||
} catch (Exception e) {
|
||||
GenericTestUtils.assertExceptionContains(
|
||||
"Fail to create raw erasure encoder with given codec: rs", e);
|
||||
}
|
||||
|
||||
// now create the raw coder with rs-legacy, which should throw exception
|
||||
conf.set(CodecUtil.
|
||||
IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODERS_KEY, dummyFactName);
|
||||
try {
|
||||
CodecUtil.createRawEncoder(conf,
|
||||
ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
|
||||
Assert.fail();
|
||||
} catch (Exception e) {
|
||||
GenericTestUtils.assertExceptionContains("Failed to create raw coder", e);
|
||||
GenericTestUtils.assertExceptionContains(
|
||||
"Fail to create raw erasure encoder with given codec: rs", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFallbackCoders() {
|
||||
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
|
||||
numDataUnit, numParityUnit);
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
|
||||
RSRawErasureCoderFactory.class.getCanonicalName() +
|
||||
"," + NativeRSRawErasureCoderFactory.class.getCanonicalName());
|
||||
// should return default raw coder of rs codec
|
||||
RawErasureEncoder encoder = CodecUtil.createRawEncoder(
|
||||
conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(encoder instanceof RSRawEncoder);
|
||||
RawErasureDecoder decoder = CodecUtil.createRawDecoder(
|
||||
conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(decoder instanceof RSRawDecoder);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testLegacyCodecFallback() {
|
||||
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
|
||||
numDataUnit, numParityUnit);
|
||||
// should return default raw coder of rs-legacy codec
|
||||
RawErasureEncoder encoder = CodecUtil.createRawEncoder(
|
||||
conf, ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(encoder instanceof RSRawEncoderLegacy);
|
||||
RawErasureDecoder decoder = CodecUtil.createRawDecoder(
|
||||
conf, ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(decoder instanceof RSRawDecoderLegacy);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIgnoreInvalidCodec() {
|
||||
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
|
||||
numDataUnit, numParityUnit);
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_XOR_RAWCODERS_KEY,
|
||||
"invalid-codec," +
|
||||
"org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory");
|
||||
// should return second coder specified by IO_ERASURECODE_CODEC_CODERS
|
||||
RawErasureEncoder encoder = CodecUtil.createRawEncoder(
|
||||
conf, ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(encoder instanceof XORRawEncoder);
|
||||
RawErasureDecoder decoder = CodecUtil.createRawDecoder(
|
||||
conf, ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
|
||||
Assert.assertTrue(decoder instanceof XORRawDecoder);
|
||||
}
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ public void testCodingDirectBufferWithConf_10x4_erasing_d0() {
|
||||
* This tests if the configuration items work or not.
|
||||
*/
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
|
||||
RSRawErasureCoderFactory.class.getCanonicalName());
|
||||
prepare(conf, 10, 4, new int[]{0}, new int[0]);
|
||||
|
||||
|
@ -57,7 +57,7 @@ public void testCodingDirectBufferWithConf_10x4_erasing_d0() {
|
||||
* This tests if the configuration items work or not.
|
||||
*/
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
|
||||
conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
|
||||
RSRawErasureCoderFactory.class.getCanonicalName());
|
||||
prepare(conf, 10, 4, new int[]{0}, new int[0]);
|
||||
|
||||
|
@ -39,6 +39,7 @@ static class MyMetrics {
|
||||
@Metric({"Counter2", "Counter2 desc"}) MutableCounterLong c2;
|
||||
@Metric MutableGaugeInt g1, g2;
|
||||
@Metric("g3 desc") MutableGaugeLong g3;
|
||||
@Metric("g4 desc") MutableGaugeFloat g4;
|
||||
@Metric MutableRate r1;
|
||||
@Metric MutableStat s1;
|
||||
@Metric MutableRates rs1;
|
||||
@ -53,6 +54,7 @@ static class MyMetrics {
|
||||
metrics.g1.incr();
|
||||
metrics.g2.incr();
|
||||
metrics.g3.incr();
|
||||
metrics.g4.incr();
|
||||
metrics.r1.add(1);
|
||||
metrics.s1.add(1);
|
||||
metrics.rs1.add("rs1", 1);
|
||||
@ -64,6 +66,7 @@ static class MyMetrics {
|
||||
verify(rb).addGauge(info("G1", "G1"), 1);
|
||||
verify(rb).addGauge(info("G2", "G2"), 1);
|
||||
verify(rb).addGauge(info("G3", "g3 desc"), 1L);
|
||||
verify(rb).addGauge(info("G4", "g4 desc"), 1f);
|
||||
verify(rb).addCounter(info("R1NumOps", "Number of ops for r1"), 1L);
|
||||
verify(rb).addGauge(info("R1AvgTime", "Average time for r1"), 1.0);
|
||||
verify(rb).addCounter(info("S1NumOps", "Number of ops for s1"), 1L);
|
||||
|
@ -42,13 +42,15 @@ public class TestMetricsRegistry {
|
||||
r.newCounter("c2", "c2 desc", 2L);
|
||||
r.newGauge("g1", "g1 desc", 3);
|
||||
r.newGauge("g2", "g2 desc", 4L);
|
||||
r.newGauge("g3", "g3 desc", 5f);
|
||||
r.newStat("s1", "s1 desc", "ops", "time");
|
||||
|
||||
assertEquals("num metrics in registry", 5, r.metrics().size());
|
||||
assertEquals("num metrics in registry", 6, r.metrics().size());
|
||||
assertTrue("c1 found", r.get("c1") instanceof MutableCounterInt);
|
||||
assertTrue("c2 found", r.get("c2") instanceof MutableCounterLong);
|
||||
assertTrue("g1 found", r.get("g1") instanceof MutableGaugeInt);
|
||||
assertTrue("g2 found", r.get("g2") instanceof MutableGaugeLong);
|
||||
assertTrue("g3 found", r.get("g3") instanceof MutableGaugeFloat);
|
||||
assertTrue("s1 found", r.get("s1") instanceof MutableStat);
|
||||
|
||||
expectMetricsException("Metric name c1 already exists", new Runnable() {
|
||||
|
@ -59,6 +59,7 @@ public class TestMutableMetrics {
|
||||
registry.newCounter("c2", "long counter", 2L);
|
||||
registry.newGauge("g1", "int gauge", 3);
|
||||
registry.newGauge("g2", "long gauge", 4L);
|
||||
registry.newGauge("g3", "float gauge", 5f);
|
||||
registry.newStat("s1", "stat", "Ops", "Time", true).add(0);
|
||||
registry.newRate("s2", "stat", false).add(0);
|
||||
|
||||
@ -74,6 +75,7 @@ public class TestMutableMetrics {
|
||||
verify(mb).addCounter(info("c2", "long counter"), 2L);
|
||||
verify(mb).addGauge(info("g1", "int gauge"), 3);
|
||||
verify(mb).addGauge(info("g2", "long gauge"), 4L);
|
||||
verify(mb).addGauge(info("g3", "float gauge"), 5f);
|
||||
verify(mb).addCounter(info("S1NumOps", "Number of ops for stat"), 1L);
|
||||
verify(mb).addGauge(eq(info("S1AvgTime", "Average time for stat")),
|
||||
eq(0.0, EPSILON));
|
||||
|
@ -18,6 +18,7 @@
|
||||
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
@ -28,25 +29,39 @@
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
public class FakeTimer extends Timer {
|
||||
private long nowMillis;
|
||||
private long nowNanos;
|
||||
|
||||
/** Constructs a FakeTimer with a non-zero value */
|
||||
public FakeTimer() {
|
||||
nowMillis = 1000; // Initialize with a non-trivial value.
|
||||
// Initialize with a non-trivial value.
|
||||
nowNanos = TimeUnit.MILLISECONDS.toNanos(1000);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long now() {
|
||||
return nowMillis;
|
||||
return TimeUnit.NANOSECONDS.toMillis(nowNanos);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long monotonicNow() {
|
||||
return nowMillis;
|
||||
return TimeUnit.NANOSECONDS.toMillis(nowNanos);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long monotonicNowNanos() {
|
||||
return nowNanos;
|
||||
}
|
||||
|
||||
/** Increases the time by milliseconds */
|
||||
public void advance(long advMillis) {
|
||||
nowMillis += advMillis;
|
||||
nowNanos += TimeUnit.MILLISECONDS.toNanos(advMillis);
|
||||
}
|
||||
|
||||
/**
|
||||
* Increases the time by nanoseconds.
|
||||
* @param advNanos Nanoseconds to advance by.
|
||||
*/
|
||||
public void advanceNanos(long advNanos) {
|
||||
nowNanos += advNanos;
|
||||
}
|
||||
}
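A sketch of driving the new nanosecond clock from a unit test; it relies only on the methods shown above.

```java
import static org.junit.Assert.assertEquals;

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.FakeTimer;
import org.junit.Test;

public class TestFakeTimerNanos {
  @Test
  public void testNanoAndMilliViewsStayConsistent() {
    FakeTimer timer = new FakeTimer();
    long start = timer.monotonicNowNanos();

    // Advance by 2.5 ms using the nanosecond-granularity method.
    timer.advanceNanos(TimeUnit.MICROSECONDS.toNanos(2500));

    assertEquals(start + 2_500_000L, timer.monotonicNowNanos());
    // The millisecond views are derived from the same nanosecond counter.
    assertEquals(TimeUnit.NANOSECONDS.toMillis(timer.monotonicNowNanos()),
        timer.monotonicNow());
  }
}
```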
|
||||
|
@ -213,7 +213,7 @@ private static class LightWeightCacheTestCase implements GSet<IntEntry, IntEntry
|
||||
int iterate_count = 0;
|
||||
int contain_count = 0;
|
||||
|
||||
private long currentTestTime = ran.nextInt();
|
||||
private FakeTimer fakeTimer = new FakeTimer();
|
||||
|
||||
LightWeightCacheTestCase(int tablelength, int sizeLimit,
|
||||
long creationExpirationPeriod, long accessExpirationPeriod,
|
||||
@ -230,12 +230,7 @@ private static class LightWeightCacheTestCase implements GSet<IntEntry, IntEntry
|
||||
|
||||
data = new IntData(datasize, modulus);
|
||||
cache = new LightWeightCache<IntEntry, IntEntry>(tablelength, sizeLimit,
|
||||
creationExpirationPeriod, 0, new LightWeightCache.Clock() {
|
||||
@Override
|
||||
long currentTime() {
|
||||
return currentTestTime;
|
||||
}
|
||||
});
|
||||
creationExpirationPeriod, 0, fakeTimer);
|
||||
|
||||
Assert.assertEquals(0, cache.size());
|
||||
}
|
||||
@ -247,7 +242,7 @@ private boolean containsTest(IntEntry key) {
|
||||
} else {
|
||||
final IntEntry h = hashMap.remove(key);
|
||||
if (h != null) {
|
||||
Assert.assertTrue(cache.isExpired(h, currentTestTime));
|
||||
Assert.assertTrue(cache.isExpired(h, fakeTimer.monotonicNowNanos()));
|
||||
}
|
||||
}
|
||||
return c;
|
||||
@ -266,7 +261,7 @@ private IntEntry getTest(IntEntry key) {
|
||||
} else {
|
||||
final IntEntry h = hashMap.remove(key);
|
||||
if (h != null) {
|
||||
Assert.assertTrue(cache.isExpired(h, currentTestTime));
|
||||
Assert.assertTrue(cache.isExpired(h, fakeTimer.monotonicNowNanos()));
|
||||
}
|
||||
}
|
||||
return c;
|
||||
@ -286,7 +281,7 @@ private IntEntry putTest(IntEntry entry) {
|
||||
final IntEntry h = hashMap.put(entry);
|
||||
if (h != null && h != entry) {
|
||||
// if h == entry, its expiration time is already updated
|
||||
Assert.assertTrue(cache.isExpired(h, currentTestTime));
|
||||
Assert.assertTrue(cache.isExpired(h, fakeTimer.monotonicNowNanos()));
|
||||
}
|
||||
}
|
||||
return c;
|
||||
@ -305,7 +300,7 @@ private IntEntry removeTest(IntEntry key) {
|
||||
} else {
|
||||
final IntEntry h = hashMap.remove(key);
|
||||
if (h != null) {
|
||||
Assert.assertTrue(cache.isExpired(h, currentTestTime));
|
||||
Assert.assertTrue(cache.isExpired(h, fakeTimer.monotonicNowNanos()));
|
||||
}
|
||||
}
|
||||
return c;
|
||||
@ -339,7 +334,7 @@ boolean tossCoin() {
|
||||
}
|
||||
|
||||
void check() {
|
||||
currentTestTime += ran.nextInt() & 0x3;
|
||||
fakeTimer.advanceNanos(ran.nextInt() & 0x3);
|
||||
|
||||
//test size
|
||||
sizeTest();
|
||||
|
@ -15,7 +15,7 @@

load hadoop-functions_test_helper

@test "hadoop_get_verify_uservar" {
run hadoop_get_verify_uservar cool program
@test "hadoop_build_custom_subcmd_var" {
run hadoop_build_custom_subcmd_var cool program USER
[ "${output}" = "COOL_PROGRAM_USER" ]
}
@ -0,0 +1,34 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load hadoop-functions_test_helper

@test "hadoop_detect_priv_subcmd (no classname) " {
run hadoop_detect_priv_subcmd test app
[ "${status}" = "1" ]
}

@test "hadoop_detect_priv_subcmd (classname; no user) " {
export HADOOP_SECURE_CLASSNAME=fake
run hadoop_detect_priv_subcmd test app
[ "${status}" = "1" ]
}

@test "hadoop_detect_priv_subcmd (classname; user) " {
export HADOOP_SECURE_CLASSNAME=fake
export TEST_APP_SECURE_USER=test
run hadoop_detect_priv_subcmd test app
[ "${status}" = "0" ]
}
@ -15,39 +15,39 @@

load hadoop-functions_test_helper

@test "hadoop_verify_user (hadoop: no setting)" {
run hadoop_verify_user hadoop test
@test "hadoop_verify_user_perm (hadoop: no setting)" {
run hadoop_verify_user_perm hadoop test
[ "${status}" = "0" ]
}

@test "hadoop_verify_user (yarn: no setting)" {
run hadoop_verify_user yarn test
@test "hadoop_verify_user_perm (yarn: no setting)" {
run hadoop_verify_user_perm yarn test
[ "${status}" = "0" ]
}

@test "hadoop_verify_user (hadoop: allow)" {
@test "hadoop_verify_user_perm (hadoop: allow)" {
HADOOP_TEST_USER=${USER}
run hadoop_verify_user hadoop test
run hadoop_verify_user_perm hadoop test
[ "${status}" = "0" ]
}

@test "hadoop_verify_user (yarn: allow)" {
@test "hadoop_verify_user_perm (yarn: allow)" {
YARN_TEST_USER=${USER}
run hadoop_verify_user yarn test
run hadoop_verify_user_perm yarn test
[ "${status}" = "0" ]
}

# colon isn't a valid username, so let's use it
# this should fail regardless of who the user is
# that is running the test code
@test "hadoop_verify_user (hadoop: disallow)" {
@test "hadoop_verify_user_perm (hadoop: disallow)" {
HADOOP_TEST_USER=:
run hadoop_verify_user hadoop test
run hadoop_verify_user_perm hadoop test
[ "${status}" = "1" ]
}

@test "hadoop_verify_user (yarn: disallow)" {
@test "hadoop_verify_user_perm (yarn: disallow)" {
YARN_TEST_USER=:
run hadoop_verify_user yarn test
run hadoop_verify_user_perm yarn test
[ "${status}" = "1" ]
}
@ -0,0 +1,44 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load hadoop-functions_test_helper

@test "hadoop_verify_user_resolves (bad: null)" {
run hadoop_verify_user_resolves
[ "${status}" = "1" ]
}

@test "hadoop_verify_user_resolves (bad: var string)" {
run hadoop_verify_user_resolves PrObAbLyWiLlNoTeXiSt
[ "${status}" = "1" ]
}

@test "hadoop_verify_user_resolves (bad: number as var)" {
run hadoop_verify_user_resolves 501
[ "${status}" = "1" ]
}

@test "hadoop_verify_user_resolves (good: name)" {
myvar=$(id -u -n)
run hadoop_verify_user_resolves myvar
[ "${status}" = "0" ]
}

@test "hadoop_verify_user_resolves (skip: number)" {
skip "id on uids is not platform consistent"
myvar=1
run hadoop_verify_user_resolves myvar
[ "${status}" = "0" ]
}
@ -67,6 +67,7 @@
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@ -160,6 +161,7 @@
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
@ -197,6 +199,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public static final Logger LOG = LoggerFactory.getLogger(DFSClient.class);
// 1 hour
public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L;
private static final String DFS_KMS_PREFIX = "dfs-kms-";

private final Configuration conf;
private final Tracer tracer;
@ -214,7 +217,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
final SocketFactory socketFactory;
final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
private final FileSystem.Statistics stats;
private final String authority;
private final URI namenodeUri;
private final Random r = new Random();
private SocketAddress[] localInterfaceAddrs;
private DataEncryptionKey encryptionKey;
@ -228,6 +231,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
private final int smallBufferSize;
private URI keyProviderUri = null;

public DfsClientConf getConf() {
return dfsClientConf;
@ -298,7 +302,7 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,

this.ugi = UserGroupInformation.getCurrentUser();

this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
this.namenodeUri = nameNodeUri;
this.clientName = "DFSClient_" + dfsClientConf.getTaskId() + "_" +
ThreadLocalRandom.current().nextInt() + "_" +
Thread.currentThread().getId();
@ -454,7 +458,8 @@ void checkOpen() throws IOException {
* be returned until all output streams are closed.
*/
public LeaseRenewer getLeaseRenewer() {
return LeaseRenewer.getInstance(authority, ugi, this);
return LeaseRenewer.getInstance(
namenodeUri != null ? namenodeUri.getAuthority() : "null", ugi, this);
}

/** Get a lease and start automatic renewal */
@ -1185,13 +1190,31 @@ public DFSOutputStream create(String src, FsPermission permission,
long blockSize, Progressable progress, int buffersize,
ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes)
throws IOException {
return create(src, permission, flag, createParent, replication, blockSize,
progress, buffersize, checksumOpt, favoredNodes, null);
}

/**
* Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
* Progressable, int, ChecksumOpt, InetSocketAddress[])} with the addition of
* ecPolicyName that is used to specify a specific erasure coding policy
* instead of inheriting any policy from this new file's parent directory.
* This policy will be persisted in HDFS. A value of null means inheriting
* parent groups' whatever policy.
*/
public DFSOutputStream create(String src, FsPermission permission,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, Progressable progress, int buffersize,
ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes,
String ecPolicyName) throws IOException {
checkOpen();
final FsPermission masked = applyUMask(permission);
LOG.debug("{}: masked={}", src, masked);
final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
src, masked, flag, createParent, replication, blockSize, progress,
dfsClientConf.createChecksum(checksumOpt),
getFavoredNodesStr(favoredNodes));
getFavoredNodesStr(favoredNodes), ecPolicyName);
beginFileLease(result.getFileId(), result);
return result;
}
@ -1244,7 +1267,8 @@ public DFSOutputStream primitiveCreate(String src, FsPermission absPermission,
if (result == null) {
DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
flag, createParent, replication, blockSize, progress, checksum, null);
flag, createParent, replication, blockSize, progress, checksum,
null, null);
}
beginFileLease(result.getFileId(), result);
return result;
@ -2851,8 +2875,66 @@ DFSHedgedReadMetrics getHedgedReadMetrics() {
return HEDGED_READ_METRIC;
}

public KeyProvider getKeyProvider() {
return clientContext.getKeyProviderCache().get(conf);
/**
* Returns a key to map namenode uri to key provider uri.
* Tasks will lookup this key to find key Provider.
*/
public Text getKeyProviderMapKey() {
return new Text(DFS_KMS_PREFIX + namenodeUri.getScheme()
+"://" + namenodeUri.getAuthority());
}

/**
* The key provider uri is searched in the following order.
* 1. If there is a mapping in Credential's secrets map for namenode uri.
* 2. From namenode getServerDefaults rpc.
* 3. Finally fallback to local conf.
* @return keyProviderUri if found from either of above 3 cases,
* null otherwise
* @throws IOException
*/
URI getKeyProviderUri() throws IOException {
if (keyProviderUri != null) {
return keyProviderUri;
}

// Lookup the secret in credentials object for namenodeuri.
Credentials credentials = ugi.getCredentials();
byte[] keyProviderUriBytes = credentials.getSecretKey(getKeyProviderMapKey());
if(keyProviderUriBytes != null) {
keyProviderUri =
URI.create(DFSUtilClient.bytes2String(keyProviderUriBytes));
return keyProviderUri;
}

// Query the namenode for the key provider uri.
FsServerDefaults serverDefaults = getServerDefaults();
if (serverDefaults.getKeyProviderUri() != null) {
if (!serverDefaults.getKeyProviderUri().isEmpty()) {
keyProviderUri = URI.create(serverDefaults.getKeyProviderUri());
}
return keyProviderUri;
}

// Last thing is to trust its own conf to be backwards compatible.
String keyProviderUriStr = conf.getTrimmed(
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
if (keyProviderUriStr != null && !keyProviderUriStr.isEmpty()) {
keyProviderUri = URI.create(keyProviderUriStr);
}
return keyProviderUri;
}

public KeyProvider getKeyProvider() throws IOException {
return clientContext.getKeyProviderCache().get(conf, getKeyProviderUri());
}

/*
* Should be used only for testing.
*/
@VisibleForTesting
public void setKeyProviderUri(URI providerUri) {
this.keyProviderUri = providerUri;
}

@VisibleForTesting
@ -2862,11 +2944,10 @@ public void setKeyProvider(KeyProvider provider) {

/**
* Probe for encryption enabled on this filesystem.
* See {@link DFSUtilClient#isHDFSEncryptionEnabled(Configuration)}
* @return true if encryption is enabled
*/
public boolean isHDFSEncryptionEnabled() {
return DFSUtilClient.isHDFSEncryptionEnabled(this.conf);
public boolean isHDFSEncryptionEnabled() throws IOException{
return getKeyProviderUri() != null;
}

/**
@ -50,6 +50,7 @@
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@ -122,6 +123,7 @@ public class DFSOutputStream extends FSOutputSummer
private final EnumSet<AddBlockFlag> addBlockFlags;
protected final AtomicReference<CachingStrategy> cachingStrategy;
private FileEncryptionInfo fileEncryptionInfo;
private int writePacketSize;

/** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets.*/
protected DFSPacket createPacket(int packetSize, int chunksPerPkt,
@ -202,6 +204,8 @@ private DFSOutputStream(DFSClient dfsClient, String src,
+"{}", src);
}

initWritePacketSize();

this.bytesPerChecksum = checksum.getBytesPerChecksum();
if (bytesPerChecksum <= 0) {
throw new HadoopIllegalArgumentException(
@ -216,6 +220,21 @@ private DFSOutputStream(DFSClient dfsClient, String src,
this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
}

/**
* Ensures the configured writePacketSize never exceeds
* PacketReceiver.MAX_PACKET_SIZE.
*/
private void initWritePacketSize() {
writePacketSize = dfsClient.getConf().getWritePacketSize();
if (writePacketSize > PacketReceiver.MAX_PACKET_SIZE) {
LOG.warn(
"Configured write packet exceeds {} bytes as max,"
+ " using {} bytes.",
PacketReceiver.MAX_PACKET_SIZE, PacketReceiver.MAX_PACKET_SIZE);
writePacketSize = PacketReceiver.MAX_PACKET_SIZE;
}
}

/** Construct a new output stream for creating a file. */
protected DFSOutputStream(DFSClient dfsClient, String src,
HdfsFileStatus stat, EnumSet<CreateFlag> flag, Progressable progress,
@ -236,7 +255,8 @@ protected DFSOutputStream(DFSClient dfsClient, String src,
static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize, Progressable progress,
DataChecksum checksum, String[] favoredNodes) throws IOException {
DataChecksum checksum, String[] favoredNodes, String ecPolicyName)
throws IOException {
try (TraceScope ignored =
dfsClient.newPathTraceScope("newStreamForCreate", src)) {
HdfsFileStatus stat = null;
@ -250,7 +270,7 @@ static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
try {
stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
new EnumSetWritable<>(flag), createParent, replication,
blockSize, SUPPORTED_CRYPTO_VERSIONS);
blockSize, SUPPORTED_CRYPTO_VERSIONS, ecPolicyName);
break;
} catch (RemoteException re) {
IOException e = re.unwrapRemoteException(
@ -489,12 +509,28 @@ protected void adjustChunkBoundary() {
}

if (!getStreamer().getAppendChunk()) {
int psize = Math.min((int)(blockSize- getStreamer().getBytesCurBlock()),
dfsClient.getConf().getWritePacketSize());
final int psize = (int) Math
.min(blockSize - getStreamer().getBytesCurBlock(), writePacketSize);
computePacketChunkSize(psize, bytesPerChecksum);
}
}

/**
* Used in test only.
*/
@VisibleForTesting
void setAppendChunk(final boolean appendChunk) {
getStreamer().setAppendChunk(appendChunk);
}

/**
* Used in test only.
*/
@VisibleForTesting
void setBytesCurBlock(final long bytesCurBlock) {
getStreamer().setBytesCurBlock(bytesCurBlock);
}

/**
* if encountering a block boundary, send an empty packet to
* indicate the end of block and reset bytesCurBlock.
@ -169,6 +169,19 @@ static String addSuffix(String key, String suffix) {
return key + "." + suffix;
}

/**
* Returns list of InetSocketAddress corresponding to HA NN RPC addresses from
* the configuration.
*
* @param conf configuration
* @return list of InetSocketAddresses
*/
public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
Configuration conf) {
return DFSUtilClient.getAddresses(conf, null,
HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
}

/**
* Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
* the configuration.
@ -100,6 +100,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

import javax.annotation.Nonnull;

/****************************************************************
* Implementation of the abstract FileSystem for the DFS system.
* This object is the way end-user code interacts with a Hadoop
@ -456,13 +458,18 @@ public FSDataOutputStream next(final FileSystem fs, final Path p)
* at the creation time only. And with favored nodes, blocks will be pinned
* on the datanodes to prevent balancing move the block. HDFS could move the
* blocks during replication, to move the blocks from favored nodes. A value
* of null means no favored nodes for this create
* of null means no favored nodes for this create.
* Another addition is ecPolicyName. A non-null ecPolicyName specifies an
* explicit erasure coding policy for this file, overriding the inherited
* policy. A null ecPolicyName means the file will inherit its EC policy from
* an ancestor (the default).
*/
private HdfsDataOutputStream create(final Path f,
final FsPermission permission, EnumSet<CreateFlag> flag,
final FsPermission permission, final EnumSet<CreateFlag> flag,
final int bufferSize, final short replication, final long blockSize,
final Progressable progress, final ChecksumOpt checksumOpt,
final InetSocketAddress[] favoredNodes) throws IOException {
final InetSocketAddress[] favoredNodes, final String ecPolicyName)
throws IOException {
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.CREATE);
Path absF = fixRelativePart(f);
@ -471,7 +478,7 @@ private HdfsDataOutputStream create(final Path f,
public HdfsDataOutputStream doCall(final Path p) throws IOException {
final DFSOutputStream out = dfs.create(getPathName(f), permission,
flag, true, replication, blockSize, progress, bufferSize,
checksumOpt, favoredNodes);
checksumOpt, favoredNodes, ecPolicyName);
return dfs.createWrappedOutputStream(out, statistics);
}
@Override
@ -480,7 +487,7 @@ public HdfsDataOutputStream next(final FileSystem fs, final Path p)
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem)fs;
return myDfs.create(p, permission, flag, bufferSize, replication,
blockSize, progress, checksumOpt, favoredNodes);
blockSize, progress, checksumOpt, favoredNodes, ecPolicyName);
}
throw new UnsupportedOperationException("Cannot create with" +
" favoredNodes through a symlink to a non-DistributedFileSystem: "
@ -2409,12 +2416,15 @@ public Void next(final FileSystem fs, final Path p)
public Token<?>[] addDelegationTokens(
final String renewer, Credentials credentials) throws IOException {
Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
if (dfs.isHDFSEncryptionEnabled()) {
URI keyProviderUri = dfs.getKeyProviderUri();
if (keyProviderUri != null) {
KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(dfs.getKeyProvider());
Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
addDelegationTokens(renewer, credentials);
credentials.addSecretKey(dfs.getKeyProviderMapKey(),
DFSUtilClient.string2Bytes(keyProviderUri.toString()));
if (tokens != null && kpTokens != null) {
Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
System.arraycopy(tokens, 0, all, 0, tokens.length);
@ -2551,7 +2561,13 @@ public Void next(final FileSystem fs, final Path p) throws IOException {
*/
@Override
public Path getTrashRoot(Path path) {
if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
try {
if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
return super.getTrashRoot(path);
}
} catch (IOException ioe) {
DFSClient.LOG.warn("Exception while checking whether encryption zone is "
+ "supported", ioe);
return super.getTrashRoot(path);
}

@ -2636,6 +2652,7 @@ public static class HdfsDataOutputStreamBuilder
extends FSDataOutputStreamBuilder {
private final DistributedFileSystem dfs;
private InetSocketAddress[] favoredNodes = null;
private String ecPolicyName = null;

public HdfsDataOutputStreamBuilder(DistributedFileSystem dfs, Path path) {
super(dfs, path);
@ -2647,17 +2664,29 @@ protected InetSocketAddress[] getFavoredNodes() {
}

public HdfsDataOutputStreamBuilder setFavoredNodes(
final InetSocketAddress[] nodes) {
@Nonnull final InetSocketAddress[] nodes) {
Preconditions.checkNotNull(nodes);
favoredNodes = nodes.clone();
return this;
}

protected String getEcPolicyName() {
return ecPolicyName;
}

public HdfsDataOutputStreamBuilder setEcPolicyName(
@Nonnull final String policyName) {
Preconditions.checkNotNull(policyName);
ecPolicyName = policyName;
return this;
}

@Override
public HdfsDataOutputStream build() throws IOException {
return dfs.create(getPath(), getPermission(), getFlags(),
getBufferSize(), getReplication(), getBlockSize(),
getProgress(), getChecksumOpt(), getFavoredNodes());
getProgress(), getChecksumOpt(), getFavoredNodes(),
getEcPolicyName());
}
}
@ -20,15 +20,29 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Collection;

import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;
import static org.apache.hadoop.security.SecurityUtil.buildTokenService;

@InterfaceAudience.Private
public class HAUtilClient {
private static final Logger LOG = LoggerFactory.getLogger(HAUtilClient.class);

private static final DelegationTokenSelector tokenSelector =
new DelegationTokenSelector();

/**
* @return true if the given nameNodeUri appears to be a logical URI.
*/
@ -92,4 +106,45 @@ public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
public static boolean isTokenForLogicalUri(Token<?> token) {
return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
}

/**
* Locate a delegation token associated with the given HA cluster URI, and if
* one is found, clone it to also represent the underlying namenode address.
* @param ugi the UGI to modify
* @param haUri the logical URI for the cluster
* @param nnAddrs collection of NNs in the cluster to which the token
* applies
*/
public static void cloneDelegationTokenForLogicalUri(
UserGroupInformation ugi, URI haUri,
Collection<InetSocketAddress> nnAddrs) {
// this cloning logic is only used by hdfs
Text haService = HAUtilClient.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME);
Token<DelegationTokenIdentifier> haToken =
tokenSelector.selectToken(haService, ugi.getTokens());
if (haToken != null) {
for (InetSocketAddress singleNNAddr : nnAddrs) {
// this is a minor hack to prevent physical HA tokens from being
// exposed to the user via UGI.getCredentials(), otherwise these
// cloned tokens may be inadvertently propagated to jobs
Token<DelegationTokenIdentifier> specificToken =
haToken.privateClone(buildTokenService(singleNNAddr));
Text alias = new Text(
HAUtilClient.buildTokenServicePrefixForLogicalUri(
HdfsConstants.HDFS_URI_SCHEME)
+ "//" + specificToken.getService());
ugi.addToken(alias, specificToken);
if (LOG.isDebugEnabled()) {
LOG.debug("Mapped HA service delegation token for logical URI " +
haUri + " to namenode " + singleNNAddr);
}
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("No HA service delegation token found for logical URI " +
haUri);
}
}
}
}
@ -26,6 +26,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.util.KMSUtil;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
@ -66,29 +67,29 @@ public void onRemoval(
.build();
}

public KeyProvider get(final Configuration conf) {
URI kpURI = createKeyProviderURI(conf);
if (kpURI == null) {
public KeyProvider get(final Configuration conf,
final URI serverProviderUri) {
if (serverProviderUri == null) {
return null;
}
try {
return cache.get(kpURI, new Callable<KeyProvider>() {
return cache.get(serverProviderUri, new Callable<KeyProvider>() {
@Override
public KeyProvider call() throws Exception {
return DFSUtilClient.createKeyProvider(conf);
return KMSUtil.createKeyProviderFromUri(conf, serverProviderUri);
}
});
} catch (Exception e) {
LOG.error("Could not create KeyProvider for DFSClient !!", e.getCause());
LOG.error("Could not create KeyProvider for DFSClient !!", e);
return null;
}
}

private URI createKeyProviderURI(Configuration conf) {
final String providerUriStr = conf.getTrimmed(
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, "");
CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
// No provider set in conf
if (providerUriStr.isEmpty()) {
if (providerUriStr == null || providerUriStr.isEmpty()) {
LOG.error("Could not find uri with key ["
+ CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH
+ "] to create a keyProvider !!");
@ -28,6 +28,8 @@
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.ha.ClientHAProxyFactory;
import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -212,6 +214,14 @@ public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
public static <T> AbstractNNFailoverProxyProvider<T> createFailoverProxyProvider(
Configuration conf, URI nameNodeUri, Class<T> xface, boolean checkPort,
AtomicBoolean fallbackToSimpleAuth) throws IOException {
return createFailoverProxyProvider(conf, nameNodeUri, xface, checkPort,
fallbackToSimpleAuth, new ClientHAProxyFactory<T>());
}

protected static <T> AbstractNNFailoverProxyProvider<T> createFailoverProxyProvider(
Configuration conf, URI nameNodeUri, Class<T> xface, boolean checkPort,
AtomicBoolean fallbackToSimpleAuth, HAProxyFactory<T> proxyFactory)
throws IOException {
Class<FailoverProxyProvider<T>> failoverProxyProviderClass = null;
AbstractNNFailoverProxyProvider<T> providerNN;
try {
@ -223,9 +233,10 @@ public static <T> AbstractNNFailoverProxyProvider<T> createFailoverProxyProvider
}
// Create a proxy provider instance.
Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
.getConstructor(Configuration.class, URI.class, Class.class);
.getConstructor(Configuration.class, URI.class,
Class.class, HAProxyFactory.class);
FailoverProxyProvider<T> provider = ctor.newInstance(conf, nameNodeUri,
xface);
xface, proxyFactory);

// If the proxy provider is of an old implementation, wrap it.
if (!(provider instanceof AbstractNNFailoverProxyProvider)) {
@ -67,6 +67,7 @@ public interface HdfsClientConfigKeys {

String PREFIX = "dfs.client.";
String DFS_NAMESERVICES = "dfs.nameservices";
String DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address";
int DFS_NAMENODE_HTTP_PORT_DEFAULT = 9870;
String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871;
@ -160,6 +160,10 @@ LocatedBlocks getBlockLocations(String src, long offset, long length)
* @param replication block replication factor.
* @param blockSize maximum block size.
* @param supportedVersions CryptoProtocolVersions supported by the client
* @param ecPolicyName the name of erasure coding policy. A null value means
* this file will inherit its parent directory's policy,
* either traditional replication or erasure coding
* policy.
*
* @return the status of the created file, it could be null if the server
* doesn't support returning the file status
@ -193,7 +197,7 @@ LocatedBlocks getBlockLocations(String src, long offset, long length)
HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions)
CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
throws IOException;

/**
@ -144,12 +144,6 @@ public enum DatanodeReportType {
ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE
}

public static final byte RS_6_3_POLICY_ID = 1;
public static final byte RS_3_2_POLICY_ID = 2;
public static final byte RS_6_3_LEGACY_POLICY_ID = 3;
public static final byte XOR_2_1_POLICY_ID = 4;
public static final byte RS_10_4_POLICY_ID = 5;

/* Hidden constructor */
protected HdfsConstants() {
}
@ -0,0 +1,121 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

/**
* <p>The set of built-in erasure coding policies.</p>
* <p>Although this is a private class, EC policy IDs need to be treated like a
* stable interface. Adding, modifying, or removing built-in policies can cause
* inconsistencies with older clients.</p>
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
public final class SystemErasureCodingPolicies {

// Private constructor, this is a utility class.
private SystemErasureCodingPolicies() {}

// 64 KB
private static final int DEFAULT_CELLSIZE = 64 * 1024;

public static final byte RS_6_3_POLICY_ID = 1;
private static final ErasureCodingPolicy SYS_POLICY1 =
new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_SCHEMA,
DEFAULT_CELLSIZE, RS_6_3_POLICY_ID);

public static final byte RS_3_2_POLICY_ID = 2;
private static final ErasureCodingPolicy SYS_POLICY2 =
new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA,
DEFAULT_CELLSIZE, RS_3_2_POLICY_ID);

public static final byte RS_6_3_LEGACY_POLICY_ID = 3;
private static final ErasureCodingPolicy SYS_POLICY3 =
new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
DEFAULT_CELLSIZE, RS_6_3_LEGACY_POLICY_ID);

public static final byte XOR_2_1_POLICY_ID = 4;
private static final ErasureCodingPolicy SYS_POLICY4 =
new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA,
DEFAULT_CELLSIZE, XOR_2_1_POLICY_ID);

public static final byte RS_10_4_POLICY_ID = 5;
private static final ErasureCodingPolicy SYS_POLICY5 =
new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA,
DEFAULT_CELLSIZE, RS_10_4_POLICY_ID);

private static final List<ErasureCodingPolicy> SYS_POLICIES =
Collections.unmodifiableList(Arrays.asList(
SYS_POLICY1, SYS_POLICY2, SYS_POLICY3, SYS_POLICY4,
SYS_POLICY5));

/**
* System policies sorted by name for fast querying.
*/
private static final Map<String, ErasureCodingPolicy> SYSTEM_POLICIES_BY_NAME;

/**
* System policies sorted by ID for fast querying.
*/
private static final Map<Byte, ErasureCodingPolicy> SYSTEM_POLICIES_BY_ID;

/**
* Populate the lookup maps in a static block.
*/
static {
SYSTEM_POLICIES_BY_NAME = new TreeMap<>();
SYSTEM_POLICIES_BY_ID = new TreeMap<>();
for (ErasureCodingPolicy policy : SYS_POLICIES) {
SYSTEM_POLICIES_BY_NAME.put(policy.getName(), policy);
SYSTEM_POLICIES_BY_ID.put(policy.getId(), policy);
}
}

/**
* Get system defined policies.
* @return system policies
*/
public static List<ErasureCodingPolicy> getPolicies() {
return SYS_POLICIES;
}

/**
* Get a policy by policy ID.
* @return ecPolicy, or null if not found
*/
public static ErasureCodingPolicy getByID(byte id) {
return SYSTEM_POLICIES_BY_ID.get(id);
}

/**
* Get a policy by policy name.
* @return ecPolicy, or null if not found
*/
public static ErasureCodingPolicy getByName(String name) {
return SYSTEM_POLICIES_BY_NAME.get(name);
}
}
@ -45,7 +45,7 @@ public class PacketReceiver implements Closeable {
* The max size of any single packet. This prevents OOMEs when
* invalid data is sent.
*/
private static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;
public static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;

static final Logger LOG = LoggerFactory.getLogger(PacketReceiver.class);
@ -285,7 +285,7 @@ public FsServerDefaults getServerDefaults() throws IOException {
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions)
CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
throws IOException {
CreateRequestProto.Builder builder = CreateRequestProto.newBuilder()
.setSrc(src)
@ -295,6 +295,9 @@ public HdfsFileStatus create(String src, FsPermission masked,
.setCreateParent(createParent)
.setReplication(replication)
.setBlockSize(blockSize);
if (ecPolicyName != null) {
builder.setEcPolicyName(ecPolicyName);
}
FsPermission unmasked = masked.getUnmasked();
if (unmasked != null) {
builder.setUnmasked(PBHelperClient.convert(unmasked));
@ -94,6 +94,7 @@
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
@ -1760,7 +1761,9 @@ public static FsServerDefaults convert(FsServerDefaultsProto fs) {
fs.getFileBufferSize(),
fs.getEncryptDataTransfer(),
fs.getTrashInterval(),
convert(fs.getChecksumType()));
convert(fs.getChecksumType()),
fs.hasKeyProviderUri() ? fs.getKeyProviderUri() : null,
(byte) fs.getPolicyId());
}

public static List<CryptoProtocolVersionProto> convert(
@ -1934,6 +1937,8 @@ public static FsServerDefaultsProto convert(FsServerDefaults fs) {
.setEncryptDataTransfer(fs.getEncryptDataTransfer())
.setTrashInterval(fs.getTrashInterval())
.setChecksumType(convert(fs.getChecksumType()))
.setKeyProviderUri(fs.getKeyProviderUri())
.setPolicyId(fs.getDefaultStoragePolicyId())
.build();
}

@ -2652,20 +2657,37 @@ public static HdfsProtos.ECSchemaProto convertECSchema(ECSchema schema) {
}

public static ErasureCodingPolicy convertErasureCodingPolicy(
ErasureCodingPolicyProto policy) {
return new ErasureCodingPolicy(policy.getName(),
convertECSchema(policy.getSchema()),
policy.getCellSize(), (byte) policy.getId());
ErasureCodingPolicyProto proto) {
final byte id = (byte) (proto.getId() & 0xFF);
ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByID(id);
if (policy == null) {
// If it's not a built-in policy, populate from the optional PB fields.
// The optional fields are required in this case.
Preconditions.checkArgument(proto.hasName(),
"Missing name field in ErasureCodingPolicy proto");
Preconditions.checkArgument(proto.hasSchema(),
"Missing schema field in ErasureCodingPolicy proto");
Preconditions.checkArgument(proto.hasCellSize(),
"Missing cellsize field in ErasureCodingPolicy proto");

return new ErasureCodingPolicy(proto.getName(),
convertECSchema(proto.getSchema()),
proto.getCellSize(), id);
}
return policy;
}

public static ErasureCodingPolicyProto convertErasureCodingPolicy(
ErasureCodingPolicy policy) {
ErasureCodingPolicyProto.Builder builder = ErasureCodingPolicyProto
.newBuilder()
.setName(policy.getName())
.setSchema(convertECSchema(policy.getSchema()))
.setCellSize(policy.getCellSize())
.setId(policy.getId());
// If it's not a built-in policy, need to set the optional fields.
if (SystemErasureCodingPolicies.getByID(policy.getId()) == null) {
builder.setName(policy.getName())
.setSchema(convertECSchema(policy.getSchema()))
.setCellSize(policy.getCellSize());
}
return builder.build();
}
@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxiesClient;
import org.apache.hadoop.security.UserGroupInformation;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.atomic.AtomicBoolean;

public class ClientHAProxyFactory<T> implements HAProxyFactory<T> {
@Override
@SuppressWarnings("unchecked")
public T createProxy(Configuration conf, InetSocketAddress nnAddr,
Class<T> xface, UserGroupInformation ugi, boolean withRetries,
AtomicBoolean fallbackToSimpleAuth) throws IOException {
return (T) NameNodeProxiesClient.createNonHAProxyWithClientProtocol(
nnAddr, conf, ugi, false, fallbackToSimpleAuth);
}

@Override
public T createProxy(Configuration conf, InetSocketAddress nnAddr,
Class<T> xface, UserGroupInformation ugi, boolean withRetries)
throws IOException {
return createProxy(conf, nnAddr, xface, ugi, withRetries, null);
}
}
@ -26,22 +26,16 @@
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* A FailoverProxyProvider implementation which allows one to configure
@ -51,25 +45,9 @@
*/
public class ConfiguredFailoverProxyProvider<T> extends
AbstractNNFailoverProxyProvider<T> {

private static final Log LOG =
LogFactory.getLog(ConfiguredFailoverProxyProvider.class);

interface ProxyFactory<T> {
T createProxy(Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
UserGroupInformation ugi, boolean withRetries,
AtomicBoolean fallbackToSimpleAuth) throws IOException;
}

static class DefaultProxyFactory<T> implements ProxyFactory<T> {
@Override
public T createProxy(Configuration conf, InetSocketAddress nnAddr,
Class<T> xface, UserGroupInformation ugi, boolean withRetries,
AtomicBoolean fallbackToSimpleAuth) throws IOException {
return NameNodeProxies.createNonHAProxy(conf,
nnAddr, xface, ugi, false, fallbackToSimpleAuth).getProxy();
}
}
private static final Logger LOG =
LoggerFactory.getLogger(ConfiguredFailoverProxyProvider.class);

protected final Configuration conf;
protected final List<AddressRpcProxyPair<T>> proxies =
@ -78,22 +56,11 @@ public T createProxy(Configuration conf, InetSocketAddress nnAddr,
protected final Class<T> xface;

private int currentProxyIndex = 0;
private final ProxyFactory<T> factory;
private final HAProxyFactory<T> factory;

public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface) {
this(conf, uri, xface, new DefaultProxyFactory<T>());
}

@VisibleForTesting
ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface, ProxyFactory<T> factory) {

Preconditions.checkArgument(
xface.isAssignableFrom(NamenodeProtocols.class),
"Interface class %s is not a valid NameNode protocol!");
Class<T> xface, HAProxyFactory<T> factory) {
this.xface = xface;

this.conf = new Configuration(conf);
int maxRetries = this.conf.getInt(
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_KEY,
@ -101,7 +68,7 @@ public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
this.conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
maxRetries);

int maxRetriesOnSocketTimeouts = this.conf.getInt(
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
@ -112,16 +79,16 @@ public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,

try {
ugi = UserGroupInformation.getCurrentUser();

Map<String, Map<String, InetSocketAddress>> map = DFSUtil.getHaNnRpcAddresses(
conf);

Map<String, Map<String, InetSocketAddress>> map =
DFSUtilClient.getHaNnRpcAddresses(conf);
Map<String, InetSocketAddress> addressesInNN = map.get(uri.getHost());

if (addressesInNN == null || addressesInNN.size() == 0) {
throw new RuntimeException("Could not find any configured addresses " +
"for URI " + uri);
}

Collection<InetSocketAddress> addressesOfNns = addressesInNN.values();
for (InetSocketAddress address : addressesOfNns) {
proxies.add(new AddressRpcProxyPair<T>(address));
@ -137,13 +104,13 @@ public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
// The client may have a delegation token set for the logical
// URI of the cluster. Clone this token to apply to each of the
// underlying IPC addresses so that the IPC code can find it.
HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
HAUtilClient.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
this.factory = factory;
} catch (IOException e) {
throw new RuntimeException(e);
}
}

@Override
public Class<T> getInterface() {
return xface;
@ -183,7 +150,7 @@ synchronized void incrementProxyIndex() {
private static class AddressRpcProxyPair<T> {
public final InetSocketAddress address;
public T namenode;

public AddressRpcProxyPair(InetSocketAddress address) {
this.address = address;
}
@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.atomic.AtomicBoolean;

/**
* This interface aims to decouple the proxy creation implementation that used
* in {@link AbstractNNFailoverProxyProvider}. Client side can use
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol} to initialize the
* proxy while the server side can use NamenodeProtocols
*/
@InterfaceAudience.Private
public interface HAProxyFactory<T> {

T createProxy(Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
UserGroupInformation ugi, boolean withRetries,
AtomicBoolean fallbackToSimpleAuth) throws IOException;

T createProxy(Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
UserGroupInformation ugi, boolean withRetries) throws IOException;

}
@ -25,14 +25,10 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;

import com.google.common.base.Preconditions;

/**
* A NNFailoverProxyProvider implementation which works on IP failover setup.
* Only one proxy is used to connect to both servers and switching between
@ -40,7 +36,7 @@
* clients can consistently reach only one node at a time.
*
* Clients with a live connection will likely get connection reset after an
* IP failover. This case will be handled by the
* IP failover. This case will be handled by the
* FailoverOnNetworkExceptionRetry retry policy. I.e. if the call is
* not idempotent, it won't get retried.
*
@ -54,15 +50,14 @@ public class IPFailoverProxyProvider<T> extends
private final Configuration conf;
private final Class<T> xface;
private final URI nameNodeUri;
private final HAProxyFactory<T> factory;
private ProxyInfo<T> nnProxyInfo = null;

public IPFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface) {
Preconditions.checkArgument(
xface.isAssignableFrom(NamenodeProtocols.class),
"Interface class %s is not a valid NameNode protocol!");
Class<T> xface, HAProxyFactory<T> factory) {
this.xface = xface;
this.nameNodeUri = uri;
this.factory = factory;

this.conf = new Configuration(conf);
int maxRetries = this.conf.getInt(
@ -71,7 +66,7 @@ public IPFailoverProxyProvider(Configuration conf, URI uri,
this.conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
maxRetries);

int maxRetriesOnSocketTimeouts = this.conf.getInt(
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
@ -79,7 +74,7 @@ public IPFailoverProxyProvider(Configuration conf, URI uri,
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
maxRetriesOnSocketTimeouts);
}

@Override
public Class<T> getInterface() {
return xface;
@ -92,9 +87,8 @@ public synchronized ProxyInfo<T> getProxy() {
try {
// Create a proxy that is not wrapped in RetryProxy
InetSocketAddress nnAddr = DFSUtilClient.getNNAddress(nameNodeUri);
nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
conf, nnAddr, xface, UserGroupInformation.getCurrentUser(),
false).getProxy(), nnAddr.toString());
nnProxyInfo = new ProxyInfo<T>(factory.createProxy(conf, nnAddr, xface,
UserGroupInformation.getCurrentUser(), false), nnAddr.toString());
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
@ -34,7 +34,6 @@
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.io.retry.MultiException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -147,15 +146,9 @@ public Object call() throws Exception {
  private volatile ProxyInfo<T> successfulProxy = null;
  private volatile String toIgnore = null;

  public RequestHedgingProxyProvider(
      Configuration conf, URI uri, Class<T> xface) {
    this(conf, uri, xface, new DefaultProxyFactory<T>());
  }

  @VisibleForTesting
  RequestHedgingProxyProvider(Configuration conf, URI uri,
      Class<T> xface, ProxyFactory<T> factory) {
    super(conf, uri, xface, factory);
  public RequestHedgingProxyProvider(Configuration conf, URI uri,
      Class<T> xface, HAProxyFactory<T> proxyFactory) {
    super(conf, uri, xface, proxyFactory);
  }

  @SuppressWarnings("unchecked")
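The provider above races the same call against every configured NameNode and keeps the first answer that does not fail. The helper below is a JDK-only sketch of that hedging idea, intended as a conceptual illustration rather than the provider's actual code; the class and method names are made up.

import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Supplier;

// Conceptual sketch of request hedging: submit the same call to every target
// and return the first result that completes without throwing.
final class HedgedCall {
  static <R> R invokeFirstSuccessful(List<Supplier<R>> targets)
      throws InterruptedException, ExecutionException {
    // Assumes at least one target.
    ExecutorService pool = Executors.newFixedThreadPool(targets.size());
    try {
      CompletionService<R> results = new ExecutorCompletionService<>(pool);
      for (Supplier<R> target : targets) {
        results.submit(target::get);
      }
      ExecutionException lastFailure = null;
      for (int i = 0; i < targets.size(); i++) {
        try {
          return results.take().get();   // first successful response wins
        } catch (ExecutionException e) {
          lastFailure = e;               // remember and wait for the next one
        }
      }
      throw lastFailure;                 // every target failed
    } finally {
      pool.shutdownNow();                // cancel the slower, now-useless calls
    }
  }
}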
@ -80,6 +80,7 @@ message CreateRequestProto {
  required uint64 blockSize = 7;
  repeated CryptoProtocolVersionProto cryptoProtocolVersion = 8;
  optional FsPermissionProto unmasked = 9;
  optional string ecPolicyName = 10;
}

message CreateResponseProto {
@ -353,9 +353,9 @@ message ECSchemaProto {
}

message ErasureCodingPolicyProto {
  required string name = 1;
  required ECSchemaProto schema = 2;
  required uint32 cellSize = 3;
  optional string name = 1;
  optional ECSchemaProto schema = 2;
  optional uint32 cellSize = 3;
  required uint32 id = 4; // Actually a byte - only 8 bits used
}

@ -421,6 +421,8 @@ message FsServerDefaultsProto {
  optional bool encryptDataTransfer = 6 [default = false];
  optional uint64 trashInterval = 7 [default = 0];
  optional ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
  optional string keyProviderUri = 9;
  optional uint32 policyId = 10 [default = 0];
}

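Relaxing name, schema and cellSize from required to optional means a writer may legally omit them, so readers should probe the generated has...() accessors before using those fields. The sketch below assumes the Java classes protoc generates for these messages (HdfsProtos.ErasureCodingPolicyProto) and uses a made-up policy id; it is an illustration of the proto2 presence semantics, not code from this commit.

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;

// With name/schema/cellSize now optional, a message carrying only the still
// required id is valid, and consumers must check field presence explicitly.
public class EcPolicyProtoSketch {
  public static void main(String[] args) {
    ErasureCodingPolicyProto p = ErasureCodingPolicyProto.newBuilder()
        .setId(4)              // hypothetical policy id
        .build();              // legal now that the other fields are optional
    System.out.println("has name? " + p.hasName()
        + ", has cellSize? " + p.hasCellSize());
  }
}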
@ -29,9 +29,8 @@
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider.ProxyFactory;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.io.retry.MultiException;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.ipc.StandbyException;
|
||||
@ -66,20 +65,20 @@ public void setup() throws URISyntaxException {
|
||||
ns = "mycluster-" + Time.monotonicNow();
|
||||
nnUri = new URI("hdfs://" + ns);
|
||||
conf = new Configuration();
|
||||
conf.set(DFSConfigKeys.DFS_NAMESERVICES, ns);
|
||||
conf.set(HdfsClientConfigKeys.DFS_NAMESERVICES, ns);
|
||||
conf.set(
|
||||
DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2");
|
||||
HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2");
|
||||
conf.set(
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
|
||||
HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
|
||||
"machine1.foo.bar:9820");
|
||||
conf.set(
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
|
||||
HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
|
||||
"machine2.foo.bar:9820");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testHedgingWhenOneFails() throws Exception {
|
||||
final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
|
||||
final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
|
||||
@Override
|
||||
public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
@ -87,11 +86,11 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
return new long[]{1};
|
||||
}
|
||||
});
|
||||
final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
|
||||
final ClientProtocol badMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
|
||||
|
||||
RequestHedgingProxyProvider<NamenodeProtocols> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
|
||||
RequestHedgingProxyProvider<ClientProtocol> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
|
||||
createFactory(badMock, goodMock));
|
||||
long[] stats = provider.getProxy().proxy.getStats();
|
||||
Assert.assertTrue(stats.length == 1);
|
||||
@ -101,7 +100,7 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
|
||||
@Test
|
||||
public void testHedgingWhenOneIsSlow() throws Exception {
|
||||
final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
|
||||
final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
|
||||
@Override
|
||||
public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
@ -109,11 +108,11 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
return new long[]{1};
|
||||
}
|
||||
});
|
||||
final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
|
||||
final ClientProtocol badMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
|
||||
|
||||
RequestHedgingProxyProvider<NamenodeProtocols> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
|
||||
RequestHedgingProxyProvider<ClientProtocol> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
|
||||
createFactory(goodMock, badMock));
|
||||
long[] stats = provider.getProxy().proxy.getStats();
|
||||
Assert.assertTrue(stats.length == 1);
|
||||
@ -124,14 +123,14 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
|
||||
@Test
|
||||
public void testHedgingWhenBothFail() throws Exception {
|
||||
NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
|
||||
ClientProtocol badMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
|
||||
NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
|
||||
ClientProtocol worseMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(worseMock.getStats()).thenThrow(
|
||||
new IOException("Worse mock !!"));
|
||||
|
||||
RequestHedgingProxyProvider<NamenodeProtocols> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
|
||||
RequestHedgingProxyProvider<ClientProtocol> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
|
||||
createFactory(badMock, worseMock));
|
||||
try {
|
||||
provider.getProxy().proxy.getStats();
|
||||
@ -147,7 +146,7 @@ public void testHedgingWhenBothFail() throws Exception {
|
||||
public void testPerformFailover() throws Exception {
|
||||
final AtomicInteger counter = new AtomicInteger(0);
|
||||
final int[] isGood = {1};
|
||||
final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
|
||||
final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
|
||||
@Override
|
||||
public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
@ -159,7 +158,7 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
throw new IOException("Was Good mock !!");
|
||||
}
|
||||
});
|
||||
final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
|
||||
final ClientProtocol badMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
|
||||
@Override
|
||||
public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
@ -171,8 +170,8 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
throw new IOException("Bad mock !!");
|
||||
}
|
||||
});
|
||||
RequestHedgingProxyProvider<NamenodeProtocols> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
|
||||
RequestHedgingProxyProvider<ClientProtocol> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
|
||||
createFactory(goodMock, badMock));
|
||||
long[] stats = provider.getProxy().proxy.getStats();
|
||||
Assert.assertTrue(stats.length == 1);
|
||||
@ -234,14 +233,14 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
|
||||
@Test
|
||||
public void testPerformFailoverWith3Proxies() throws Exception {
|
||||
conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
|
||||
conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
|
||||
"nn1,nn2,nn3");
|
||||
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3",
|
||||
conf.set(HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3",
|
||||
"machine3.foo.bar:9820");
|
||||
|
||||
final AtomicInteger counter = new AtomicInteger(0);
|
||||
final int[] isGood = {1};
|
||||
final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
|
||||
final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
|
||||
@Override
|
||||
public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
@ -253,7 +252,7 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
throw new IOException("Was Good mock !!");
|
||||
}
|
||||
});
|
||||
final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
|
||||
final ClientProtocol badMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
|
||||
@Override
|
||||
public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
@ -265,7 +264,7 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
throw new IOException("Bad mock !!");
|
||||
}
|
||||
});
|
||||
final NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
|
||||
final ClientProtocol worseMock = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(worseMock.getStats()).thenAnswer(new Answer<long[]>() {
|
||||
@Override
|
||||
public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
@ -278,8 +277,8 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
}
|
||||
});
|
||||
|
||||
RequestHedgingProxyProvider<NamenodeProtocols> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
|
||||
RequestHedgingProxyProvider<ClientProtocol> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
|
||||
createFactory(goodMock, badMock, worseMock));
|
||||
long[] stats = provider.getProxy().proxy.getStats();
|
||||
Assert.assertTrue(stats.length == 1);
|
||||
@ -355,14 +354,14 @@ public long[] answer(InvocationOnMock invocation) throws Throwable {
|
||||
|
||||
@Test
|
||||
public void testHedgingWhenFileNotFoundException() throws Exception {
|
||||
NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
|
||||
ClientProtocol active = Mockito.mock(ClientProtocol.class);
|
||||
Mockito
|
||||
.when(active.getBlockLocations(Matchers.anyString(),
|
||||
Matchers.anyLong(), Matchers.anyLong()))
|
||||
.thenThrow(new RemoteException("java.io.FileNotFoundException",
|
||||
"File does not exist!"));
|
||||
|
||||
NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
|
||||
ClientProtocol standby = Mockito.mock(ClientProtocol.class);
|
||||
Mockito
|
||||
.when(standby.getBlockLocations(Matchers.anyString(),
|
||||
Matchers.anyLong(), Matchers.anyLong()))
|
||||
@ -370,9 +369,9 @@ public void testHedgingWhenFileNotFoundException() throws Exception {
|
||||
new RemoteException("org.apache.hadoop.ipc.StandbyException",
|
||||
"Standby NameNode"));
|
||||
|
||||
RequestHedgingProxyProvider<NamenodeProtocols> provider =
|
||||
RequestHedgingProxyProvider<ClientProtocol> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri,
|
||||
NamenodeProtocols.class, createFactory(active, standby));
|
||||
ClientProtocol.class, createFactory(active, standby));
|
||||
try {
|
||||
provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
|
||||
Assert.fail("Should fail since the active namenode throws"
|
||||
@ -394,18 +393,18 @@ public void testHedgingWhenFileNotFoundException() throws Exception {
|
||||
|
||||
@Test
|
||||
public void testHedgingWhenConnectException() throws Exception {
|
||||
NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
|
||||
ClientProtocol active = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(active.getStats()).thenThrow(new ConnectException());
|
||||
|
||||
NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
|
||||
ClientProtocol standby = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(standby.getStats())
|
||||
.thenThrow(
|
||||
new RemoteException("org.apache.hadoop.ipc.StandbyException",
|
||||
"Standby NameNode"));
|
||||
|
||||
RequestHedgingProxyProvider<NamenodeProtocols> provider =
|
||||
RequestHedgingProxyProvider<ClientProtocol> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri,
|
||||
NamenodeProtocols.class, createFactory(active, standby));
|
||||
ClientProtocol.class, createFactory(active, standby));
|
||||
try {
|
||||
provider.getProxy().proxy.getStats();
|
||||
Assert.fail("Should fail since the active namenode throws"
|
||||
@ -428,15 +427,15 @@ public void testHedgingWhenConnectException() throws Exception {
|
||||
|
||||
@Test
|
||||
public void testHedgingWhenConnectAndEOFException() throws Exception {
|
||||
NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
|
||||
ClientProtocol active = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(active.getStats()).thenThrow(new EOFException());
|
||||
|
||||
NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
|
||||
ClientProtocol standby = Mockito.mock(ClientProtocol.class);
|
||||
Mockito.when(standby.getStats()).thenThrow(new ConnectException());
|
||||
|
||||
RequestHedgingProxyProvider<NamenodeProtocols> provider =
|
||||
RequestHedgingProxyProvider<ClientProtocol> provider =
|
||||
new RequestHedgingProxyProvider<>(conf, nnUri,
|
||||
NamenodeProtocols.class, createFactory(active, standby));
|
||||
ClientProtocol.class, createFactory(active, standby));
|
||||
try {
|
||||
provider.getProxy().proxy.getStats();
|
||||
Assert.fail("Should fail since both active and standby namenodes throw"
|
||||
@ -453,18 +452,25 @@ public void testHedgingWhenConnectAndEOFException() throws Exception {
|
||||
Mockito.verify(standby).getStats();
|
||||
}
|
||||
|
||||
private ProxyFactory<NamenodeProtocols> createFactory(
|
||||
NamenodeProtocols... protos) {
|
||||
final Iterator<NamenodeProtocols> iterator =
|
||||
private HAProxyFactory<ClientProtocol> createFactory(
|
||||
ClientProtocol... protos) {
|
||||
final Iterator<ClientProtocol> iterator =
|
||||
Lists.newArrayList(protos).iterator();
|
||||
return new ProxyFactory<NamenodeProtocols>() {
|
||||
return new HAProxyFactory<ClientProtocol>() {
|
||||
@Override
|
||||
public NamenodeProtocols createProxy(Configuration conf,
|
||||
InetSocketAddress nnAddr, Class<NamenodeProtocols> xface,
|
||||
public ClientProtocol createProxy(Configuration conf,
|
||||
InetSocketAddress nnAddr, Class<ClientProtocol> xface,
|
||||
UserGroupInformation ugi, boolean withRetries,
|
||||
AtomicBoolean fallbackToSimpleAuth) throws IOException {
|
||||
return iterator.next();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClientProtocol createProxy(Configuration conf,
|
||||
InetSocketAddress nnAddr, Class<ClientProtocol> xface,
|
||||
UserGroupInformation ugi, boolean withRetries) throws IOException {
|
||||
return iterator.next();
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
@ -171,6 +171,11 @@
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-hdfs-client</artifactId>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-common</artifactId>
|
||||
|
@ -31,9 +31,8 @@
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.junit.Test;
|
||||
import org.junit.runners.model.FrameworkMethod;
|
||||
import org.junit.runners.model.Statement;
|
||||
@ -142,7 +141,7 @@ public static Configuration getHdfsConf() {
|
||||
public static final Path ERASURE_CODING_DIR = new Path("/ec");
|
||||
public static final Path ERASURE_CODING_FILE = new Path("/ec/ecfile");
|
||||
public static final ErasureCodingPolicy ERASURE_CODING_POLICY =
|
||||
ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID);
|
||||
StripedFileTestUtil.getDefaultECPolicy();
|
||||
|
||||
private static MiniDFSCluster MINI_DFS = null;
|
||||
|
||||
|
@ -55,6 +55,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<artifactId>hadoop-hdfs</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-hdfs-client</artifactId>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-hdfs</artifactId>
|
||||
@ -163,11 +168,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<artifactId>slf4j-log4j12</artifactId>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>xmlenc</groupId>
|
||||
<artifactId>xmlenc</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bcprov-jdk16</artifactId>
|
||||
|
@ -60,7 +60,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-hdfs-client</artifactId>
|
||||
<scope>compile</scope>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
@ -163,11 +163,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<artifactId>slf4j-log4j12</artifactId>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>xmlenc</groupId>
|
||||
<artifactId>xmlenc</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.netty</groupId>
|
||||
<artifactId>netty</artifactId>
|
||||
@ -396,6 +391,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<exclude>src/test/resources/editsStored*</exclude>
|
||||
<exclude>src/test/resources/empty-file</exclude>
|
||||
<exclude>src/main/webapps/datanode/robots.txt</exclude>
|
||||
<exclude>src/main/webapps/hdfs/robots.txt</exclude>
|
||||
<exclude>src/main/webapps/journal/robots.txt</exclude>
|
||||
<exclude>src/main/webapps/secondary/robots.txt</exclude>
|
||||
<exclude>src/contrib/**</exclude>
|
||||
<exclude>src/site/resources/images/*</exclude>
|
||||
<exclude>src/main/webapps/static/bootstrap-3.0.2/**</exclude>
|
||||
|
@ -93,20 +93,10 @@ function hdfscmd_case
    ;;
    datanode)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      # Determine if we're starting a secure datanode, and
      # if so, redefine appropriate variables
      if [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
        HADOOP_SUBCMD_SECURESERVICE="true"
        HADOOP_SUBCMD_SECUREUSER="${HADOOP_SECURE_DN_USER}"

        # backward compatibility
        HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}"
        HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}"

        HADOOP_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
      else
        HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
      fi
      HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
      hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR
      hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR
    ;;
    debug)
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DebugAdmin'
@ -170,18 +160,10 @@ function hdfscmd_case
    ;;
    nfs3)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      if [[ -n "${HADOOP_PRIVILEGED_NFS_USER}" ]]; then
        HADOOP_SUBCMD_SECURESERVICE="true"
        HADOOP_SUBCMD_SECUREUSER="${HADOOP_PRIVILEGED_NFS_USER}"

        # backward compatibility
        HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}"
        HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}"

        HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
      else
        HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
      fi
      HADOOP_SECURE_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
      hadoop_deprecate_envvar HADOOP_SECURE_NFS3_LOG_DIR HADOOP_SECURE_LOG_DIR
      hadoop_deprecate_envvar HADOOP_SECURE_NFS3_PID_DIR HADOOP_SECURE_PID_DIR
    ;;
    oev)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
@ -241,9 +223,9 @@ else
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  # shellcheck source=./hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
@ -268,7 +250,7 @@ if hadoop_need_reexec hdfs "${HADOOP_SUBCMD}"; then
  exit $?
fi

hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

HADOOP_SUBCMD_ARGS=("$@")

@ -288,60 +270,5 @@ fi
|
||||
|
||||
hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
|
||||
|
||||
if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
|
||||
HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
|
||||
|
||||
hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
|
||||
|
||||
hadoop_verify_secure_prereq
|
||||
hadoop_setup_secure_service
|
||||
priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
|
||||
priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
|
||||
priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
|
||||
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
|
||||
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
|
||||
else
|
||||
daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
|
||||
daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
|
||||
fi
|
||||
|
||||
if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
|
||||
if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
|
||||
else
|
||||
# shellcheck disable=SC2034
|
||||
HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
|
||||
fi
|
||||
fi
|
||||
|
||||
hadoop_finalize
|
||||
|
||||
if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then
|
||||
if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
|
||||
hadoop_secure_daemon_handler \
|
||||
"${HADOOP_DAEMON_MODE}" \
|
||||
"${HADOOP_SUBCMD}" \
|
||||
"${HADOOP_CLASSNAME}" \
|
||||
"${daemon_pidfile}" \
|
||||
"${daemon_outfile}" \
|
||||
"${priv_pidfile}" \
|
||||
"${priv_outfile}" \
|
||||
"${priv_errfile}" \
|
||||
"${HADOOP_SUBCMD_ARGS[@]}"
|
||||
else
|
||||
hadoop_daemon_handler \
|
||||
"${HADOOP_DAEMON_MODE}" \
|
||||
"${HADOOP_SUBCMD}" \
|
||||
"${HADOOP_CLASSNAME}" \
|
||||
"${daemon_pidfile}" \
|
||||
"${daemon_outfile}" \
|
||||
"${HADOOP_SUBCMD_ARGS[@]}"
|
||||
fi
|
||||
exit $?
|
||||
else
|
||||
# shellcheck disable=SC2086
|
||||
hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}"
|
||||
fi
|
||||
# everything is in globals at this point, so call the generic handler
|
||||
hadoop_generic_java_subcmd_handler
|
@ -53,6 +53,9 @@ function hadoop_subproject_init
|
||||
|
||||
hadoop_deprecate_envvar HADOOP_NFS3_SECURE_EXTRA_OPTS HDFS_NFS3_SECURE_EXTRA_OPTS
|
||||
|
||||
hadoop_deprecate_envvar HADOOP_SECURE_DN_USER HDFS_DATANODE_SECURE_USER
|
||||
|
||||
hadoop_deprecate_envvar HADOOP_PRIVILEGED_NFS_USER HDFS_NFS3_SECURE_USER
|
||||
|
||||
HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME}"
|
||||
|
||||
@ -74,6 +77,8 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
|
||||
HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P)
|
||||
fi
|
||||
|
||||
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
|
||||
|
||||
if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
|
||||
[[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
|
||||
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
|
||||
|
@ -142,7 +142,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
||||
HdfsClientConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
|
||||
public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT;
|
||||
public static final String DFS_NAMENODE_HTTP_BIND_HOST_KEY = "dfs.namenode.http-bind-host";
|
||||
public static final String DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address";
|
||||
public static final String DFS_NAMENODE_RPC_ADDRESS_KEY =
|
||||
HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
|
||||
public static final String DFS_NAMENODE_RPC_BIND_HOST_KEY = "dfs.namenode.rpc-bind-host";
|
||||
public static final String DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
|
||||
public static final String DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY = "dfs.namenode.servicerpc-bind-host";
|
||||
|
@ -449,19 +449,6 @@ public static Set<String> getAllNnPrincipals(Configuration conf) throws IOExcept
|
||||
return principals;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns list of InetSocketAddress corresponding to HA NN RPC addresses from
|
||||
* the configuration.
|
||||
*
|
||||
* @param conf configuration
|
||||
* @return list of InetSocketAddresses
|
||||
*/
|
||||
public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
|
||||
Configuration conf) {
|
||||
return DFSUtilClient.getAddresses(conf, null,
|
||||
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns list of InetSocketAddress corresponding to backup node rpc
|
||||
* addresses from the configuration.
|
||||
@ -693,7 +680,7 @@ public static String addressMapToString(
|
||||
|
||||
public static String nnAddressesAsString(Configuration conf) {
|
||||
Map<String, Map<String, InetSocketAddress>> addresses =
|
||||
getHaNnRpcAddresses(conf);
|
||||
DFSUtilClient.getHaNnRpcAddresses(conf);
|
||||
return addressMapToString(addresses);
|
||||
}
|
||||
|
||||
|
@ -29,7 +29,6 @@
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
|
||||
import static org.apache.hadoop.security.SecurityUtil.buildTokenService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
@ -39,8 +38,6 @@
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
@ -48,17 +45,12 @@
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.ipc.StandbyException;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
import com.google.common.base.Preconditions;
|
||||
@ -67,12 +59,6 @@
|
||||
@InterfaceAudience.Private
|
||||
public class HAUtil {
|
||||
|
||||
private static final Log LOG =
|
||||
LogFactory.getLog(HAUtil.class);
|
||||
|
||||
private static final DelegationTokenSelector tokenSelector =
|
||||
new DelegationTokenSelector();
|
||||
|
||||
private static final String[] HA_SPECIAL_INDEPENDENT_KEYS = new String[]{
|
||||
DFS_NAMENODE_RPC_ADDRESS_KEY,
|
||||
DFS_NAMENODE_RPC_BIND_HOST_KEY,
|
||||
@ -97,7 +83,7 @@ private HAUtil() { /* Hidden constructor */ }
|
||||
*/
|
||||
public static boolean isHAEnabled(Configuration conf, String nsId) {
|
||||
Map<String, Map<String, InetSocketAddress>> addresses =
|
||||
DFSUtil.getHaNnRpcAddresses(conf);
|
||||
DFSUtilClient.getHaNnRpcAddresses(conf);
|
||||
if (addresses == null) return false;
|
||||
Map<String, InetSocketAddress> nnMap = addresses.get(nsId);
|
||||
return nnMap != null && nnMap.size() > 1;
|
||||
@ -259,47 +245,6 @@ public static boolean useLogicalUri(Configuration conf, URI nameNodeUri)
|
||||
return provider.useLogicalURI();
|
||||
}
|
||||
|
||||
/**
|
||||
* Locate a delegation token associated with the given HA cluster URI, and if
|
||||
* one is found, clone it to also represent the underlying namenode address.
|
||||
* @param ugi the UGI to modify
|
||||
* @param haUri the logical URI for the cluster
|
||||
* @param nnAddrs collection of NNs in the cluster to which the token
|
||||
* applies
|
||||
*/
|
||||
public static void cloneDelegationTokenForLogicalUri(
|
||||
UserGroupInformation ugi, URI haUri,
|
||||
Collection<InetSocketAddress> nnAddrs) {
|
||||
// this cloning logic is only used by hdfs
|
||||
Text haService = HAUtilClient.buildTokenServiceForLogicalUri(haUri,
|
||||
HdfsConstants.HDFS_URI_SCHEME);
|
||||
Token<DelegationTokenIdentifier> haToken =
|
||||
tokenSelector.selectToken(haService, ugi.getTokens());
|
||||
if (haToken != null) {
|
||||
for (InetSocketAddress singleNNAddr : nnAddrs) {
|
||||
// this is a minor hack to prevent physical HA tokens from being
|
||||
// exposed to the user via UGI.getCredentials(), otherwise these
|
||||
// cloned tokens may be inadvertently propagated to jobs
|
||||
Token<DelegationTokenIdentifier> specificToken =
|
||||
haToken.privateClone(buildTokenService(singleNNAddr));
|
||||
Text alias = new Text(
|
||||
HAUtilClient.buildTokenServicePrefixForLogicalUri(
|
||||
HdfsConstants.HDFS_URI_SCHEME)
|
||||
+ "//" + specificToken.getService());
|
||||
ugi.addToken(alias, specificToken);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Mapped HA service delegation token for logical URI " +
|
||||
haUri + " to namenode " + singleNNAddr);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("No HA service delegation token found for logical URI " +
|
||||
haUri);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the internet address of the currently-active NN. This should rarely be
|
||||
* used, since callers of this method who connect directly to the NN using the
|
||||
|
@ -36,6 +36,7 @@
|
||||
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
|
||||
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.NameNodeHAProxyFactory;
|
||||
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
|
||||
import org.apache.hadoop.io.Text;
|
||||
@ -112,7 +113,7 @@ public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
|
||||
throws IOException {
|
||||
AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
|
||||
NameNodeProxiesClient.createFailoverProxyProvider(conf, nameNodeUri,
|
||||
xface, true, fallbackToSimpleAuth);
|
||||
xface, true, fallbackToSimpleAuth, new NameNodeHAProxyFactory<T>());
|
||||
|
||||
if (failoverProxyProvider == null) {
|
||||
return createNonHAProxy(conf, DFSUtilClient.getNNAddress(nameNodeUri),
|
||||
|
@ -424,7 +424,8 @@ public CreateResponseProto create(RpcController controller,
|
||||
PBHelperClient.convertCreateFlag(req.getCreateFlag()), req.getCreateParent(),
|
||||
(short) req.getReplication(), req.getBlockSize(),
|
||||
PBHelperClient.convertCryptoProtocolVersions(
|
||||
req.getCryptoProtocolVersionList()));
|
||||
req.getCryptoProtocolVersionList()),
|
||||
req.getEcPolicyName());
|
||||
|
||||
if (result != null) {
|
||||
return CreateResponseProto.newBuilder().setFs(PBHelperClient.convert(result))
|
||||
|
@ -43,6 +43,7 @@
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
|
||||
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
||||
import org.apache.hadoop.io.nativeio.NativeIO;
|
||||
import org.apache.hadoop.io.nativeio.NativeIOException;
|
||||
@ -275,12 +276,10 @@ public static class StorageDirectory implements FormatConfirmable {
|
||||
|
||||
private final StorageLocation location;
|
||||
public StorageDirectory(File dir) {
|
||||
// default dirType is null
|
||||
this(dir, null, false);
|
||||
}
|
||||
|
||||
public StorageDirectory(StorageLocation location) {
|
||||
// default dirType is null
|
||||
this(null, false, location);
|
||||
}
|
||||
|
||||
@ -337,7 +336,8 @@ private StorageDirectory(File dir, StorageDirType dirType,
|
||||
boolean isShared, StorageLocation location) {
|
||||
this.root = dir;
|
||||
this.lock = null;
|
||||
this.dirType = dirType;
|
||||
// default dirType is UNDEFINED
|
||||
this.dirType = (dirType == null ? NameNodeDirType.UNDEFINED : dirType);
|
||||
this.isShared = isShared;
|
||||
this.location = location;
|
||||
assert location == null || dir == null ||
|
||||
|
@ -70,7 +70,8 @@ class BPOfferService {
|
||||
* handshake.
|
||||
*/
|
||||
volatile DatanodeRegistration bpRegistration;
|
||||
|
||||
|
||||
private final String nameserviceId;
|
||||
private final DataNode dn;
|
||||
|
||||
/**
|
||||
@ -120,12 +121,16 @@ void writeUnlock() {
|
||||
mWriteLock.unlock();
|
||||
}
|
||||
|
||||
BPOfferService(List<InetSocketAddress> nnAddrs,
|
||||
List<InetSocketAddress> lifelineNnAddrs, DataNode dn) {
|
||||
BPOfferService(
|
||||
final String nameserviceId,
|
||||
List<InetSocketAddress> nnAddrs,
|
||||
List<InetSocketAddress> lifelineNnAddrs,
|
||||
DataNode dn) {
|
||||
Preconditions.checkArgument(!nnAddrs.isEmpty(),
|
||||
"Must pass at least one NN.");
|
||||
Preconditions.checkArgument(nnAddrs.size() == lifelineNnAddrs.size(),
|
||||
"Must pass same number of NN addresses and lifeline addresses.");
|
||||
this.nameserviceId = nameserviceId;
|
||||
this.dn = dn;
|
||||
|
||||
for (int i = 0; i < nnAddrs.size(); ++i) {
|
||||
@ -170,6 +175,14 @@ boolean isAlive() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets nameservice id to which this {@link BPOfferService} maps to.
|
||||
* @return nameservice id, which can be null.
|
||||
*/
|
||||
String getNameserviceId() {
|
||||
return nameserviceId;
|
||||
}
|
||||
|
||||
String getBlockPoolId() {
|
||||
readLock();
|
||||
try {
|
||||
|
@ -25,7 +25,6 @@
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedList;
|
||||
@ -279,7 +278,10 @@ private void connectToNNAndHandshake() throws IOException {
|
||||
// This also initializes our block pool in the DN if we are
|
||||
// the first NN connection for this BP.
|
||||
bpos.verifyAndSetNamespaceInfo(this, nsInfo);
|
||||
|
||||
|
||||
/* set thread name again to include NamespaceInfo when it's available. */
|
||||
this.bpThread.setName(formatThreadName("heartbeating", nnAddr));
|
||||
|
||||
// Second phase of the handshake with the NN.
|
||||
register(nsInfo);
|
||||
}
|
||||
@ -547,14 +549,15 @@ void start() {
|
||||
lifelineSender.start();
|
||||
}
|
||||
}
|
||||
|
||||
private String formatThreadName(String action, InetSocketAddress addr) {
|
||||
Collection<StorageLocation> dataDirs =
|
||||
DataNode.getStorageLocations(dn.getConf());
|
||||
return "DataNode: [" + dataDirs.toString() + "] " +
|
||||
action + " to " + addr;
|
||||
|
||||
private String formatThreadName(
|
||||
final String action,
|
||||
final InetSocketAddress addr) {
|
||||
final String prefix = bpos.getBlockPoolId() != null ? bpos.getBlockPoolId()
|
||||
: bpos.getNameserviceId();
|
||||
return prefix + " " + action + " to " + addr;
|
||||
}
|
||||
|
||||
|
||||
//This must be called only by blockPoolManager.
|
||||
void stop() {
|
||||
shouldServiceRun = false;
|
||||
@ -1008,8 +1011,8 @@ public void run() {
|
||||
}
|
||||
|
||||
public void start() {
|
||||
lifelineThread = new Thread(this, formatThreadName("lifeline",
|
||||
lifelineNnAddr));
|
||||
lifelineThread = new Thread(this,
|
||||
formatThreadName("lifeline", lifelineNnAddr));
|
||||
lifelineThread.setDaemon(true);
|
||||
lifelineThread.setUncaughtExceptionHandler(
|
||||
new Thread.UncaughtExceptionHandler() {
|
||||
|
@ -225,7 +225,7 @@ private void doRefreshNamenodes(
|
||||
lifelineAddrs.add(nnIdToLifelineAddr != null ?
|
||||
nnIdToLifelineAddr.get(nnId) : null);
|
||||
}
|
||||
BPOfferService bpos = createBPOS(addrs, lifelineAddrs);
|
||||
BPOfferService bpos = createBPOS(nsToAdd, addrs, lifelineAddrs);
|
||||
bpByNameserviceId.put(nsToAdd, bpos);
|
||||
offerServices.add(bpos);
|
||||
}
|
||||
@ -275,8 +275,10 @@ private void doRefreshNamenodes(
|
||||
/**
|
||||
* Extracted out for test purposes.
|
||||
*/
|
||||
protected BPOfferService createBPOS(List<InetSocketAddress> nnAddrs,
|
||||
protected BPOfferService createBPOS(
|
||||
final String nameserviceId,
|
||||
List<InetSocketAddress> nnAddrs,
|
||||
List<InetSocketAddress> lifelineNnAddrs) {
|
||||
return new BPOfferService(nnAddrs, lifelineNnAddrs, dn);
|
||||
return new BPOfferService(nameserviceId, nnAddrs, lifelineNnAddrs, dn);
|
||||
}
|
||||
}
|
||||
|
@ -241,12 +241,12 @@ public void run() {
|
||||
LOG.info("Failed to read expected encryption handshake from client " +
|
||||
"at " + peer.getRemoteAddressString() + ". Perhaps the client " +
|
||||
"is running an older version of Hadoop which does not support " +
|
||||
"encryption");
|
||||
"encryption", imne);
|
||||
} else {
|
||||
LOG.info("Failed to read expected SASL data transfer protection " +
|
||||
"handshake from client at " + peer.getRemoteAddressString() +
|
||||
". Perhaps the client is running an older version of Hadoop " +
|
||||
"which does not support SASL data transfer protection");
|
||||
"which does not support SASL data transfer protection", imne);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
@ -187,28 +187,21 @@ public void onFailure(@Nonnull Throwable t) {

  /**
   * {@inheritDoc}.
   *
   * The results of in-progress checks are not useful during shutdown,
   * so we optimize for faster shutdown by interrupting all actively
   * executing checks.
   */
  @Override
  public void shutdownAndWait(long timeout, TimeUnit timeUnit)
      throws InterruptedException {
    // Try orderly shutdown.
    executorService.shutdown();

    if (!executorService.awaitTermination(timeout, timeUnit)) {
      // Interrupt executing tasks and wait again.
      executorService.shutdownNow();
      executorService.awaitTermination(timeout, timeUnit);
    }
    if (scheduledExecutorService != null) {
      // Try orderly shutdown
      scheduledExecutorService.shutdown();

      if (!scheduledExecutorService.awaitTermination(timeout, timeUnit)) {
        // Interrupt executing tasks and wait again.
        scheduledExecutorService.shutdownNow();
        scheduledExecutorService.awaitTermination(timeout, timeUnit);
      }
      scheduledExecutorService.shutdownNow();
      scheduledExecutorService.awaitTermination(timeout, timeUnit);
    }

    executorService.shutdownNow();
    executorService.awaitTermination(timeout, timeUnit);
  }

  /**
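The hunk above drops the orderly-shutdown grace period: the results of in-flight checks are not needed once shutdown begins, so both executors are interrupted immediately and then waited on. A minimal JDK-only restatement of that pattern follows; the helper class and method names are made up for illustration.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical helper mirroring the simplified shutdown path: interrupt
// everything right away, then wait up to the caller's timeout for termination.
final class FastShutdown {
  static void stopNow(ExecutorService pool, long timeout, TimeUnit unit)
      throws InterruptedException {
    pool.shutdownNow();                    // in-flight results are not needed
    pool.awaitTermination(timeout, unit);  // bounded wait for threads to exit
  }
}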
@ -375,10 +375,15 @@ private void processRecursively(String parent, HdfsFileStatus status,
|
||||
/** @return true if it is necessary to run another round of migration */
|
||||
private void processFile(String fullPath, HdfsLocatedFileStatus status,
|
||||
Result result) {
|
||||
final byte policyId = status.getStoragePolicy();
|
||||
// currently we ignore files with unspecified storage policy
|
||||
byte policyId = status.getStoragePolicy();
|
||||
if (policyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
|
||||
return;
|
||||
try {
|
||||
// get default policy from namenode
|
||||
policyId = dfs.getServerDefaults().getDefaultStoragePolicyId();
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Failed to get default policy for " + fullPath, e);
|
||||
return;
|
||||
}
|
||||
}
|
||||
final BlockStoragePolicy policy = blockStoragePolicies[policyId];
|
||||
if (policy == null) {
|
||||
|
@ -17,23 +17,15 @@
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
|
||||
import javax.servlet.ServletContext;
|
||||
import javax.servlet.http.HttpServlet;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSUtilClient;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.NameNodeProxies;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.server.common.JspHelper;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.znerd.xmlenc.XMLOutputter;
|
||||
|
||||
/**
|
||||
* A base class for the servlets in DFS.
|
||||
@ -44,44 +36,6 @@ abstract class DfsServlet extends HttpServlet {
|
||||
|
||||
static final Log LOG = LogFactory.getLog(DfsServlet.class.getCanonicalName());
|
||||
|
||||
/** Write the object to XML format */
|
||||
protected void writeXml(Exception except, String path, XMLOutputter doc)
|
||||
throws IOException {
|
||||
doc.startTag(RemoteException.class.getSimpleName());
|
||||
doc.attribute("path", path);
|
||||
if (except instanceof RemoteException) {
|
||||
doc.attribute("class", ((RemoteException) except).getClassName());
|
||||
} else {
|
||||
doc.attribute("class", except.getClass().getName());
|
||||
}
|
||||
String msg = except.getLocalizedMessage();
|
||||
int i = msg.indexOf("\n");
|
||||
if (i >= 0) {
|
||||
msg = msg.substring(0, i);
|
||||
}
|
||||
doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
|
||||
doc.endTag();
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a {@link NameNode} proxy from the current {@link ServletContext}.
|
||||
*/
|
||||
protected ClientProtocol createNameNodeProxy() throws IOException {
|
||||
ServletContext context = getServletContext();
|
||||
// if we are running in the Name Node, use it directly rather than via
|
||||
// rpc
|
||||
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
|
||||
if (nn != null) {
|
||||
return nn.getRpcServer();
|
||||
}
|
||||
InetSocketAddress nnAddr =
|
||||
NameNodeHttpServer.getNameNodeAddressFromContext(context);
|
||||
Configuration conf = new HdfsConfiguration(
|
||||
NameNodeHttpServer.getConfFromContext(context));
|
||||
return NameNodeProxies.createProxy(conf, DFSUtilClient.getNNUri(nnAddr),
|
||||
ClientProtocol.class).getProxy();
|
||||
}
|
||||
|
||||
protected UserGroupInformation getUGI(HttpServletRequest request,
|
||||
Configuration conf) throws IOException {
|
||||
return JspHelper.getUGI(getServletContext(), request, conf);
|
||||
|
@ -20,11 +20,10 @@
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.stream.Collectors;
|
||||
@ -40,50 +39,12 @@
|
||||
@InterfaceAudience.LimitedPrivate({"HDFS"})
|
||||
public final class ErasureCodingPolicyManager {
|
||||
|
||||
/**
|
||||
* TODO: HDFS-8095.
|
||||
*/
|
||||
private static final int DEFAULT_CELLSIZE = 64 * 1024;
|
||||
private static final ErasureCodingPolicy SYS_POLICY1 =
|
||||
new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_SCHEMA,
|
||||
DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_POLICY_ID);
|
||||
private static final ErasureCodingPolicy SYS_POLICY2 =
|
||||
new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA,
|
||||
DEFAULT_CELLSIZE, HdfsConstants.RS_3_2_POLICY_ID);
|
||||
private static final ErasureCodingPolicy SYS_POLICY3 =
|
||||
new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
|
||||
DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID);
|
||||
private static final ErasureCodingPolicy SYS_POLICY4 =
|
||||
new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA,
|
||||
DEFAULT_CELLSIZE, HdfsConstants.XOR_2_1_POLICY_ID);
|
||||
private static final ErasureCodingPolicy SYS_POLICY5 =
|
||||
new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA,
|
||||
DEFAULT_CELLSIZE, HdfsConstants.RS_10_4_POLICY_ID);
|
||||
|
||||
//We may add more later.
|
||||
private static final ErasureCodingPolicy[] SYS_POLICIES =
|
||||
new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3,
|
||||
SYS_POLICY4, SYS_POLICY5};
|
||||
|
||||
// Supported storage policies for striped EC files
|
||||
private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE =
|
||||
new byte[]{
|
||||
HdfsConstants.HOT_STORAGE_POLICY_ID,
|
||||
HdfsConstants.COLD_STORAGE_POLICY_ID,
|
||||
HdfsConstants.ALLSSD_STORAGE_POLICY_ID};
|
||||
/**
|
||||
* All supported policies maintained in NN memory for fast querying,
|
||||
* identified and sorted by its name.
|
||||
*/
|
||||
private static final Map<String, ErasureCodingPolicy> SYSTEM_POLICIES_BY_NAME;
|
||||
|
||||
static {
|
||||
// Create a hashmap of all available policies for quick lookup by name
|
||||
SYSTEM_POLICIES_BY_NAME = new TreeMap<>();
|
||||
for (ErasureCodingPolicy policy : SYS_POLICIES) {
|
||||
SYSTEM_POLICIES_BY_NAME.put(policy.getName(), policy);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* All enabled policies maintained in NN memory for fast querying,
|
||||
@ -101,9 +62,10 @@ public final class ErasureCodingPolicyManager {
|
||||
if (policyName.trim().isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
ErasureCodingPolicy ecPolicy = SYSTEM_POLICIES_BY_NAME.get(policyName);
|
||||
ErasureCodingPolicy ecPolicy =
|
||||
SystemErasureCodingPolicies.getByName(policyName);
|
||||
if (ecPolicy == null) {
|
||||
String sysPolicies = Arrays.asList(SYS_POLICIES).stream()
|
||||
String sysPolicies = SystemErasureCodingPolicies.getPolicies().stream()
|
||||
.map(ErasureCodingPolicy::getName)
|
||||
.collect(Collectors.joining(", "));
|
||||
String msg = String.format("EC policy '%s' specified at %s is not a " +
|
||||
@ -124,35 +86,6 @@ public final class ErasureCodingPolicyManager {
|
||||
*/
|
||||
}
|
||||
|
||||
/**
|
||||
* Get system defined policies.
|
||||
* @return system policies
|
||||
*/
|
||||
public static ErasureCodingPolicy[] getSystemPolicies() {
|
||||
return SYS_POLICIES;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a policy by policy ID.
|
||||
* @return ecPolicy, or null if not found
|
||||
*/
|
||||
public static ErasureCodingPolicy getPolicyByID(byte id) {
|
||||
for (ErasureCodingPolicy policy : SYS_POLICIES) {
|
||||
if (policy.getId() == id) {
|
||||
return policy;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a policy by policy name.
|
||||
* @return ecPolicy, or null if not found
|
||||
*/
|
||||
public static ErasureCodingPolicy getPolicyByName(String name) {
|
||||
return SYSTEM_POLICIES_BY_NAME.get(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the set of enabled policies.
|
||||
* @return all policies
|
||||
@ -190,6 +123,7 @@ public static boolean checkStoragePolicySuitableForECStripedMode(
|
||||
* Clear and clean up.
|
||||
*/
|
||||
public void clear() {
|
||||
enabledPoliciesByName.clear();
|
||||
// TODO: we should only clear policies loaded from NN metadata.
|
||||
// This is a placeholder for HDFS-7337.
|
||||
}
|
||||
}
|
||||
|
@ -37,6 +37,7 @@
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@ -57,6 +58,39 @@ final class FSDirErasureCodingOp {
   */
  private FSDirErasureCodingOp() {}

  /**
   * Check if the ecPolicyName is valid and enabled, and return the
   * corresponding EC policy if it is.
   * @param fsn namespace
   * @param ecPolicyName name of EC policy to be checked
   * @return an erasure coding policy if ecPolicyName is valid and enabled
   * @throws IOException
   */
  static ErasureCodingPolicy getErasureCodingPolicyByName(
      final FSNamesystem fsn, final String ecPolicyName) throws IOException {
    assert fsn.hasReadLock();
    ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
        .getEnabledPolicyByName(ecPolicyName);
    if (ecPolicy == null) {
      final String sysPolicies =
          Arrays.asList(
              fsn.getErasureCodingPolicyManager().getEnabledPolicies())
              .stream()
              .map(ErasureCodingPolicy::getName)
              .collect(Collectors.joining(", "));
      final String message = String.format("Policy '%s' does not match any " +
          "enabled erasure" +
          " coding policies: [%s]. The set of enabled erasure coding " +
          "policies can be configured at '%s'.",
          ecPolicyName,
          sysPolicies,
          DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY
      );
      throw new HadoopIllegalArgumentException(message);
    }
    return ecPolicy;
  }

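This helper is what an explicitly named policy from a client ultimately runs into: an unknown or disabled name surfaces as the error message built above. A hedged usage sketch follows; it assumes the DistributedFileSystem#setErasureCodingPolicy(Path, String) client API, a fs.defaultFS pointing at an HDFS cluster, and an enabled policy named "XOR-2-1-64k", none of which is established by this hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hypothetical client call that exercises the server-side name validation.
public class SetEcPolicyByNameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      // Assumes the default file system is HDFS; otherwise this cast fails.
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      dfs.mkdirs(new Path("/ec"));
      dfs.setErasureCodingPolicy(new Path("/ec"), "XOR-2-1-64k");
    }
  }
}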
/**
|
||||
* Set an erasure coding policy on the given path.
|
||||
*
|
||||
@ -83,25 +117,8 @@ static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn,
|
||||
List<XAttr> xAttrs;
|
||||
fsd.writeLock();
|
||||
try {
|
||||
ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
|
||||
.getEnabledPolicyByName(ecPolicyName);
|
||||
if (ecPolicy == null) {
|
||||
final String sysPolicies =
|
||||
Arrays.asList(
|
||||
fsn.getErasureCodingPolicyManager().getEnabledPolicies())
|
||||
.stream()
|
||||
.map(ErasureCodingPolicy::getName)
|
||||
.collect(Collectors.joining(", "));
|
||||
final String message = String.format("Policy '%s' does not match any " +
|
||||
"enabled erasure" +
|
||||
" coding policies: [%s]. The set of enabled erasure coding " +
|
||||
"policies can be configured at '%s'.",
|
||||
ecPolicyName,
|
||||
sysPolicies,
|
||||
DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY
|
||||
);
|
||||
throw new HadoopIllegalArgumentException(message);
|
||||
}
|
||||
ErasureCodingPolicy ecPolicy = getErasureCodingPolicyByName(fsn,
|
||||
ecPolicyName);
|
||||
iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
|
||||
// Write access is required to set erasure coding policy
|
||||
if (fsd.isPermissionEnabled()) {
|
||||
@ -302,7 +319,7 @@ private static ErasureCodingPolicy getErasureCodingPolicyForPath(
|
||||
if (inode.isFile()) {
|
||||
byte id = inode.asFile().getErasureCodingPolicyID();
|
||||
return id < 0 ? null :
|
||||
ErasureCodingPolicyManager.getPolicyByID(id);
|
||||
SystemErasureCodingPolicies.getByID(id);
|
||||
}
|
||||
// We don't allow setting EC policies on paths with a symlink. Thus
|
||||
// if a symlink is encountered, the dir shouldn't have EC policy.
|
||||
@ -317,8 +334,7 @@ private static ErasureCodingPolicy getErasureCodingPolicyForPath(
|
||||
ByteArrayInputStream bIn = new ByteArrayInputStream(xattr.getValue());
|
||||
DataInputStream dIn = new DataInputStream(bIn);
|
||||
String ecPolicyName = WritableUtils.readString(dIn);
|
||||
return ErasureCodingPolicyManager
|
||||
.getPolicyByName(ecPolicyName);
|
||||
return SystemErasureCodingPolicies.getByName(ecPolicyName);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -18,6 +18,7 @@
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.hdfs.AddBlockFlag;
|
||||
import org.apache.hadoop.fs.CreateFlag;
|
||||
@ -351,7 +352,7 @@ static HdfsFileStatus startFile(
|
||||
EnumSet<CreateFlag> flag, boolean createParent,
|
||||
short replication, long blockSize,
|
||||
FileEncryptionInfo feInfo, INode.BlocksMapUpdateInfo toRemoveBlocks,
|
||||
boolean logRetryEntry)
|
||||
String ecPolicyName, boolean logRetryEntry)
|
||||
throws IOException {
|
||||
assert fsn.hasWriteLock();
|
||||
boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
|
||||
@ -385,7 +386,7 @@ static HdfsFileStatus startFile(
|
||||
FSDirMkdirOp.createAncestorDirectories(fsd, iip, permissions);
|
||||
if (parent != null) {
|
||||
iip = addFile(fsd, parent, iip.getLastLocalName(), permissions,
|
||||
replication, blockSize, holder, clientMachine);
|
||||
replication, blockSize, holder, clientMachine, ecPolicyName);
|
||||
newNode = iip != null ? iip.getLastINode().asFile() : null;
|
||||
}
|
||||
if (newNode == null) {
|
||||
@ -521,7 +522,7 @@ private static BlockInfo addBlock(FSDirectory fsd, String path,
  private static INodesInPath addFile(
      FSDirectory fsd, INodesInPath existing, byte[] localName,
      PermissionStatus permissions, short replication, long preferredBlockSize,
      String clientName, String clientMachine)
      String clientName, String clientMachine, String ecPolicyName)
      throws IOException {

    Preconditions.checkNotNull(existing);
@ -530,8 +531,14 @@ private static INodesInPath addFile(
    fsd.writeLock();
    try {
      boolean isStriped = false;
      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
          unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
      ErasureCodingPolicy ecPolicy;
      if (!StringUtils.isEmpty(ecPolicyName)) {
        ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicyByName(
            fsd.getFSNamesystem(), ecPolicyName);
      } else {
        ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
            fsd.getFSNamesystem(), existing);
      }
      if (ecPolicy != null) {
        isStriped = true;
      }
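The new branch lets an explicitly requested policy name take precedence over the policy inherited from the parent directory. A self-contained sketch of that precedence rule, using plain strings rather than the HDFS types:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;

public class EcPolicySelection {

  /**
   * Mirrors the selection order in addFile(): a non-empty requested name
   * wins (and must be enabled); otherwise the policy inherited from the
   * nearest ancestor directory, if any, is used.
   */
  static Optional<String> choosePolicy(String requestedName,
      Optional<String> inherited, Set<String> enabledPolicies) {
    if (requestedName != null && !requestedName.isEmpty()) {
      if (!enabledPolicies.contains(requestedName)) {
        throw new IllegalArgumentException(
            "Policy '" + requestedName + "' is not enabled");
      }
      return Optional.of(requestedName);
    }
    return inherited;
  }

  public static void main(String[] args) {
    Set<String> enabled = new HashSet<>(Arrays.asList("RS-6-3-1024k"));
    // Explicit request overrides the directory policy.
    System.out.println(choosePolicy("RS-6-3-1024k",
        Optional.of("XOR-2-1-1024k"), enabled));   // RS-6-3-1024k
    // No explicit request: fall back to the inherited policy.
    System.out.println(choosePolicy("",
        Optional.of("RS-6-3-1024k"), enabled));    // RS-6-3-1024k
  }
}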
@ -39,6 +39,7 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@ -335,7 +336,7 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
      assert ((!isStriped) || (isStriped && !f.hasReplication()));
      Short replication = (!isStriped ? (short) f.getReplication() : null);
      ErasureCodingPolicy ecPolicy = isStriped ?
          ErasureCodingPolicyManager.getPolicyByID(
          SystemErasureCodingPolicies.getByID(
              (byte) f.getErasureCodingPolicyID()) : null;
      Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);

@ -148,6 +148,7 @@
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileEncryptionInfo;
@ -778,8 +779,12 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
          conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),
          conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, DFS_ENCRYPT_DATA_TRANSFER_DEFAULT),
          conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT),
          checksumType);
          checksumType,
          conf.getTrimmed(
              CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
              ""),
          blockManager.getStoragePolicySuite().getDefaultPolicy().getId());

      this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY,
          DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
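The widened FsServerDefaults construction now also publishes the configured key provider URI and the default storage policy ID to clients. A hedged sketch of setting the key provider path programmatically; the KMS URI is a placeholder, not a real endpoint:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class KeyProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The key is "hadoop.security.key.provider.path".
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        "kms://http@kms.example.com:9600/kms");
    // This is the value the NameNode reads with conf.getTrimmed(...) above
    // and advertises through FsServerDefaults.
    System.out.println(conf.getTrimmed(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, ""));
  }
}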
@ -2175,14 +2180,14 @@ CryptoProtocolVersion chooseProtocolVersion(
   */
  HdfsFileStatus startFile(String src, PermissionStatus permissions,
      String holder, String clientMachine, EnumSet<CreateFlag> flag,
      boolean createParent, short replication, long blockSize,
      CryptoProtocolVersion[] supportedVersions, boolean logRetryCache)
      throws IOException {
      boolean createParent, short replication, long blockSize,
      CryptoProtocolVersion[] supportedVersions, String ecPolicyName,
      boolean logRetryCache) throws IOException {

    HdfsFileStatus status;
    try {
      status = startFileInt(src, permissions, holder, clientMachine, flag,
          createParent, replication, blockSize, supportedVersions,
          createParent, replication, blockSize, supportedVersions, ecPolicyName,
          logRetryCache);
    } catch (AccessControlException e) {
      logAuditEvent(false, "create", src);
@ -2196,8 +2201,7 @@ private HdfsFileStatus startFileInt(String src,
      PermissionStatus permissions, String holder, String clientMachine,
      EnumSet<CreateFlag> flag, boolean createParent, short replication,
      long blockSize, CryptoProtocolVersion[] supportedVersions,
      boolean logRetryCache)
      throws IOException {
      String ecPolicyName, boolean logRetryCache) throws IOException {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      StringBuilder builder = new StringBuilder();
      builder.append("DIR* NameSystem.startFile: src=").append(src)
@ -2265,9 +2269,8 @@ private HdfsFileStatus startFileInt(String src,
      dir.writeLock();
      try {
        stat = FSDirWriteFileOp.startFile(this, iip, permissions, holder,
            clientMachine, flag, createParent,
            replication, blockSize, feInfo,
            toRemoveBlocks, logRetryCache);
            clientMachine, flag, createParent, replication, blockSize, feInfo,
            toRemoveBlocks, ecPolicyName, logRetryCache);
      } catch (IOException e) {
        skipSync = e instanceof StandbyException;
        throw e;
@ -37,6 +37,7 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@ -191,8 +192,8 @@ static long getBlockLayoutRedundancy(final BlockType blockType,
    if (blockType == STRIPED) {
      Preconditions.checkArgument(replication == null &&
          erasureCodingPolicyID != null);
      Preconditions.checkArgument(ErasureCodingPolicyManager
          .getPolicyByID(erasureCodingPolicyID) != null,
      Preconditions.checkArgument(SystemErasureCodingPolicies
          .getByID(erasureCodingPolicyID) != null,
          "Could not find EC policy with ID 0x" + StringUtils
              .byteToHexString(erasureCodingPolicyID));
      layoutRedundancy |= BLOCK_TYPE_MASK_STRIPED;
@ -516,8 +517,7 @@ public short getPreferredBlockReplication() {
    }

    ErasureCodingPolicy ecPolicy =
        ErasureCodingPolicyManager.getPolicyByID(
            getErasureCodingPolicyID());
        SystemErasureCodingPolicies.getByID(getErasureCodingPolicyID());
    Preconditions.checkNotNull(ecPolicy, "Could not find EC policy with ID 0x"
        + StringUtils.byteToHexString(getErasureCodingPolicyID()));
    return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
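The INodeFile hunks above switch policy lookups to the static SystemErasureCodingPolicies table, which maps between compact byte IDs and policy objects. A small sketch of both lookups and of the block-group width calculation used by getPreferredBlockReplication(); the policy name is illustrative and depends on the release:

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

public class EcPolicyLookup {
  public static void main(String[] args) {
    // Name-based lookup; null means the name is not a built-in policy.
    ErasureCodingPolicy policy =
        SystemErasureCodingPolicies.getByName("RS-6-3-1024k");
    if (policy == null) {
      System.err.println("Not a built-in system policy");
      return;
    }
    // ID-based lookup round-trips through the compact byte identifier
    // stored in the INode, as the hunks above do.
    ErasureCodingPolicy same =
        SystemErasureCodingPolicies.getByID(policy.getId());
    // Block-group width = data units + parity units (e.g. 6 + 3 = 9).
    short width = (short) (same.getNumDataUnits() + same.getNumParityUnits());
    System.out.println(same.getName() + " width=" + width);
  }
}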
|
@ -238,7 +238,7 @@ public static INodesInPath append(INodesInPath iip, INode child,
|
||||
}
|
||||
|
||||
private final byte[][] path;
|
||||
private final String pathname;
|
||||
private volatile String pathname;
|
||||
|
||||
/**
|
||||
* Array with the specified number of INodes resolved for a given path.
|
||||
@ -268,7 +268,6 @@ private INodesInPath(INode[] inodes, byte[][] path, boolean isRaw,
    Preconditions.checkArgument(inodes != null && path != null);
    this.inodes = inodes;
    this.path = path;
    this.pathname = DFSUtil.byteArray2PathString(path);
    this.isRaw = isRaw;
    this.isSnapshot = isSnapshot;
    this.snapshotId = snapshotId;
@ -329,6 +328,9 @@ public byte[] getPathComponent(int i) {

  /** @return the full path in string form */
  public String getPath() {
    if (pathname == null) {
      pathname = DFSUtil.byteArray2PathString(path);
    }
    return pathname;
  }

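pathname is now built on demand and marked volatile; since DFSUtil.byteArray2PathString is deterministic, a racy duplicate computation is harmless and no locking is needed. A generic sketch of the same idiom, with expensiveJoin standing in for the real conversion:

import java.nio.charset.StandardCharsets;

public class LazyPath {
  private final byte[][] components;
  // volatile: readers either see null (and recompute) or a fully
  // constructed String; duplicate computation is benign because the
  // result is deterministic and immutable.
  private volatile String pathname;

  LazyPath(byte[][] components) {
    this.components = components;
  }

  String getPath() {
    if (pathname == null) {
      pathname = expensiveJoin(components);
    }
    return pathname;
  }

  // Stand-in for DFSUtil.byteArray2PathString().
  private static String expensiveJoin(byte[][] parts) {
    StringBuilder sb = new StringBuilder();
    for (byte[] p : parts) {
      sb.append('/').append(new String(p, StandardCharsets.UTF_8));
    }
    return sb.length() == 0 ? "/" : sb.toString();
  }

  public static void main(String[] args) {
    LazyPath p = new LazyPath(new byte[][] {
        "user".getBytes(StandardCharsets.UTF_8),
        "alice".getBytes(StandardCharsets.UTF_8)});
    System.out.println(p.getPath());   // /user/alice
  }
}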
@ -722,8 +722,8 @@ public FsServerDefaults getServerDefaults() throws IOException {
  @Override // ClientProtocol
  public HdfsFileStatus create(String src, FsPermission masked,
      String clientName, EnumSetWritable<CreateFlag> flag,
      boolean createParent, short replication, long blockSize,
      CryptoProtocolVersion[] supportedVersions)
      boolean createParent, short replication, long blockSize,
      CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
      throws IOException {
    checkNNStartup();
    String clientMachine = getClientMachine();
@ -747,7 +747,7 @@ public HdfsFileStatus create(String src, FsPermission masked,
          .getShortUserName(), null, masked);
      status = namesystem.startFile(src, perm, clientName, clientMachine,
          flag.get(), createParent, replication, blockSize, supportedVersions,
          cacheEntry != null);
          ecPolicyName, cacheEntry != null);
    } finally {
      RetryCache.setState(cacheEntry, status != null, status);
    }
@ -0,0 +1,45 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.ha;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.security.UserGroupInformation;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.atomic.AtomicBoolean;

public class NameNodeHAProxyFactory<T> implements HAProxyFactory<T> {

  @Override
  public T createProxy(Configuration conf, InetSocketAddress nnAddr,
      Class<T> xface, UserGroupInformation ugi, boolean withRetries,
      AtomicBoolean fallbackToSimpleAuth) throws IOException {
    return NameNodeProxies.createNonHAProxy(conf, nnAddr, xface,
        ugi, withRetries, fallbackToSimpleAuth).getProxy();
  }

  @Override
  public T createProxy(Configuration conf, InetSocketAddress nnAddr,
      Class<T> xface, UserGroupInformation ugi, boolean withRetries)
      throws IOException {
    return NameNodeProxies.createNonHAProxy(conf, nnAddr, xface,
        ugi, withRetries).getProxy();
  }
}
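The new factory only delegates to NameNodeProxies.createNonHAProxy. A hedged sketch of obtaining a ClientProtocol proxy through it; the NameNode address is a placeholder, and in practice the HA failover machinery constructs the factory rather than user code:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.namenode.ha.NameNodeHAProxyFactory;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyFactoryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder address; normally taken from the configured NameNode
    // RPC address keys.
    InetSocketAddress nnAddr = new InetSocketAddress("nn1.example.com", 8020);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    NameNodeHAProxyFactory<ClientProtocol> factory =
        new NameNodeHAProxyFactory<>();
    ClientProtocol proxy =
        factory.createProxy(conf, nnAddr, ClientProtocol.class, ugi, true);
    // The proxy can now be used for ClientProtocol RPCs, for example:
    System.out.println(proxy.getServerDefaults());
  }
}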
@ -0,0 +1,2 @@
User-agent: *
Disallow: /
@ -0,0 +1,2 @@
User-agent: *
Disallow: /
@ -0,0 +1,2 @@
User-agent: *
Disallow: /