Merge branch 'trunk' into HDFS-6584

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
Tsz-Wo Nicholas Sze 2014-09-08 10:54:48 +08:00
commit f1432e2424
39 changed files with 952 additions and 161 deletions

View File

@ -507,6 +507,8 @@ Release 2.6.0 - UNRELEASED
HADOOP-11060. Create a CryptoCodec test that verifies interoperability
between the JCE and OpenSSL implementations. (hitliuyi via tucu)
HADOOP-11070. Create MiniKMS for testing. (tucu)
OPTIMIZATIONS
HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
@ -760,6 +762,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-11063. KMS cannot deploy on Windows, because class names are too long.
(cnauroth)
HADOOP-11067. warning message 'ssl.client.truststore.location has not
been set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal)
HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to
determine if in proxyuser mode or not. (tucu)
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -385,9 +385,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
// if current UGI is different from UGI at constructor time, behave as
// proxyuser
UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
final String doAsUser =
(loginUgi.getShortUserName().equals(currentUgi.getShortUserName()))
? null : currentUgi.getShortUserName();
final String doAsUser = (currentUgi.getAuthenticationMethod() ==
UserGroupInformation.AuthenticationMethod.PROXY)
? currentUgi.getShortUserName() : null;
// creating the HTTP connection using the current UGI at constructor time
conn = loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
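The change above (HADOOP-11069) replaces the user-name comparison with a check of the current UGI's authentication method: only a UGI created through createProxyUser carries AuthenticationMethod.PROXY and should trigger a doAs request. A minimal sketch of that decision, assuming only the standard UserGroupInformation API (the user name "foo" and the helper class are illustrative):

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

class ProxyUserSketch {
  // Mirrors the new KMSClientProvider decision in isolation.
  static String doAsUserFor(UserGroupInformation ugi) {
    return ugi.getAuthenticationMethod() == AuthenticationMethod.PROXY
        ? ugi.getShortUserName()   // forward the proxied user as doAs
        : null;                    // kerberos/simple logins send no doAs
  }

  static void example() throws IOException {
    UserGroupInformation realUser = UserGroupInformation.getCurrentUser();
    UserGroupInformation fooUgi =
        UserGroupInformation.createProxyUser("foo", realUser);
    assert "foo".equals(doAsUserFor(fooUgi));  // proxy-user mode
    assert doAsUserFor(realUser) == null;      // plain login, no doAs
  }
}

The TestKMS hunks further down make the matching switch on the client side, replacing createRemoteUser("foo") with createProxyUser("foo", clientUgi) so that the new check actually fires.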

View File

@ -222,9 +222,9 @@
</goals>
<configuration>
<target>
<mkdir dir="${project.build.directory}/test-classes/webapp"/>
<mkdir dir="${project.build.directory}/test-classes/kms-webapp"/>
<copy todir="${project.build.directory}/test-classes/webapp">
<copy todir="${project.build.directory}/test-classes/kms-webapp">
<fileset dir="${basedir}/src/main/webapp"/>
</copy>
</target>

View File

@ -0,0 +1,197 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.File;
import java.io.FileWriter;
import java.io.Writer;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
public class MiniKMS {
private static Server createJettyServer(String keyStore, String password) {
try {
boolean ssl = keyStore != null;
InetAddress localhost = InetAddress.getByName("localhost");
String host = "localhost";
ServerSocket ss = new ServerSocket(0, 50, localhost);
int port = ss.getLocalPort();
ss.close();
Server server = new Server(0);
if (!ssl) {
server.getConnectors()[0].setHost(host);
server.getConnectors()[0].setPort(port);
} else {
SslSocketConnector c = new SslSocketConnector();
c.setHost(host);
c.setPort(port);
c.setNeedClientAuth(false);
c.setKeystore(keyStore);
c.setKeystoreType("jks");
c.setKeyPassword(password);
server.setConnectors(new Connector[]{c});
}
return server;
} catch (Exception ex) {
throw new RuntimeException("Could not start embedded servlet container, "
+ ex.getMessage(), ex);
}
}
private static URL getJettyURL(Server server) {
boolean ssl = server.getConnectors()[0].getClass()
== SslSocketConnector.class;
try {
String scheme = (ssl) ? "https" : "http";
return new URL(scheme + "://" +
server.getConnectors()[0].getHost() + ":" +
server.getConnectors()[0].getPort());
} catch (MalformedURLException ex) {
throw new RuntimeException("It should never happen, " + ex.getMessage(),
ex);
}
}
public static class Builder {
private File kmsConfDir;
private String log4jConfFile;
private File keyStoreFile;
private String keyStorePassword;
public Builder() {
kmsConfDir = new File("target/test-classes").getAbsoluteFile();
log4jConfFile = "kms-log4j.properties";
}
public Builder setKmsConfDir(File confDir) {
Preconditions.checkNotNull(confDir, "KMS conf dir is NULL");
Preconditions.checkArgument(confDir.exists(),
"KMS conf dir does not exist");
kmsConfDir = confDir;
return this;
}
public Builder setLog4jConfFile(String log4jConfFile) {
Preconditions.checkNotNull(log4jConfFile, "log4jconf file is NULL");
this.log4jConfFile = log4jConfFile;
return this;
}
public Builder setSslConf(File keyStoreFile, String keyStorePassword) {
Preconditions.checkNotNull(keyStoreFile, "keystore file is NULL");
Preconditions.checkNotNull(keyStorePassword, "keystore password is NULL");
Preconditions.checkArgument(keyStoreFile.exists(),
"keystore file does not exist");
this.keyStoreFile = keyStoreFile;
this.keyStorePassword = keyStorePassword;
return this;
}
public MiniKMS build() {
Preconditions.checkArgument(kmsConfDir.exists(),
"KMS conf dir does not exist");
return new MiniKMS(kmsConfDir.getAbsolutePath(), log4jConfFile,
(keyStoreFile != null) ? keyStoreFile.getAbsolutePath() : null,
keyStorePassword);
}
}
private String kmsConfDir;
private String log4jConfFile;
private String keyStore;
private String keyStorePassword;
private Server jetty;
private URL kmsURL;
public MiniKMS(String kmsConfDir, String log4ConfFile, String keyStore,
String password) {
this.kmsConfDir = kmsConfDir;
this.log4jConfFile = log4ConfFile;
this.keyStore = keyStore;
this.keyStorePassword = password;
}
public void start() throws Exception {
System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, kmsConfDir);
File aclsFile = new File(kmsConfDir, "kms-acls.xml");
if (!aclsFile.exists()) {
Configuration acls = new Configuration(false);
Writer writer = new FileWriter(aclsFile);
acls.writeXml(writer);
writer.close();
}
File coreFile = new File(kmsConfDir, "core-site.xml");
if (!coreFile.exists()) {
Configuration core = new Configuration();
Writer writer = new FileWriter(coreFile);
core.writeXml(writer);
writer.close();
}
File kmsFile = new File(kmsConfDir, "kms-site.xml");
if (!kmsFile.exists()) {
Configuration kms = new Configuration(false);
kms.set("hadoop.security.key.provider.path",
"jceks://file@" + kmsConfDir + "/kms.keystore");
kms.set("hadoop.kms.authentication.type", "simple");
Writer writer = new FileWriter(kmsFile);
kms.writeXml(writer);
writer.close();
}
System.setProperty("log4j.configuration", log4jConfFile);
jetty = createJettyServer(keyStore, keyStorePassword);
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("kms-webapp");
if (url == null) {
throw new RuntimeException(
"Could not find kms-webapp/ dir in test classpath");
}
WebAppContext context = new WebAppContext(url.getPath(), "/kms");
jetty.addHandler(context);
jetty.start();
kmsURL = new URL(getJettyURL(jetty), "kms");
}
public URL getKMSUrl() {
return kmsURL;
}
public void stop() {
if (jetty != null && jetty.isRunning()) {
try {
jetty.stop();
jetty = null;
} catch (Exception ex) {
throw new RuntimeException("Could not stop MiniKMS embedded Jetty, " +
ex.getMessage(), ex);
}
}
}
}
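MiniKMS (HADOOP-11070) extracts the embedded-Jetty bootstrap that TestKMS previously carried inline. A hedged usage sketch, mirroring how the refactored TestKMS.runServer below drives it; the conf directory and keystore values are placeholders:

import java.io.File;
import java.net.URL;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;

class MiniKMSUsageSketch {
  static void runWithKms() throws Exception {
    MiniKMS miniKMS = new MiniKMS.Builder()
        .setKmsConfDir(new File("target/test-classes"))  // must already exist
        .setLog4jConfFile("kms-log4j.properties")
        // .setSslConf(new File("keystore.jks"), "password")  // optional SSL
        .build();
    miniKMS.start();
    try {
      URL kmsUrl = miniKMS.getKMSUrl();  // e.g. http://localhost:<port>/kms
      // ... point a KeyProvider / KMSClientProvider at kmsUrl ...
    } finally {
      miniKMS.stop();
    }
  }
}

The kms-webapp test resource directory renamed in the pom hunk above is what MiniKMS.start() looks up on the test classpath.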

View File

@ -36,10 +36,6 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.webapp.WebAppContext;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
@ -52,7 +48,6 @@ import java.io.IOException;
import java.io.Writer;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.net.URI;
@ -91,49 +86,6 @@ public class TestKMS {
return file;
}
public static Server createJettyServer(String keyStore, String password) {
try {
boolean ssl = keyStore != null;
InetAddress localhost = InetAddress.getByName("localhost");
String host = "localhost";
ServerSocket ss = new ServerSocket(0, 50, localhost);
int port = ss.getLocalPort();
ss.close();
Server server = new Server(0);
if (!ssl) {
server.getConnectors()[0].setHost(host);
server.getConnectors()[0].setPort(port);
} else {
SslSocketConnector c = new SslSocketConnector();
c.setHost(host);
c.setPort(port);
c.setNeedClientAuth(false);
c.setKeystore(keyStore);
c.setKeystoreType("jks");
c.setKeyPassword(password);
server.setConnectors(new Connector[]{c});
}
return server;
} catch (Exception ex) {
throw new RuntimeException("Could not start embedded servlet container, "
+ ex.getMessage(), ex);
}
}
public static URL getJettyURL(Server server) {
boolean ssl = server.getConnectors()[0].getClass()
== SslSocketConnector.class;
try {
String scheme = (ssl) ? "https" : "http";
return new URL(scheme + "://" +
server.getConnectors()[0].getHost() + ":" +
server.getConnectors()[0].getPort());
} catch (MalformedURLException ex) {
throw new RuntimeException("It should never happen, " + ex.getMessage(),
ex);
}
}
public static abstract class KMSCallable implements Callable<Void> {
private URL kmsUrl;
@ -144,33 +96,19 @@ public class TestKMS {
protected void runServer(String keystore, String password, File confDir,
KMSCallable callable) throws Exception {
System.setProperty(KMSConfiguration.KMS_CONFIG_DIR,
confDir.getAbsolutePath());
System.setProperty("log4j.configuration", "log4j.properties");
Server jetty = createJettyServer(keystore, password);
MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder().setKmsConfDir(confDir)
.setLog4jConfFile("log4j.properties");
if (keystore != null) {
miniKMSBuilder.setSslConf(new File(keystore), password);
}
MiniKMS miniKMS = miniKMSBuilder.build();
miniKMS.start();
try {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("webapp");
if (url == null) {
throw new RuntimeException(
"Could not find webapp/ dir in test classpath");
}
WebAppContext context = new WebAppContext(url.getPath(), "/kms");
jetty.addHandler(context);
jetty.start();
url = new URL(getJettyURL(jetty), "kms");
System.out.println("Test KMS running at: " + url);
callable.kmsUrl = url;
System.out.println("Test KMS running at: " + miniKMS.getKMSUrl());
callable.kmsUrl = miniKMS.getKMSUrl();
callable.call();
} finally {
if (jetty != null && jetty.isRunning()) {
try {
jetty.stop();
} catch (Exception ex) {
throw new RuntimeException("Could not stop embedded Jetty, " +
ex.getMessage(), ex);
}
}
miniKMS.stop();
}
}
@ -1219,7 +1157,7 @@ public class TestKMS {
final URI uri = createKMSUri(getKMSUrl());
// proxyuser client using kerberos credentials
UserGroupInformation clientUgi = UserGroupInformation.
final UserGroupInformation clientUgi = UserGroupInformation.
loginUserFromKeytabAndReturnUGI("client", keytab.getAbsolutePath());
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
@ -1229,7 +1167,7 @@ public class TestKMS {
// authorized proxyuser
UserGroupInformation fooUgi =
UserGroupInformation.createRemoteUser("foo");
UserGroupInformation.createProxyUser("foo", clientUgi);
fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
@ -1241,7 +1179,7 @@ public class TestKMS {
// unauthorized proxyuser
UserGroupInformation foo1Ugi =
UserGroupInformation.createRemoteUser("foo1");
UserGroupInformation.createProxyUser("foo1", clientUgi);
foo1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {

View File

@ -467,6 +467,8 @@ Release 2.6.0 - UNRELEASED
HDFS-6376. Distcp data between two HA clusters requires another configuration.
(Dave Marion and Haohui Mai via jing9)
HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
@ -633,8 +635,11 @@ Release 2.6.0 - UNRELEASED
HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via
Arpit Agarwal)
HDFS-6998. warning message 'ssl.client.truststore.location has not been
set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal)
HDFS-6898. DN must reserve space for a full block when an RBW block is
created. (Arpit Agarwal)
HDFS-7025. HDFS Credential Provider related Unit Test Failure.
(Xiaoyu Yao via cnauroth)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
@ -737,6 +742,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6714. TestBlocksScheduledCounter#testBlocksScheduledCounter should
shutdown cluster (vinayakumarb)
HDFS-6986. DistributedFileSystem must get delegation tokens from configured
KeyProvider. (zhz via tucu)
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -3103,4 +3103,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
DFSHedgedReadMetrics getHedgedReadMetrics() {
return HEDGED_READ_METRIC;
}
public KeyProviderCryptoExtension getKeyProvider() {
return provider;
}
}

View File

@ -84,8 +84,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@ -1972,6 +1974,28 @@ public class DistributedFileSystem extends FileSystem {
}.resolve(this, absF);
}
@Override
public Token<?>[] addDelegationTokens(
final String renewer, Credentials credentials) throws IOException {
Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
if (dfs.getKeyProvider() != null) {
KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(dfs.getKeyProvider());
Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
addDelegationTokens(renewer, credentials);
if (tokens != null && kpTokens != null) {
Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
System.arraycopy(tokens, 0, all, 0, tokens.length);
System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
tokens = all;
} else {
tokens = (tokens != null) ? tokens : kpTokens;
}
}
return tokens;
}
public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
return dfs.getInotifyEventStream();
}
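With HDFS-6986, a single addDelegationTokens call now returns the NameNode delegation token plus, when a key provider is configured, the KMS delegation token needed to work with encrypted files. A hedged usage sketch; the renewer name is illustrative and matches the test added below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

class DelegationTokenSketch {
  static void collectTokens() throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Credentials creds = new Credentials();

    // One call: HDFS tokens first, then any tokens from the KeyProvider.
    Token<?>[] tokens = fs.addDelegationTokens("JobTracker", creds);
    for (Token<?> t : tokens) {
      System.out.println("token kind: " + t.getKind());
    }
  }
}

TestEncryptionZones#testDelegationToken below exercises exactly this path and expects two tokens: the HDFS token and the mocked key-provider token.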

View File

@ -48,7 +48,7 @@ public class HdfsConstants {
"org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";
public static final int MIN_BLOCKS_FOR_WRITE = 5;
public static final int MIN_BLOCKS_FOR_WRITE = 1;
// Long that indicates "leave current quota unchanged"
public static final long QUOTA_DONT_SET = Long.MAX_VALUE;

View File

@ -165,7 +165,7 @@ public class BlockManager {
final BlocksMap blocksMap;
/** Replication thread. */
final Daemon replicationThread = new Daemon(new ReplicationMonitor());
Daemon replicationThread;
/** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@ -265,6 +265,7 @@ public class BlockManager {
this.namesystem = namesystem;
datanodeManager = new DatanodeManager(this, namesystem, conf);
heartbeatManager = datanodeManager.getHeartbeatManager();
setReplicationMonitor(new ReplicationMonitor());
final long pendingPeriod = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@ -402,6 +403,22 @@ public class BlockManager {
return storagePolicySuite.getPolicy(policyName);
}
public long getReplicationRecheckInterval() {
return replicationRecheckInterval;
}
public AtomicLong excessBlocksCount() {
return excessBlocksCount;
}
public void clearInvalidateBlocks() {
invalidateBlocks.clear();
}
void setReplicationMonitor(Runnable replicationMonitor) {
replicationThread = new Daemon(replicationMonitor);
}
public void setBlockPoolId(String blockPoolId) {
if (isBlockTokenEnabled()) {
blockTokenSecretManager.setBlockPoolId(blockPoolId);
@ -1647,7 +1664,7 @@ public class BlockManager {
* If there were any replication requests that timed out, reap them
* and put them back into the neededReplication queue
*/
private void processPendingReplications() {
void processPendingReplications() {
Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
if (timedOutItems != null) {
namesystem.writeLock();

View File

@ -1053,7 +1053,7 @@ public class DatanodeManager {
* 3. Added to exclude --> start decommission.
* 4. Removed from exclude --> stop decommission.
*/
private void refreshDatanodes() {
void refreshDatanodes() {
for(DatanodeDescriptor node : datanodeMap.values()) {
// Check if not include.
if (!hostFileManager.isIncluded(node)) {
@ -1586,5 +1586,9 @@ public class DatanodeManager {
public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) {
this.shouldSendCachingCommands = shouldSendCachingCommands;
}
public HostFileManager getHostFileManager() {
return this.hostFileManager;
}
}

View File

@ -129,6 +129,10 @@ class HostFileManager {
void refresh(String includeFile, String excludeFile) throws IOException {
HostSet newIncludes = readFile("included", includeFile);
HostSet newExcludes = readFile("excluded", excludeFile);
setHosts(newIncludes, newExcludes);
}
void setHosts(HostSet newIncludes, HostSet newExcludes) {
synchronized (this) {
includes = newIncludes;
excludes = newExcludes;

View File

@ -34,10 +34,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
public ReplicaBeingWritten(long blockId, long genStamp,
FsVolumeSpi vol, File dir) {
super( blockId, genStamp, vol, dir);
FsVolumeSpi vol, File dir, long bytesToReserve) {
super(blockId, genStamp, vol, dir, bytesToReserve);
}
/**
@ -60,10 +62,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline {
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
public ReplicaBeingWritten(long blockId, long len, long genStamp,
FsVolumeSpi vol, File dir, Thread writer ) {
super( blockId, len, genStamp, vol, dir, writer);
FsVolumeSpi vol, File dir, Thread writer, long bytesToReserve) {
super(blockId, len, genStamp, vol, dir, writer, bytesToReserve);
}
/**

View File

@ -45,16 +45,25 @@ public class ReplicaInPipeline extends ReplicaInfo
private byte[] lastChecksum;
private Thread writer;
/**
* Bytes reserved for this replica on the containing volume.
* Based on the difference between the estimated maximum block length and
* the bytes already written to this block.
*/
private long bytesReserved;
/**
* Constructor for a zero length replica
* @param blockId block id
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
public ReplicaInPipeline(long blockId, long genStamp,
FsVolumeSpi vol, File dir) {
this( blockId, 0L, genStamp, vol, dir, Thread.currentThread());
FsVolumeSpi vol, File dir, long bytesToReserve) {
this(blockId, 0L, genStamp, vol, dir, Thread.currentThread(), bytesToReserve);
}
/**
@ -67,7 +76,7 @@ public class ReplicaInPipeline extends ReplicaInfo
ReplicaInPipeline(Block block,
FsVolumeSpi vol, File dir, Thread writer) {
this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
vol, dir, writer);
vol, dir, writer, 0L);
}
/**
@ -78,13 +87,16 @@ public class ReplicaInPipeline extends ReplicaInfo
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
ReplicaInPipeline(long blockId, long len, long genStamp,
FsVolumeSpi vol, File dir, Thread writer ) {
FsVolumeSpi vol, File dir, Thread writer, long bytesToReserve) {
super( blockId, len, genStamp, vol, dir);
this.bytesAcked = len;
this.bytesOnDisk = len;
this.writer = writer;
this.bytesReserved = bytesToReserve;
}
/**
@ -96,6 +108,7 @@ public class ReplicaInPipeline extends ReplicaInfo
this.bytesAcked = from.getBytesAcked();
this.bytesOnDisk = from.getBytesOnDisk();
this.writer = from.writer;
this.bytesReserved = from.bytesReserved;
}
@Override
@ -115,7 +128,14 @@ public class ReplicaInPipeline extends ReplicaInfo
@Override // ReplicaInPipelineInterface
public void setBytesAcked(long bytesAcked) {
long newBytesAcked = bytesAcked - this.bytesAcked;
this.bytesAcked = bytesAcked;
// Once bytes are ACK'ed we can release equivalent space from the
// volume's reservedForRbw count. We could have released it as soon
// as the write-to-disk completed but that would be inefficient.
getVolume().releaseReservedSpace(newBytesAcked);
bytesReserved -= newBytesAcked;
}
@Override // ReplicaInPipelineInterface
@ -123,6 +143,11 @@ public class ReplicaInPipeline extends ReplicaInfo
return bytesOnDisk;
}
@Override
public long getBytesReserved() {
return bytesReserved;
}
@Override // ReplicaInPipelineInterface
public synchronized void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) {
this.bytesOnDisk = dataLength;
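setBytesAcked now shrinks the replica's reservation by exactly the newly acknowledged bytes instead of waiting for the block to be finalized. A small numeric walk-through with illustrative figures only:

class RbwReservationSketch {
  static void demo() {
    // A 1 MB block is created as RBW, so the full estimated block length is
    // reserved on the volume up front.
    long blockSize     = 1024 * 1024;
    long bytesReserved = blockSize;      // reserved at createRbwFile() time
    long bytesAcked    = 0;

    // Downstream ACKs 64 KB: setBytesAcked(65536) releases only the delta.
    long newAck = 64 * 1024;
    long delta  = newAck - bytesAcked;   // 65536
    bytesReserved -= delta;              // 1048576 - 65536 = 983040
    bytesAcked = newAck;

    // On finalize, addFinalizedBlock() releases whatever reservation is left,
    // so the volume's reservedForRbw returns to zero for this replica.
    System.out.println("still reserved: " + bytesReserved);
  }
}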

View File

@ -223,6 +223,13 @@ abstract public class ReplicaInfo extends Block implements Replica {
// no need to be unlinked
}
/**
* Number of bytes reserved for this replica on disk.
*/
public long getBytesReserved() {
return 0;
}
/**
* Copy specified file into a temporary file. Then rename the
* temporary file to the original name. This will cause any

View File

@ -45,4 +45,15 @@ public interface FsVolumeSpi {
public File getFinalizedDir(String bpid) throws IOException;
public StorageType getStorageType();
/**
* Reserve disk space for an RBW block so a writer does not run out of
* space before the block is full.
*/
public void reserveSpaceForRbw(long bytesToReserve);
/**
* Release disk space previously reserved for RBW block.
*/
public void releaseReservedSpace(long bytesToRelease);
}

View File

@ -240,7 +240,7 @@ class BlockPoolSlice {
return DatanodeUtil.createTmpFile(b, f);
}
File addBlock(Block b, File f) throws IOException {
File addFinalizedBlock(Block b, File f) throws IOException {
File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
if (!blockDir.exists()) {
if (!blockDir.mkdirs()) {
@ -334,9 +334,11 @@ class BlockPoolSlice {
// The restart meta file exists
if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
// It didn't expire. Load the replica as a RBW.
// We don't know the expected block length, so just use 0
// and don't reserve any more space for writes.
newReplica = new ReplicaBeingWritten(blockId,
validateIntegrityAndSetLength(file, genStamp),
genStamp, volume, file.getParentFile(), null);
genStamp, volume, file.getParentFile(), null, 0);
loadRwr = false;
}
sc.close();

View File

@ -593,7 +593,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
+ " from " + srcfile + " to " + dstfile.getAbsolutePath(), e);
}
if (LOG.isDebugEnabled()) {
LOG.debug("addBlock: Moved " + srcmeta + " to " + dstmeta
LOG.debug("addFinalizedBlock: Moved " + srcmeta + " to " + dstmeta
+ " and " + srcfile + " to " + dstfile);
}
return dstfile;
@ -712,7 +712,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
File oldmeta = replicaInfo.getMetaFile();
ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS,
v, newBlkFile.getParentFile(), Thread.currentThread());
v, newBlkFile.getParentFile(), Thread.currentThread(), estimateBlockLen);
File newmeta = newReplicaInfo.getMetaFile();
// rename meta file to rbw directory
@ -748,7 +748,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// Replace finalized replica by a RBW replica in replicas map
volumeMap.add(bpid, newReplicaInfo);
v.reserveSpaceForRbw(estimateBlockLen - replicaInfo.getNumBytes());
return newReplicaInfo;
}
@ -876,7 +876,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// create a rbw file to hold block in the designated volume
File f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(),
b.getGenerationStamp(), v, f.getParentFile());
b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes());
volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
return newReplicaInfo;
}
@ -992,7 +992,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// create RBW
final ReplicaBeingWritten rbw = new ReplicaBeingWritten(
blockId, numBytes, expectedGs,
v, dest.getParentFile(), Thread.currentThread());
v, dest.getParentFile(), Thread.currentThread(), 0);
rbw.setBytesAcked(visible);
// overwrite the RBW in the volume map
volumeMap.add(b.getBlockPoolId(), rbw);
@ -1013,7 +1013,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// create a temporary file to hold block in the designated volume
File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(),
b.getGenerationStamp(), v, f.getParentFile());
b.getGenerationStamp(), v, f.getParentFile(), 0);
volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
return newReplicaInfo;
@ -1079,7 +1079,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
" for block " + replicaInfo);
}
File dest = v.addBlock(bpid, replicaInfo, f);
File dest = v.addFinalizedBlock(
bpid, replicaInfo, f, replicaInfo.getBytesReserved());
newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());
}
volumeMap.add(bpid, newReplicaInfo);

View File

@ -28,6 +28,7 @@ import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
@ -62,6 +63,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
private final DF usage;
private final long reserved;
// Disk space reserved for open blocks.
private AtomicLong reservedForRbw;
// Capacity configured. This is useful when we want to
// limit the visible capacity for tests. If negative, then we just
// query from the filesystem.
@ -82,6 +86,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
this.reserved = conf.getLong(
DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
this.reservedForRbw = new AtomicLong(0L);
this.currentDir = currentDir;
File parent = currentDir.getParentFile();
this.usage = new DF(parent, conf);
@ -166,7 +171,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
@Override
public long getAvailable() throws IOException {
long remaining = getCapacity()-getDfsUsed();
long remaining = getCapacity() - getDfsUsed() - reservedForRbw.get();
long available = usage.getAvailable();
if (remaining > available) {
remaining = available;
@ -174,6 +179,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
return (remaining > 0) ? remaining : 0;
}
@VisibleForTesting
public long getReservedForRbw() {
return reservedForRbw.get();
}
long getReserved(){
return reserved;
}
@ -217,16 +227,58 @@ public class FsVolumeImpl implements FsVolumeSpi {
return getBlockPoolSlice(bpid).createTmpFile(b);
}
@Override
public void reserveSpaceForRbw(long bytesToReserve) {
if (bytesToReserve != 0) {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug("Reserving " + bytesToReserve + " on volume " + getBasePath());
}
reservedForRbw.addAndGet(bytesToReserve);
}
}
@Override
public void releaseReservedSpace(long bytesToRelease) {
if (bytesToRelease != 0) {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug("Releasing " + bytesToRelease + " on volume " + getBasePath());
}
long oldReservation, newReservation;
do {
oldReservation = reservedForRbw.get();
newReservation = oldReservation - bytesToRelease;
if (newReservation < 0) {
// Failsafe, this should never occur in practice, but if it does we don't
// want to start advertising more space than we have available.
newReservation = 0;
}
} while (!reservedForRbw.compareAndSet(oldReservation, newReservation));
}
}
/**
* RBW files. They get moved to the finalized block directory when
* the block is finalized.
*/
File createRbwFile(String bpid, Block b) throws IOException {
reserveSpaceForRbw(b.getNumBytes());
return getBlockPoolSlice(bpid).createRbwFile(b);
}
File addBlock(String bpid, Block b, File f) throws IOException {
return getBlockPoolSlice(bpid).addBlock(b, f);
/**
* Move a block file into the finalized directory for this block pool and
* release any disk space still reserved for the replica.
*
* @param bytesReservedForRbw Space that was reserved during
* block creation. Now that the block is being finalized we
* can free up this space.
* @return the block file in its finalized location
* @throws IOException
*/
File addFinalizedBlock(String bpid, Block b,
File f, long bytesReservedForRbw)
throws IOException {
releaseReservedSpace(bytesReservedForRbw);
return getBlockPoolSlice(bpid).addFinalizedBlock(b, f);
}
Executor getCacheExecutor() {
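FsVolumeImpl now subtracts outstanding RBW reservations from the space it advertises, so writers with open blocks are not starved by new allocations. A hedged numeric sketch of the getAvailable() accounting; the capacity and usage figures are made up:

class VolumeAccountingSketch {
  static void demo() {
    long capacity       = 100L * 1024 * 1024 * 1024;  // 100 GB configured
    long dfsUsed        =  40L * 1024 * 1024 * 1024;  // finalized block data
    long reservedForRbw =   2L * 1024 * 1024 * 1024;  // open RBW reservations

    // Space promised to open blocks is no longer advertised as available.
    long remaining = capacity - dfsUsed - reservedForRbw;  // 58 GB
    // getAvailable() additionally caps this by the filesystem's real free
    // space (usage.getAvailable()) and floors the result at zero.
    System.out.println("advertised available: " + remaining);
  }
}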

View File

@ -978,7 +978,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return Collections.unmodifiableList(auditLoggers);
}
private void loadFSImage(StartupOption startOpt) throws IOException {
protected void loadFSImage(StartupOption startOpt) throws IOException {
final FSImage fsImage = getFSImage();
// format before starting up if requested
@ -1026,7 +1026,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
imageLoadComplete();
}
private void startSecretManager() {
protected void startSecretManager() {
if (dtSecretManager != null) {
try {
dtSecretManager.startThreads();
@ -1038,7 +1038,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
private void startSecretManagerIfNecessary() {
protected void startSecretManagerIfNecessary() {
boolean shouldRun = shouldUseDelegationTokens() &&
!isInSafeMode() && getEditLog().isOpenForWrite();
boolean running = dtSecretManager.isRunning();
@ -1188,7 +1188,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return haEnabled && inActiveState() && startingActiveService;
}
private boolean shouldUseDelegationTokens() {
protected boolean shouldUseDelegationTokens() {
return UserGroupInformation.isSecurityEnabled() ||
alwaysUseDelegationTokensForTests;
}
@ -2775,6 +2775,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @throws UnresolvedLinkException
* @throws IOException
*/
protected
LocatedBlock prepareFileForWrite(String src, INodeFile file,
String leaseHolder, String clientMachine,
boolean writeToEditLog,
@ -3234,6 +3235,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return new FileState(pendingFile, src);
}
protected
LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
long offset) throws IOException {
LocatedBlock lBlk = new LocatedBlock(
@ -3352,8 +3354,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return true;
}
private INodeFile checkLease(String src, String holder, INode inode,
long fileId)
protected INodeFile checkLease(String src, String holder, INode inode,
long fileId)
throws LeaseExpiredException, FileNotFoundException {
assert hasReadLock();
final String ident = src + " (inode " + fileId + ")";
@ -4472,7 +4474,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return leaseManager.reassignLease(lease, src, newHolder);
}
private void commitOrCompleteLastBlock(final INodeFile fileINode,
protected void commitOrCompleteLastBlock(final INodeFile fileINode,
final Block commitBlock) throws IOException {
assert hasWriteLock();
Preconditions.checkArgument(fileINode.isUnderConstruction());
@ -4870,6 +4872,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @return an array of datanode commands
* @throws IOException
*/
protected
HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
StorageReport[] reports, long cacheCapacity, long cacheUsed,
int xceiverCount, int xmitsInProgress, int failedVolumes)
@ -4919,8 +4922,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @param file
* @param logRetryCache
*/
private void persistBlocks(String path, INodeFile file,
boolean logRetryCache) {
protected void persistBlocks(String path, INodeFile file,
boolean logRetryCache) {
assert hasWriteLock();
Preconditions.checkArgument(file.isUnderConstruction());
getEditLog().logUpdateBlocks(path, file, logRetryCache);
@ -5345,7 +5348,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @param path
* @param file
*/
private void persistNewBlock(String path, INodeFile file) {
protected void persistNewBlock(String path, INodeFile file) {
Preconditions.checkArgument(file.isUnderConstruction());
getEditLog().logAddBlock(path, file);
if (NameNode.stateChangeLog.isDebugEnabled()) {
@ -7223,7 +7226,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
*
* @return true if delegation token operation is allowed
*/
private boolean isAllowedDelegationTokenOp() throws IOException {
protected boolean isAllowedDelegationTokenOp() throws IOException {
AuthenticationMethod authMethod = getConnectionAuthenticationMethod();
if (UserGroupInformation.isSecurityEnabled()
&& (authMethod != AuthenticationMethod.KERBEROS)
@ -7390,7 +7393,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
for (DatanodeDescriptor node : live) {
Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
info.put(node.getHostName(), getLiveNodeInfo(node));
}
return JSON.toString(info);
}
protected Map<String, Object> getLiveNodeInfo(DatanodeDescriptor node) {
return ImmutableMap.<String, Object>builder()
.put("infoAddr", node.getInfoAddr())
.put("infoSecureAddr", node.getInfoSecureAddr())
.put("xferaddr", node.getXferAddr())
@ -7408,10 +7417,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
.put("blockPoolUsedPercent", node.getBlockPoolUsedPercent())
.put("volfails", node.getVolumeFailures())
.build();
info.put(node.getHostName(), innerinfo);
}
return JSON.toString(info);
}
/**
@ -7697,16 +7702,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return fsLock.longReadLock;
}
@VisibleForTesting
public SafeModeInfo getSafeModeInfoForTests() {
return safeMode;
}
@VisibleForTesting
public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) {
this.nnResourceChecker = nnResourceChecker;
}
public SafeModeInfo getSafeModeInfo() {
return safeMode;
}
@Override
public boolean isAvoidingStaleDataNodesForWrite() {
return this.blockManager.getDatanodeManager()

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
@ -64,8 +65,9 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
tmpDir = new File(System.getProperty("test.build.data", "target"),
UUID.randomUUID().toString()).getAbsoluteFile();
final Path jksPath = new Path(tmpDir.toString(), "test.jks");
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks");
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
dfsCluster.waitClusterUp();

View File

@ -59,6 +59,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@ -809,8 +810,9 @@ public class TestDFSUtil {
"target/test-dir"));
Configuration conf = new Configuration();
final Path jksPath = new Path(testDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(testDir, "test.jks");
file.delete();
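The provider URI changes in this and the other test hunks (HDFS-7025, YARN-2519) exist because concatenating a raw java.io.File path breaks on Windows, where the path contains a drive letter and backslashes. A hedged sketch of the difference; the directory is an example, the literal jceks scheme stands in for JavaKeyStoreProvider.SCHEME_NAME, and the described normalization applies when running on Windows:

import java.io.File;
import org.apache.hadoop.fs.Path;

class ProviderUriSketch {
  static void demo() {
    File testDir = new File("C:\\work\\target\\test-dir");  // Windows-style path

    // Old form: File#toString keeps the backslashes, producing an invalid URI
    // like "jceks://file/C:\work\target\test-dir/test.jks".
    String oldUrl = "jceks://file/" + testDir + "/test.jks";

    // New form: on Windows, Path normalizes the separators and prefixes the
    // drive with '/', so toUri() yields "/C:/work/target/test-dir/test.jks"
    // and the provider URI is a valid "jceks://file/C:/..." everywhere.
    Path jksPath = new Path(testDir.toString(), "test.jks");
    String newUrl = "jceks://file" + jksPath.toUri();

    System.out.println(oldUrl + " -> " + newUrl);
  }
}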

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.FSTestWrapper;
import org.apache.hadoop.fs.FileContext;
@ -51,12 +52,22 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import static org.mockito.Mockito.withSettings;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
@ -88,9 +99,11 @@ public class TestEncryptionZones {
// Set up java key store
String testRoot = fsHelper.getTestRootDir();
testRootDir = new File(testRoot).getAbsoluteFile();
final Path jksPath = new Path(testRootDir.toString(), "test.jks");
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
// Lower the batch size for testing
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
2);
@ -312,7 +325,7 @@ public class TestEncryptionZones {
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] { "mygroup" });
final Path testRoot = new Path(fsHelper.getTestRootDir());
final Path testRoot = new Path("/tmp/TestEncryptionZones");
final Path superPath = new Path(testRoot, "superuseronly");
final Path allPath = new Path(testRoot, "accessall");
@ -346,7 +359,7 @@ public class TestEncryptionZones {
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] { "mygroup" });
final Path testRoot = new Path(fsHelper.getTestRootDir());
final Path testRoot = new Path("/tmp/TestEncryptionZones");
final Path superPath = new Path(testRoot, "superuseronly");
final Path superPathFile = new Path(superPath, "file1");
final Path allPath = new Path(testRoot, "accessall");
@ -439,7 +452,7 @@ public class TestEncryptionZones {
* Test success of Rename EZ on a directory which is already an EZ.
*/
private void doRenameEncryptionZone(FSTestWrapper wrapper) throws Exception {
final Path testRoot = new Path(fsHelper.getTestRootDir());
final Path testRoot = new Path("/tmp/TestEncryptionZones");
final Path pathFoo = new Path(testRoot, "foo");
final Path pathFooBaz = new Path(pathFoo, "baz");
wrapper.mkdir(pathFoo, FsPermission.getDirDefault(), true);
@ -586,8 +599,9 @@ public class TestEncryptionZones {
} catch (IOException e) {
assertExceptionContains("since no key provider is available", e);
}
final Path jksPath = new Path(testRootDir.toString(), "test.jks");
clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
);
// Try listing EZs as well
assertNumZones(0);
@ -753,4 +767,35 @@ public class TestEncryptionZones {
e.getCause());
}
}
/**
* Tests obtaining delegation token from stored key
*/
@Test(timeout = 120000)
public void testDelegationToken() throws Exception {
UserGroupInformation.createRemoteUser("JobTracker");
DistributedFileSystem dfs = cluster.getFileSystem();
KeyProviderCryptoExtension keyProvider = Mockito.mock(KeyProviderCryptoExtension.class,
withSettings().extraInterfaces(
DelegationTokenExtension.class,
CryptoExtension.class));
Mockito.when(keyProvider.getConf()).thenReturn(conf);
byte[] testIdentifier = "Test identifier for delegation token".getBytes();
Token<?> testToken = new Token(testIdentifier, new byte[0],
new Text(), new Text());
Mockito.when(((DelegationTokenExtension)keyProvider).
addDelegationTokens(anyString(), (Credentials)any())).
thenReturn(new Token<?>[] { testToken });
dfs.getClient().provider = keyProvider;
Credentials creds = new Credentials();
final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
DistributedFileSystem.LOG.debug("Delegation tokens: " +
Arrays.asList(tokens));
Assert.assertEquals(2, tokens.length);
Assert.assertEquals(tokens[1], testToken);
Assert.assertEquals(1, creds.numberOfTokens());
}
}

View File

@ -69,8 +69,9 @@ public class TestReservedRawPaths {
// Set up java key store
String testRoot = fsHelper.getTestRootDir();
File testRootDir = new File(testRoot).getAbsoluteFile();
final Path jksPath = new Path(testRootDir.toString(), "test.jks");
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);

View File

@ -424,6 +424,14 @@ public class TestDirectoryScanner {
public String getStorageID() {
return "";
}
@Override
public void reserveSpaceForRbw(long bytesToReserve) {
}
@Override
public void releaseReservedSpace(long bytesToRelease) {
}
}
private final static TestFsVolumeSpi TEST_VOLUME = new TestFsVolumeSpi();

View File

@ -0,0 +1,288 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import org.apache.hadoop.fs.DU;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.*;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Daemon;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Test;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import java.util.Random;
/**
* Ensure that the DN reserves disk space equivalent to a full block for a
* replica being written (RBW).
*/
public class TestRbwSpaceReservation {
static final Log LOG = LogFactory.getLog(TestRbwSpaceReservation.class);
private static final short REPL_FACTOR = 1;
private static final int DU_REFRESH_INTERVAL_MSEC = 500;
private static final int STORAGES_PER_DATANODE = 1;
private static final int BLOCK_SIZE = 1024 * 1024;
private static final int SMALL_BLOCK_SIZE = 1024;
protected MiniDFSCluster cluster;
private Configuration conf;
private DistributedFileSystem fs = null;
private DFSClient client = null;
FsVolumeImpl singletonVolume = null;
private static Random rand = new Random();
private void initConfig(int blockSize) {
conf = new HdfsConfiguration();
// Refresh disk usage information frequently.
conf.setInt(FS_DU_INTERVAL_KEY, DU_REFRESH_INTERVAL_MSEC);
conf.setLong(DFS_BLOCK_SIZE_KEY, blockSize);
// Disable the scanner
conf.setInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
}
static {
((Log4JLogger) FsDatasetImpl.LOG).getLogger().setLevel(Level.ALL);
}
private void startCluster(int blockSize, long perVolumeCapacity) throws IOException {
initConfig(blockSize);
cluster = new MiniDFSCluster
.Builder(conf)
.storagesPerDatanode(STORAGES_PER_DATANODE)
.numDataNodes(REPL_FACTOR)
.build();
fs = cluster.getFileSystem();
client = fs.getClient();
cluster.waitActive();
if (perVolumeCapacity >= 0) {
List<? extends FsVolumeSpi> volumes =
cluster.getDataNodes().get(0).getFSDataset().getVolumes();
assertThat(volumes.size(), is(1));
singletonVolume = ((FsVolumeImpl) volumes.get(0));
singletonVolume.setCapacityForTesting(perVolumeCapacity);
}
}
@After
public void shutdownCluster() throws IOException {
if (client != null) {
client.close();
client = null;
}
if (fs != null) {
fs.close();
fs = null;
}
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
private void createFileAndTestSpaceReservation(
final String fileNamePrefix, final int fileBlockSize)
throws IOException, InterruptedException {
// Enough for 1 block + meta files + some delta.
final long configuredCapacity = fileBlockSize * 2 - 1;
startCluster(BLOCK_SIZE, configuredCapacity);
FSDataOutputStream out = null;
Path path = new Path("/" + fileNamePrefix + ".dat");
try {
out = fs.create(path, false, 4096, (short) 1, fileBlockSize);
byte[] buffer = new byte[rand.nextInt(fileBlockSize / 4)];
out.write(buffer);
out.hsync();
int bytesWritten = buffer.length;
// Check that space was reserved for a full block minus the bytesWritten.
assertThat(singletonVolume.getReservedForRbw(),
is((long) fileBlockSize - bytesWritten));
out.close();
out = null;
// Check that the reserved space has been released since we closed the
// file.
assertThat(singletonVolume.getReservedForRbw(), is(0L));
// Reopen the file for appends and write 1 more byte.
out = fs.append(path);
out.write(buffer);
out.hsync();
bytesWritten += buffer.length;
// Check that space was again reserved for a full block minus the
// bytesWritten so far.
assertThat(singletonVolume.getReservedForRbw(),
is((long) fileBlockSize - bytesWritten));
// Write once again and again verify the available space. This ensures
// that the reserved space is progressively adjusted to account for bytes
// written to disk.
out.write(buffer);
out.hsync();
bytesWritten += buffer.length;
assertThat(singletonVolume.getReservedForRbw(),
is((long) fileBlockSize - bytesWritten));
} finally {
if (out != null) {
out.close();
}
}
}
@Test (timeout=300000)
public void testWithDefaultBlockSize()
throws IOException, InterruptedException {
createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE);
}
@Test (timeout=300000)
public void testWithNonDefaultBlockSize()
throws IOException, InterruptedException {
// Same test as previous one, but with a non-default block size.
createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE * 2);
}
/**
* Stress test to ensure we are not leaking reserved space.
* @throws IOException
* @throws InterruptedException
*/
@Test (timeout=600000)
public void stressTest() throws IOException, InterruptedException {
final int numWriters = 5;
startCluster(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE * numWriters * 10);
Writer[] writers = new Writer[numWriters];
// Start a few writers and let them run for a while.
for (int i = 0; i < numWriters; ++i) {
writers[i] = new Writer(client, SMALL_BLOCK_SIZE);
writers[i].start();
}
Thread.sleep(60000);
// Stop the writers.
for (Writer w : writers) {
w.stopWriter();
}
int filesCreated = 0;
int numFailures = 0;
for (Writer w : writers) {
w.join();
filesCreated += w.getFilesCreated();
numFailures += w.getNumFailures();
}
LOG.info("Stress test created " + filesCreated +
" files and hit " + numFailures + " failures");
// Check no space was leaked.
assertThat(singletonVolume.getReservedForRbw(), is(0L));
}
private static class Writer extends Daemon {
private volatile boolean keepRunning;
private final DFSClient localClient;
private int filesCreated = 0;
private int numFailures = 0;
byte[] data;
Writer(DFSClient client, int blockSize) throws IOException {
localClient = client;
keepRunning = true;
filesCreated = 0;
numFailures = 0;
// At least some of the files should span a block boundary.
data = new byte[blockSize * 2];
}
@Override
public void run() {
/**
* Create a file, write up to 3 blocks of data and close the file.
* Do this in a loop until we are told to stop.
*/
while (keepRunning) {
OutputStream os = null;
try {
String filename = "/file-" + rand.nextLong();
os = localClient.create(filename, false);
os.write(data, 0, rand.nextInt(data.length));
IOUtils.closeQuietly(os);
os = null;
localClient.delete(filename, false);
Thread.sleep(50); // Sleep for a bit to avoid killing the system.
++filesCreated;
} catch (IOException ioe) {
// Just ignore the exception and keep going.
++numFailures;
} catch (InterruptedException ie) {
return;
} finally {
if (os != null) {
IOUtils.closeQuietly(os);
}
}
}
}
public void stopWriter() {
keepRunning = false;
}
public int getFilesCreated() {
return filesCreated;
}
public int getNumFailures() {
return numFailures;
}
}
}

View File

@ -158,7 +158,7 @@ public class TestWriteToReplica {
replicasMap.add(bpid, new ReplicaInPipeline(
blocks[TEMPORARY].getBlockId(),
blocks[TEMPORARY].getGenerationStamp(), vol,
vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile()));
vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile(), 0));
replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol,
vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(), null);

View File

@ -223,7 +223,7 @@ public class NameNodeAdapter {
* if safemode is not running.
*/
public static int getSafeModeSafeBlocks(NameNode nn) {
SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
SafeModeInfo smi = nn.getNamesystem().getSafeModeInfo();
if (smi == null) {
return -1;
}

View File

@ -181,6 +181,18 @@ Release 2.6.0 - UNRELEASED
YARN-2511. Allowed all origins by default when CrossOriginFilter is
enabled. (Jonathan Eagles via zjshen)
YARN-2508. Cross Origin configuration parameters prefix are not honored
(Mit Desai via jeagles)
YARN-2512. Allowed pattern matching for origins in CrossOriginFilter.
(Jonathan Eagles via zjshen)
YARN-2507. Documented CrossOriginFilter configurations for the timeline
server. (Jonathan Eagles via zjshen)
YARN-2515. Updated ConverterUtils#toContainerId to parse epoch.
(Tsuyoshi OZAWA via jianhe)
OPTIMIZATIONS
BUG FIXES
@ -284,6 +296,9 @@ Release 2.6.0 - UNRELEASED
YARN-2431. NM restart: cgroup is not removed for reacquired containers
(jlowe)
YARN-2519. Credential Provider related unit tests failed on Windows.
(Xiaoyu Yao via cnauroth)
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -18,8 +18,10 @@
package org.apache.hadoop.yarn.api.records;
import java.text.NumberFormat;
import com.google.common.base.Splitter;
import java.text.NumberFormat;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
@ -33,6 +35,8 @@ import org.apache.hadoop.yarn.util.Records;
@Public
@Stable
public abstract class ContainerId implements Comparable<ContainerId>{
private static final Splitter _SPLITTER = Splitter.on('_').trimResults();
private static final String CONTAINER_PREFIX = "container";
@Private
@Unstable
@ -163,5 +167,38 @@ public abstract class ContainerId implements Comparable<ContainerId>{
return sb.toString();
}
@Public
@Unstable
public static ContainerId fromString(String containerIdStr) {
Iterator<String> it = _SPLITTER.split(containerIdStr).iterator();
if (!it.next().equals(CONTAINER_PREFIX)) {
throw new IllegalArgumentException("Invalid ContainerId prefix: "
+ containerIdStr);
}
try {
ApplicationAttemptId appAttemptID = toApplicationAttemptId(it);
int id = Integer.parseInt(it.next());
int epoch = 0;
if (it.hasNext()) {
epoch = Integer.parseInt(it.next());
}
int cid = (epoch << 22) | id;
ContainerId containerId = ContainerId.newInstance(appAttemptID, cid);
return containerId;
} catch (NumberFormatException n) {
throw new IllegalArgumentException("Invalid ContainerId: "
+ containerIdStr, n);
}
}
private static ApplicationAttemptId toApplicationAttemptId(
Iterator<String> it) throws NumberFormatException {
ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
Integer.parseInt(it.next()));
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, Integer.parseInt(it.next()));
return appAttemptId;
}
protected abstract void build();
}
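ContainerId.fromString (YARN-2515) folds an optional epoch into the numeric container id as (epoch << 22) | id. A worked example that matches the assertions in TestContainerId further down:

class ContainerIdEpochSketch {
  static void demo() {
    // From "container_<ts>_36473_4365472_479987_06": id = 479987, epoch = 6.
    int id    = 479987;
    int epoch = 6;
    int cid   = (epoch << 22) | id;       // 25165824 + 479987 = 25645811

    // Recovering both halves, exactly as the test does:
    assert (cid & 0x003fffff) == 479987;  // low 22 bits: sequential id
    assert (cid >> 22)        == 6;       // remaining high bits: RM epoch
  }
}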

View File

@ -168,20 +168,7 @@ public class ConverterUtils {
}
public static ContainerId toContainerId(String containerIdStr) {
Iterator<String> it = _split(containerIdStr).iterator();
if (!it.next().equals(CONTAINER_PREFIX)) {
throw new IllegalArgumentException("Invalid ContainerId prefix: "
+ containerIdStr);
}
try {
ApplicationAttemptId appAttemptID = toApplicationAttemptId(it);
ContainerId containerId =
ContainerId.newInstance(appAttemptID, Integer.parseInt(it.next()));
return containerId;
} catch (NumberFormatException n) {
throw new IllegalArgumentException("Invalid ContainerId: "
+ containerIdStr, n);
}
return ContainerId.fromString(containerIdStr);
}
public static ApplicationAttemptId toApplicationAttemptId(

View File

@ -54,10 +54,14 @@ public class TestContainerId {
long ts = System.currentTimeMillis();
ContainerId c6 = newContainerId(36473, 4365472, ts, 25645811);
Assert.assertEquals("container_10_0001_01_000001", c1.toString());
Assert.assertEquals(c1,
ContainerId.fromString("container_10_0001_01_000001"));
Assert.assertEquals(479987, 0x003fffff & c6.getId());
Assert.assertEquals(6, c6.getId() >> 22);
Assert.assertEquals("container_" + ts + "_36473_4365472_479987_06",
c6.toString());
Assert.assertEquals(c6,
ContainerId.fromString("container_" + ts + "_36473_4365472_479987_06"));
}
public static ContainerId newContainerId(int appId, int appAttemptId,

View File

@ -55,6 +55,15 @@ public class TestConverterUtils {
assertEquals(gen, id);
}
@Test
public void testContainerIdWithEpoch() throws URISyntaxException {
ContainerId id = TestContainerId.newContainerId(0, 0, 0, 25645811);
String cid = ConverterUtils.toString(id);
assertEquals("container_0_0000_00_479987_06", cid);
ContainerId gen = ConverterUtils.toContainerId(cid);
assertEquals(gen.toString(), id.toString());
}
@Test
public void testContainerIdNull() throws URISyntaxException {
assertNull(ConverterUtils.toString((ContainerId)null));

View File

@ -24,6 +24,7 @@ import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.http.HttpServer2.Builder;
import org.apache.hadoop.security.alias.CredentialProvider;
@ -74,8 +75,9 @@ public class TestWebAppUtils {
"target/test-dir"));
Configuration conf = new Configuration();
final Path jksPath = new Path(testDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(testDir, "test.jks");
file.delete();

View File

@ -24,6 +24,8 @@ import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
@ -204,7 +206,23 @@ public class CrossOriginFilter implements Filter {
@VisibleForTesting
boolean isOriginAllowed(String origin) {
return allowAllOrigins || allowedOrigins.contains(origin);
if (allowAllOrigins) {
return true;
}
for (String allowedOrigin : allowedOrigins) {
if (allowedOrigin.contains("*")) {
String regex = allowedOrigin.replace(".", "\\.").replace("*", ".*");
Pattern p = Pattern.compile(regex);
Matcher m = p.matcher(origin);
if (m.matches()) {
return true;
}
} else if (allowedOrigin.equals(origin)) {
return true;
}
}
return false;
}
private boolean areHeadersAllowed(String accessControlRequestHeaders) {
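The new isOriginAllowed turns a wildcard origin such as *.example.com into a regular expression before matching. A worked sketch of the conversion and the matches it yields, mirroring TestCrossOriginFilter#testPatternMatchingOrigins below:

import java.util.regex.Pattern;

class OriginWildcardSketch {
  static void demo() {
    String allowedOrigin = "*.example.com";
    // replace(".", "\\.") then replace("*", ".*") gives the regex ".*\.example\.com"
    String regex = allowedOrigin.replace(".", "\\.").replace("*", ".*");
    Pattern p = Pattern.compile(regex);

    assert  p.matcher("foo.example.com").matches();      // sub-domain allowed
    assert  p.matcher("foo.bar.example.com").matches();  // nested sub-domains too
    assert !p.matcher("example.com").matches();          // bare domain rejected:
                                                         // ".*\." needs a dot
  }
}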

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.server.timeline.webapp;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
@ -37,6 +38,15 @@ public class CrossOriginFilterInitializer extends FilterInitializer {
}
static Map<String, String> getFilterParameters(Configuration conf) {
return conf.getValByRegex(PREFIX);
Map<String, String> filterParams =
new HashMap<String, String>();
for (Map.Entry<String, String> entry : conf.getValByRegex(PREFIX)
.entrySet()) {
String name = entry.getKey();
String value = entry.getValue();
name = name.substring(PREFIX.length());
filterParams.put(name, value);
}
return filterParams;
}
}
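getFilterParameters now strips the configuration prefix before handing values to the filter, so CrossOriginFilter reads bare init-parameter names. A small sketch of the mapping; the prefix is assumed to be the yarn.timeline-service.http-cross-origin. namespace documented further down, and the values are illustrative:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

class CorsFilterParamsSketch {
  static final String PREFIX = "yarn.timeline-service.http-cross-origin.";

  static Map<String, String> demo() {
    Configuration conf = new Configuration(false);
    conf.set(PREFIX + "allowed-origins", "*.example.com");
    conf.set(PREFIX + "allowed-methods", "GET,POST,HEAD");

    // Strip the prefix, as the new getFilterParameters does.
    Map<String, String> params = new HashMap<String, String>();
    for (Map.Entry<String, String> e : conf.getValByRegex(PREFIX).entrySet()) {
      params.put(e.getKey().substring(PREFIX.length()), e.getValue());
    }
    // params: {"allowed-origins" -> "*.example.com",
    //          "allowed-methods" -> "GET,POST,HEAD"}
    return params;
  }
}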

View File

@ -77,7 +77,26 @@ public class TestCrossOriginFilter {
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
Assert.assertTrue(filter.isOriginAllowed("example.org"));
Assert.assertTrue(filter.isOriginAllowed("example.com"));
}
@Test
public void testPatternMatchingOrigins() throws ServletException, IOException {
// Setup the configuration settings of the server
Map<String, String> conf = new HashMap<String, String>();
conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "*.example.com");
FilterConfig filterConfig = new FilterConfigTest(conf);
// Object under test
CrossOriginFilter filter = new CrossOriginFilter();
filter.init(filterConfig);
// match multiple sub-domains
Assert.assertFalse(filter.isOriginAllowed("example.com"));
Assert.assertFalse(filter.isOriginAllowed("foo:example.com"));
Assert.assertTrue(filter.isOriginAllowed("foo.example.com"));
Assert.assertTrue(filter.isOriginAllowed("foo.bar.example.com"));
}
@Test

View File

@ -42,11 +42,8 @@ public class TestCrossOriginFilterInitializer {
CrossOriginFilterInitializer.getFilterParameters(conf);
// retrieve values
String rootvalue =
filterParameters.get(CrossOriginFilterInitializer.PREFIX + "rootparam");
String nestedvalue =
filterParameters.get(CrossOriginFilterInitializer.PREFIX
+ "nested.param");
String rootvalue = filterParameters.get("rootparam");
String nestedvalue = filterParameters.get("nested.param");
String outofscopeparam = filterParameters.get("outofscopeparam");
// verify expected values are in place

View File

@ -102,6 +102,43 @@ YARN Timeline Server
<name>yarn.timeline-service.handler-thread-count</name>
<value>10</value>
</property>
<property>
<description>Enables cross-origin support (CORS) for web services where
cross-origin web response headers are needed. For example, javascript making
a web services request to the timeline server.</description>
<name>yarn.timeline-service.http-cross-origin.enabled</name>
<value>false</value>
</property>
<property>
<description>Comma separated list of origins that are allowed for web
services needing cross-origin (CORS) support. Wildcards (*) and patterns
are allowed.</description>
<name>yarn.timeline-service.http-cross-origin.allowed-origins</name>
<value>*</value>
</property>
<property>
<description>Comma separated list of methods that are allowed for web
services needing cross-origin (CORS) support.</description>
<name>yarn.timeline-service.http-cross-origin.allowed-methods</name>
<value>GET,POST,HEAD</value>
</property>
<property>
<description>Comma separated list of headers that are allowed for web
services needing cross-origin (CORS) support.</description>
<name>yarn.timeline-service.http-cross-origin.allowed-headers</name>
<value>X-Requested-With,Content-Type,Accept,Origin</value>
</property>
<property>
<description>The number of seconds a pre-flighted request can be cached
for web services needing cross-origin (CORS) support.</description>
<name>yarn.timeline-service.http-cross-origin.max-age</name>
<value>1800</value>
</property>
+---+
* Generic-data related Configuration