Merging r1525409 through r1525758 from trunk to branch HDFS-2832

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1525759 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 2013-09-24 00:57:43 +00:00
commit 114aa229de
22 changed files with 970 additions and 538 deletions

View File

@@ -499,6 +499,9 @@ Release 2.1.1-beta - 2013-09-23
HADOOP-9961. versions of a few transitive dependencies diverged between hadoop
subprojects. (rvs via tucu)
HADOOP-9977. Hadoop services won't start with different keypass and
keystorepass when https is enabled. (cnauroth)
Release 2.1.0-beta - 2013-08-22
INCOMPATIBLE CHANGES

View File

@@ -39,11 +39,14 @@ class SetReplication extends FsCommand {
}
public static final String NAME = "setrep";
public static final String USAGE = "[-R] [-w] <rep> <path/file> ...";
public static final String USAGE = "[-R] [-w] <rep> <path> ...";
public static final String DESCRIPTION =
"Set the replication level of a file.\n" +
"The -R flag requests a recursive change of replication level\n" +
"for an entire tree.";
"Set the replication level of a file. If <path> is a directory\n" +
"then the command recursively changes the replication factor of\n" +
"all files under the directory tree rooted at <path>.\n" +
"The -w flag requests that the command wait for the replication\n" +
"to complete. This can potentially take a very long time.\n" +
"The -R flag is accepted for backwards compatibility. It has no effect.";
protected short newRep = 0;
protected List<PathData> waitList = new LinkedList<PathData>();
@@ -54,7 +57,7 @@ class SetReplication extends FsCommand {
CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "R", "w");
cf.parse(args);
waitOpt = cf.getOpt("w");
setRecursive(cf.getOpt("R"));
setRecursive(true);
try {
newRep = Short.parseShort(args.removeFirst());
@@ -126,4 +129,4 @@ class SetReplication extends FsCommand {
out.println(" done");
}
}
}
}
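As an aside, a minimal driver sketch (not part of this commit; the class name and target path are illustrative) showing the revised setrep semantics through the public FsShell/ToolRunner API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class SetrepSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // After this change, passing a directory always recurses; -R is parsed
    // but has no effect, and -w blocks until replication 3 is reached.
    int rc = ToolRunner.run(conf, new FsShell(),
        new String[] { "-setrep", "-w", "3", "/user/hadoop/dir1" });
    System.exit(rc);
  }
}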

View File

@@ -53,6 +53,8 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
"ssl.{0}.keystore.location";
public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY =
"ssl.{0}.keystore.password";
public static final String SSL_KEYSTORE_KEYPASSWORD_TPL_KEY =
"ssl.{0}.keystore.keypassword";
public static final String SSL_KEYSTORE_TYPE_TPL_KEY =
"ssl.{0}.keystore.type";
@@ -136,7 +138,7 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY),
DEFAULT_KEYSTORE_TYPE);
KeyStore keystore = KeyStore.getInstance(keystoreType);
String keystorePassword = null;
String keystoreKeyPassword = null;
if (requireClientCert || mode == SSLFactory.Mode.SERVER) {
String locationProperty =
resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY);
@@ -147,11 +149,17 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
}
String passwordProperty =
resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY);
keystorePassword = conf.get(passwordProperty, "");
String keystorePassword = conf.get(passwordProperty, "");
if (keystorePassword.isEmpty()) {
throw new GeneralSecurityException("The property '" + passwordProperty +
"' has not been set in the ssl configuration file.");
}
String keyPasswordProperty =
resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY);
// Key password defaults to the same value as store password for
// compatibility with legacy configurations that did not use a separate
// configuration property for key password.
keystoreKeyPassword = conf.get(keyPasswordProperty, keystorePassword);
LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation);
InputStream is = new FileInputStream(keystoreLocation);
@@ -167,8 +175,8 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
KeyManagerFactory keyMgrFactory = KeyManagerFactory
.getInstance(SSLFactory.SSLCERTIFICATE);
keyMgrFactory.init(keystore, (keystorePassword != null) ?
keystorePassword.toCharArray() : null);
keyMgrFactory.init(keystore, (keystoreKeyPassword != null) ?
keystoreKeyPassword.toCharArray() : null);
keyManagers = keyMgrFactory.getKeyManagers();
//trust store
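A minimal sketch of the fallback the hunk above introduces, with the property-name template resolved by hand for server mode (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;

public class KeyPasswordFallbackSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("ssl.server.keystore.password", "storePass");
    // ssl.server.keystore.keypassword is unset, so the key password falls
    // back to the store password, keeping legacy single-password configs working.
    String storePass = conf.get("ssl.server.keystore.password", "");
    String keyPass = conf.get("ssl.server.keystore.keypassword", storePass);
    System.out.println(keyPass);  // prints "storePass"
  }
}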

View File

@@ -381,17 +381,22 @@ rmr
setrep
Usage: <<<hdfs dfs -setrep [-R] <path> >>>
Usage: <<<hdfs dfs -setrep [-R] [-w] <numReplicas> <path> >>>
Changes the replication factor of a file.
Changes the replication factor of a file. If <path> is a directory then
the command recursively changes the replication factor of all files under
the directory tree rooted at <path>.
Options:
* The -R option will recursively increase the replication factor of files within a directory.
* The -w flag requests that the command wait for the replication
to complete. This can potentially take a very long time.
* The -R flag is accepted for backwards compatibility. It has no effect.
Example:
* <<<hdfs dfs -setrep -w 3 -R /user/hadoop/dir1>>>
* <<<hdfs dfs -setrep -w 3 /user/hadoop/dir1>>>
Exit Code:

View File

@@ -145,6 +145,28 @@ public class KeyStoreTestUtil {
saveKeyStore(ks, filename, password);
}
/**
* Creates a keystore with a single key and saves it to a file.
*
* @param filename String file to save
* @param password String store password to set on keystore
* @param keyPassword String key password to set on key
* @param alias String alias to use for the key
* @param privateKey Key to save in keystore
* @param cert Certificate to use as certificate chain associated to key
* @throws GeneralSecurityException for any error with the security APIs
* @throws IOException if there is an I/O error saving the file
*/
public static void createKeyStore(String filename,
String password, String keyPassword, String alias,
Key privateKey, Certificate cert)
throws GeneralSecurityException, IOException {
KeyStore ks = createEmptyKeyStore();
ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(),
new Certificate[]{cert});
saveKeyStore(ks, filename, password);
}
public static void createTrustStore(String filename,
String password, String alias,
Certificate cert)
@@ -178,6 +200,19 @@ public class KeyStoreTestUtil {
f.delete();
}
/**
* Performs complete setup of SSL configuration in preparation for testing an
* SSLFactory. This includes keys, certs, keystores, truststores, the server
* SSL configuration file, the client SSL configuration file, and the master
* configuration file read by the SSLFactory.
*
* @param keystoresDir String directory to save keystores
* @param sslConfDir String directory to save SSL configuration files
* @param conf Configuration master configuration to be used by an SSLFactory,
* which will be mutated by this method
* @param useClientCert boolean true to make the client present a cert in the
* SSL handshake
*/
public static void setupSSLConfig(String keystoresDir, String sslConfDir,
Configuration conf, boolean useClientCert)
throws Exception {
@@ -213,53 +248,13 @@ public class KeyStoreTestUtil {
KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
Configuration clientSSLConf = new Configuration(false);
clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.CLIENT,
FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), clientKS);
clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.CLIENT,
FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), clientPassword);
clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.CLIENT,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.CLIENT,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword);
clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.CLIENT,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword,
clientPassword, trustKS);
Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword,
serverPassword, trustKS);
Configuration serverSSLConf = new Configuration(false);
serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.SERVER,
FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), serverKS);
serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.SERVER,
FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), serverPassword);
serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.SERVER,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.SERVER,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword);
serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
SSLFactory.Mode.SERVER,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
Writer writer = new FileWriter(sslClientConfFile);
try {
clientSSLConf.writeXml(writer);
} finally {
writer.close();
}
writer = new FileWriter(sslServerConfFile);
try {
serverSSLConf.writeXml(writer);
} finally {
writer.close();
}
saveConfig(sslClientConfFile, clientSSLConf);
saveConfig(sslServerConfFile, serverSSLConf);
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
conf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile.getName());
@@ -267,4 +262,101 @@ public class KeyStoreTestUtil {
conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert);
}
/**
* Creates SSL configuration for a client.
*
* @param clientKS String client keystore file
* @param password String store password, or null to avoid setting store
* password
* @param keyPassword String key password, or null to avoid setting key
* password
* @param trustKS String truststore file
* @return Configuration for client SSL
*/
public static Configuration createClientSSLConfig(String clientKS,
String password, String keyPassword, String trustKS) {
Configuration clientSSLConf = createSSLConfig(SSLFactory.Mode.CLIENT,
clientKS, password, keyPassword, trustKS);
return clientSSLConf;
}
/**
* Creates SSL configuration for a server.
*
* @param serverKS String server keystore file
* @param password String store password, or null to avoid setting store
* password
* @param keyPassword String key password, or null to avoid setting key
* password
* @param trustKS String truststore file
* @return Configuration for server SSL
*/
public static Configuration createServerSSLConfig(String serverKS,
String password, String keyPassword, String trustKS) throws IOException {
Configuration serverSSLConf = createSSLConfig(SSLFactory.Mode.SERVER,
serverKS, password, keyPassword, trustKS);
return serverSSLConf;
}
/**
* Creates SSL configuration.
*
* @param mode SSLFactory.Mode mode to configure
* @param keystore String keystore file
* @param password String store password, or null to avoid setting store
* password
* @param keyPassword String key password, or null to avoid setting key
* password
* @param trustKS String truststore file
* @return Configuration for SSL
*/
private static Configuration createSSLConfig(SSLFactory.Mode mode,
String keystore, String password, String keyPassword, String trustKS) {
String trustPassword = "trustP";
Configuration sslConf = new Configuration(false);
if (keystore != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), keystore);
}
if (password != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), password);
}
if (keyPassword != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY),
keyPassword);
}
if (trustKS != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
}
if (trustPassword != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY),
trustPassword);
}
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
return sslConf;
}
/**
* Saves configuration to a file.
*
* @param file File to save
* @param conf Configuration contents to write to file
* @throws IOException if there is an I/O error saving the file
*/
public static void saveConfig(File file, Configuration conf)
throws IOException {
Writer writer = new FileWriter(file);
try {
conf.writeXml(writer);
} finally {
writer.close();
}
}
}
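To show how the new helpers compose end to end, a hedged sketch (file names and passwords are made up; the helper signatures come from the hunks above and from TestSSLFactory below):

import java.io.File;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;

public class SSLTestSetupSketch {
  public static void main(String[] args) throws Exception {
    // Self-signed cert, stored with distinct store and key passwords.
    KeyPair pair = KeyStoreTestUtil.generateKeyPair("RSA");
    X509Certificate cert =
        KeyStoreTestUtil.generateCertificate("CN=Test", pair, 30, "SHA1withRSA");
    KeyStoreTestUtil.createKeyStore("server.jks", "storeP", "keyP", "test",
        pair.getPrivate(), cert);
    KeyStoreTestUtil.createTrustStore("trust.jks", "trustP", "test", cert);
    // Build the server-side SSL Configuration and persist it as ssl-server.xml.
    Configuration sslConf = KeyStoreTestUtil.createServerSSLConfig(
        "server.jks", "storeP", "keyP", "trust.jks");
    KeyStoreTestUtil.saveConfig(new File("ssl-server.xml"), sslConf);
  }
}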

View File

@@ -29,12 +29,19 @@ import javax.net.ssl.HttpsURLConnection;
import java.io.File;
import java.net.URL;
import java.security.GeneralSecurityException;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
import java.util.Collections;
import java.util.Map;
public class TestSSLFactory {
private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/" +
TestSSLFactory.class.getSimpleName();
private static final String KEYSTORES_DIR =
new File(BASEDIR).getAbsolutePath();
private String sslConfsDir;
@BeforeClass
public static void setUp() throws Exception {
@@ -46,18 +53,16 @@ public class TestSSLFactory {
private Configuration createConfiguration(boolean clientCert)
throws Exception {
Configuration conf = new Configuration();
String keystoresDir = new File(BASEDIR).getAbsolutePath();
String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, clientCert);
KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf,
clientCert);
return conf;
}
@After
@Before
public void cleanUp() throws Exception {
String keystoresDir = new File(BASEDIR).getAbsolutePath();
String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfsDir);
sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir);
}
@Test(expected = IllegalStateException.class)
@@ -181,4 +186,90 @@ public class TestSSLFactory {
}
}
@Test
public void testServerDifferentPasswordAndKeyPassword() throws Exception {
checkSSLFactoryInitWithPasswords(SSLFactory.Mode.SERVER, "password",
"keyPassword", "password", "keyPassword");
}
@Test
public void testServerKeyPasswordDefaultsToPassword() throws Exception {
checkSSLFactoryInitWithPasswords(SSLFactory.Mode.SERVER, "password",
"password", "password", null);
}
@Test
public void testClientDifferentPasswordAndKeyPassword() throws Exception {
checkSSLFactoryInitWithPasswords(SSLFactory.Mode.CLIENT, "password",
"keyPassword", "password", "keyPassword");
}
@Test
public void testClientKeyPasswordDefaultsToPassword() throws Exception {
checkSSLFactoryInitWithPasswords(SSLFactory.Mode.CLIENT, "password",
"password", "password", null);
}
/**
* Checks that SSLFactory initialization is successful with the given
* arguments. This is a helper method for writing test cases that cover
* different combinations of settings for the store password and key password.
* It takes care of bootstrapping a keystore, a truststore, and SSL client or
* server configuration. Then, it initializes an SSLFactory. If no exception
* is thrown, then initialization was successful.
*
* @param mode SSLFactory.Mode mode to test
* @param password String store password to set on keystore
* @param keyPassword String key password to set on keystore
* @param confPassword String store password to set in SSL config file, or null
* to avoid setting in SSL config file
* @param confKeyPassword String key password to set in SSL config file, or
* null to avoid setting in SSL config file
* @throws Exception for any error
*/
private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode,
String password, String keyPassword, String confPassword,
String confKeyPassword) throws Exception {
String keystore = new File(KEYSTORES_DIR, "keystore.jks").getAbsolutePath();
String truststore = new File(KEYSTORES_DIR, "truststore.jks")
.getAbsolutePath();
String trustPassword = "trustP";
// Create keys, certs, keystore, and truststore.
KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
X509Certificate cert = KeyStoreTestUtil.generateCertificate("CN=Test",
keyPair, 30, "SHA1withRSA");
KeyStoreTestUtil.createKeyStore(keystore, password, keyPassword, "Test",
keyPair.getPrivate(), cert);
Map<String, X509Certificate> certs = Collections.singletonMap("server",
cert);
KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs);
// Create SSL configuration file, for either server or client.
final String sslConfFileName;
final Configuration sslConf;
if (mode == SSLFactory.Mode.SERVER) {
sslConfFileName = "ssl-server.xml";
sslConf = KeyStoreTestUtil.createServerSSLConfig(keystore, confPassword,
confKeyPassword, truststore);
} else {
sslConfFileName = "ssl-client.xml";
sslConf = KeyStoreTestUtil.createClientSSLConfig(keystore, confPassword,
confKeyPassword, truststore);
}
KeyStoreTestUtil.saveConfig(new File(sslConfsDir, sslConfFileName), sslConf);
// Create the master configuration for use by the SSLFactory, which by
// default refers to the ssl-server.xml or ssl-client.xml created above.
Configuration conf = new Configuration();
conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, true);
// Try initializing an SSLFactory.
SSLFactory sslFactory = new SSLFactory(mode, conf);
try {
sslFactory.init();
} finally {
sslFactory.destroy();
}
}
}

View File

@@ -601,16 +601,28 @@
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path/file&gt; \.\.\.:( |\t)*Set the replication level of a file.( )*</expected-output>
<expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path&gt; \.\.\.:( |\t)*Set the replication level of a file. If &lt;path&gt; is a directory( )*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^( |\t)*The -R flag requests a recursive change of replication level( )*</expected-output>
<expected-output>^( |\t)*then the command recursively changes the replication factor of( )*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^( |\t)*for an entire tree.( )*</expected-output>
<expected-output>^( |\t)*all files under the directory tree rooted at &lt;path&gt;\.( )*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^( |\t)*The -w flag requests that the command wait for the replication( )*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^( |\t)*to complete. This can potentially take a very long time\.( )*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^( |\t)*The -R flag is accepted for backwards compatibility\. It has no effect\.( )*</expected-output>
</comparator>
</comparators>
</test>

View File

@@ -97,7 +97,7 @@ public class AsyncDataService {
void writeAsync(OpenFileCtx openFileCtx) {
if (LOG.isDebugEnabled()) {
LOG.debug("Scheduling write back task for fileId: "
+ openFileCtx.copyLatestAttr().getFileId());
+ openFileCtx.getLatestAttr().getFileId());
}
WriteBackTask wbTask = new WriteBackTask(openFileCtx);
execute(wbTask);
@@ -125,7 +125,7 @@ public class AsyncDataService {
public String toString() {
// Called in AsyncDataService.execute for displaying error messages.
return "write back data for fileId"
+ openFileCtx.copyLatestAttr().getFileId() + " with nextOffset "
+ openFileCtx.getLatestAttr().getFileId() + " with nextOffset "
+ openFileCtx.getNextOffset();
}

View File

@@ -17,19 +17,34 @@
*/
package org.apache.hadoop.hdfs.nfs.nfs3;
import java.util.Comparator;
import com.google.common.base.Preconditions;
/**
* OffsetRange is the range of a read/write request. A single point (e.g., [5,5])
* is not a valid range.
*/
public class OffsetRange implements Comparable<OffsetRange> {
public class OffsetRange {
public static final Comparator<OffsetRange> ReverseComparatorOnMin =
new Comparator<OffsetRange>() {
@Override
public int compare(OffsetRange o1, OffsetRange o2) {
if (o1.getMin() == o2.getMin()) {
return o1.getMax() < o2.getMax() ?
1 : (o1.getMax() > o2.getMax() ? -1 : 0);
} else {
return o1.getMin() < o2.getMin() ? 1 : -1;
}
}
};
private final long min;
private final long max;
OffsetRange(long min, long max) {
if ((min >= max) || (min < 0) || (max < 0)) {
throw new IllegalArgumentException("Wrong offset range: (" + min + ","
+ max + ")");
}
Preconditions.checkArgument(min >= 0 && max >= 0 && min < max);
this.min = min;
this.max = max;
}
@@ -49,24 +64,10 @@ public class OffsetRange implements Comparable<OffsetRange> {
@Override
public boolean equals(Object o) {
assert (o instanceof OffsetRange);
OffsetRange range = (OffsetRange) o;
return (min == range.getMin()) && (max == range.getMax());
}
private static int compareTo(long left, long right) {
if (left < right) {
return -1;
} else if (left > right) {
return 1;
} else {
return 0;
if (o instanceof OffsetRange) {
OffsetRange range = (OffsetRange) o;
return (min == range.getMin()) && (max == range.getMax());
}
}
@Override
public int compareTo(OffsetRange other) {
final int d = compareTo(min, other.getMin());
return d != 0 ? d : compareTo(max, other.getMax());
return false;
}
}
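ReverseComparatorOnMin sorts ranges in descending order of start offset, breaking ties by descending end offset. A minimal usage sketch (the TreeMap and the package placement are assumptions for illustration; the constructor is package-private, so the sketch must live in the same package):

package org.apache.hadoop.hdfs.nfs.nfs3;

import java.util.TreeMap;

public class OffsetRangeOrderSketch {
  public static void main(String[] args) {
    // With this ordering, firstKey() is always the range with the
    // largest start offset.
    TreeMap<OffsetRange, String> pendingWrites =
        new TreeMap<OffsetRange, String>(OffsetRange.ReverseComparatorOnMin);
    pendingWrites.put(new OffsetRange(0, 10), "w1");
    pendingWrites.put(new OffsetRange(10, 30), "w2");
    System.out.println(pendingWrites.firstKey().getMin());  // prints 10
  }
}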

View File

@@ -27,6 +27,8 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.jboss.netty.channel.Channel;
import com.google.common.base.Preconditions;
/**
* WriteCtx saves the context of one write request, such as request, channel,
* xid and reply status.
@@ -49,13 +51,21 @@ class WriteCtx {
private final long offset;
private final int count;
private final WriteStableHow stableHow;
private byte[] data;
private volatile byte[] data;
private final Channel channel;
private final int xid;
private boolean replied;
private DataState dataState;
/**
* Data belonging to the same {@link OpenFileCtx} may be dumped to a file.
* After being dumped to the file, the corresponding {@link WriteCtx} records
* the dump file and the offset.
*/
private RandomAccessFile raf;
private long dumpFileOffset;
private volatile DataState dataState;
public DataState getDataState() {
return dataState;
@@ -64,12 +74,13 @@ class WriteCtx {
public void setDataState(DataState dataState) {
this.dataState = dataState;
}
private RandomAccessFile raf;
private long dumpFileOffset;
// Return the dumped data size
public long dumpData(FileOutputStream dumpOut, RandomAccessFile raf)
/**
* Writes the data into a local dump file. After the write, if
* {@link #dataState} is still ALLOW_DUMP, sets {@link #data} to null and sets
* {@link #dataState} to DUMPED.
*/
long dumpData(FileOutputStream dumpOut, RandomAccessFile raf)
throws IOException {
if (dataState != DataState.ALLOW_DUMP) {
if (LOG.isTraceEnabled()) {
@@ -84,48 +95,63 @@ class WriteCtx {
if (LOG.isDebugEnabled()) {
LOG.debug("After dump, new dumpFileOffset:" + dumpFileOffset);
}
data = null;
dataState = DataState.DUMPED;
return count;
// it is possible that while we dump the data, the data is also being
// written back to HDFS. After dump, if the writing back has not finished
// yet, we change its flag to DUMPED and set the data to null. Otherwise
// this WriteCtx instance should have been removed from the buffer.
if (dataState == DataState.ALLOW_DUMP) {
synchronized (this) {
if (dataState == DataState.ALLOW_DUMP) {
data = null;
dataState = DataState.DUMPED;
return count;
}
}
}
return 0;
}
public FileHandle getHandle() {
FileHandle getHandle() {
return handle;
}
public long getOffset() {
long getOffset() {
return offset;
}
public int getCount() {
int getCount() {
return count;
}
public WriteStableHow getStableHow() {
WriteStableHow getStableHow() {
return stableHow;
}
public byte[] getData() throws IOException {
byte[] getData() throws IOException {
if (dataState != DataState.DUMPED) {
if (data == null) {
throw new IOException("Data is not dumpted but has null:" + this);
}
} else {
// read back
if (data != null) {
throw new IOException("Data is dumpted but not null");
}
data = new byte[count];
raf.seek(dumpFileOffset);
int size = raf.read(data, 0, count);
if (size != count) {
throw new IOException("Data count is " + count + ", but read back "
+ size + "bytes");
synchronized (this) {
if (dataState != DataState.DUMPED) {
Preconditions.checkState(data != null);
return data;
}
}
}
// read back from dumped file
this.loadData();
return data;
}
private void loadData() throws IOException {
Preconditions.checkState(data == null);
data = new byte[count];
raf.seek(dumpFileOffset);
int size = raf.read(data, 0, count);
if (size != count) {
throw new IOException("Data count is " + count + ", but read back "
+ size + " bytes");
}
}
Channel getChannel() {
return channel;
}
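The dumpData() change above guards its state transition with a check, lock, recheck pattern over volatile fields. A self-contained sketch of the idiom (all names are illustrative, not from the commit):

class DumpableBuffer {
  enum State { ALLOW_DUMP, DUMPED }

  private volatile State state = State.ALLOW_DUMP;
  private volatile byte[] data = { 1, 2, 3 };

  long tryDump() {
    if (state == State.ALLOW_DUMP) {      // cheap unsynchronized read first
      synchronized (this) {
        if (state == State.ALLOW_DUMP) {  // recheck under the lock
          int n = data.length;
          data = null;                    // drop the in-memory copy
          state = State.DUMPED;
          return n;
        }
      }
    }
    return 0;  // the writeback path won the race; nothing was dumped
  }
}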

View File

@@ -67,8 +67,8 @@ public class WriteManager {
*/
private long streamTimeout;
public static final long DEFAULT_STREAM_TIMEOUT = 10 * 1000; // 10 second
public static final long MINIMIUM_STREAM_TIMEOUT = 1 * 1000; // 1 second
public static final long DEFAULT_STREAM_TIMEOUT = 10 * 60 * 1000; //10 minutes
public static final long MINIMIUM_STREAM_TIMEOUT = 10 * 1000; //10 seconds
void addOpenFileStream(FileHandle h, OpenFileCtx ctx) {
openFileMap.put(h, ctx);
@@ -215,6 +215,10 @@ public class WriteManager {
LOG.info("Inactive stream, fileId=" + fileHandle.getFileId()
+ " commitOffset=" + commitOffset);
return true;
} else if (ret == OpenFileCtx.COMMIT_INACTIVE_WITH_PENDING_WRITE) {
LOG.info("Inactive stream with pending writes, fileId="
+ fileHandle.getFileId() + " commitOffset=" + commitOffset);
return false;
}
assert (ret == OpenFileCtx.COMMIT_WAIT || ret == OpenFileCtx.COMMIT_ERROR);
if (ret == OpenFileCtx.COMMIT_ERROR) {

View File

@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.nfs.nfs3;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
@@ -51,8 +52,9 @@ public class TestOffsetRange {
OffsetRange r3 = new OffsetRange(1, 3);
OffsetRange r4 = new OffsetRange(3, 4);
assertTrue(r2.compareTo(r3) == 0);
assertTrue(r2.compareTo(r1) == 1);
assertTrue(r2.compareTo(r4) == -1);
assertEquals(0, OffsetRange.ReverseComparatorOnMin.compare(r2, r3));
assertEquals(0, OffsetRange.ReverseComparatorOnMin.compare(r2, r2));
assertTrue(OffsetRange.ReverseComparatorOnMin.compare(r2, r1) < 0);
assertTrue(OffsetRange.ReverseComparatorOnMin.compare(r2, r4) > 0);
}
}

View File

@@ -286,8 +286,12 @@ Release 2.3.0 - UNRELEASED
and the excludedNodes parameter types respectively to Node and Set.
(Junping Du via szetszwo)
HDFS-5240. Separate formatting from logging in the audit logger API (daryn)
OPTIMIZATIONS
HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
BUG FIXES
HDFS-5034. Remove debug prints from GetFileLinkInfo (Andrew Wang via Colin
Patrick McCabe)
@@ -321,6 +325,8 @@ Release 2.2.0 - UNRELEASED
BUG FIXES
HDFS-5139. Remove redundant -R option from setrep.
Release 2.1.1-beta - 2013-09-23
INCOMPATIBLE CHANGES
@@ -408,6 +414,9 @@ Release 2.1.1-beta - 2013-09-23
HDFS-5212. Refactor RpcMessage and NFS3Response to support different
types of authentication information. (jing9)
HDFS-4971. Move IO operations out of locking in OpenFileCtx. (brandonli and
jing9)
OPTIMIZATIONS
BUG FIXES

View File

@@ -443,7 +443,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
private final long accessTimePrecision;
/** Lock to protect FSNamesystem. */
private ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);
private ReentrantReadWriteLock fsLock;
/**
* Used when this NN is in standby state to read from the shared edit log.
@@ -618,6 +618,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
*/
FSNamesystem(Configuration conf, FSImage fsImage, boolean ignoreRetryCache)
throws IOException {
boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
LOG.info("fsLock is fair:" + fair);
fsLock = new ReentrantReadWriteLock(fair);
try {
resourceRecheckInterval = conf.getLong(
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
@@ -6893,10 +6896,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
sb.append(trackingId);
}
auditLog.info(sb);
logAuditMessage(sb.toString());
}
}
public void logAuditMessage(String message) {
auditLog.info(message);
}
}
}
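The new dfs.namenode.fslock.fair key simply selects the fairness mode of the JDK ReentrantReadWriteLock, as the constructor change above shows. A minimal sketch of the same wiring (class name illustrative):

import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration;

public class FsLockFairnessSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Default is fair (true); false trades FIFO fairness for throughput.
    conf.setBoolean("dfs.namenode.fslock.fair", false);
    boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
    ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(fair);
    System.out.println("fsLock is fair: " + fsLock.isFair());  // false
  }
}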

View File

@@ -61,6 +61,8 @@ import static org.junit.Assert.*;
public class TestDFSShell {
private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
private static AtomicInteger counter = new AtomicInteger();
private final int SUCCESS = 0;
private final int ERROR = 1;
static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class);
@@ -1619,9 +1621,6 @@ public class TestDFSShell {
// force Copy Option is -f
@Test (timeout = 30000)
public void testCopyCommandsWithForceOption() throws Exception {
final int SUCCESS = 0;
final int ERROR = 1;
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
@@ -1682,7 +1681,55 @@ public class TestDFSShell {
}
cluster.shutdown();
}
}
// setrep for file and directory.
@Test (timeout = 30000)
public void testSetrep() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
FsShell shell = null;
FileSystem fs = null;
final String testdir1 = "/tmp/TestDFSShell-testSetrep-" + counter.getAndIncrement();
final String testdir2 = testdir1 + "/nestedDir";
final Path hdfsFile1 = new Path(testdir1, "testFileForSetrep");
final Path hdfsFile2 = new Path(testdir2, "testFileForSetrep");
final Short oldRepFactor = new Short((short) 1);
final Short newRepFactor = new Short((short) 3);
try {
String[] argv;
cluster.waitActive();
fs = cluster.getFileSystem();
assertThat(fs.mkdirs(new Path(testdir2)), is(true));
shell = new FsShell(conf);
fs.create(hdfsFile1, true).close();
fs.create(hdfsFile2, true).close();
// Tests for setrep on a file.
argv = new String[] { "-setrep", newRepFactor.toString(), hdfsFile1.toString() };
assertThat(shell.run(argv), is(SUCCESS));
assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor));
assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(oldRepFactor));
// Tests for setrep on a directory and make sure it is applied recursively.
argv = new String[] { "-setrep", newRepFactor.toString(), testdir1 };
assertThat(shell.run(argv), is(SUCCESS));
assertThat(fs.getFileStatus(hdfsFile1).getReplication(), is(newRepFactor));
assertThat(fs.getFileStatus(hdfsFile2).getReplication(), is(newRepFactor));
} finally {
if (shell != null) {
shell.close();
}
cluster.shutdown();
}
}
/**

View File

@@ -20,8 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
@@ -142,4 +141,21 @@ public class TestFSNamesystem {
assertTrue("Replication queues weren't being populated after entering "
+ "safemode 2nd time", fsn.isPopulatingReplQueues());
}
@Test
public void testFsLockFairness() throws IOException, InterruptedException{
Configuration conf = new Configuration();
FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
FSImage fsImage = Mockito.mock(FSImage.class);
Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
conf.setBoolean("dfs.namenode.fslock.fair", true);
FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
assertTrue(fsNamesystem.getFsLockForTests().isFair());
conf.setBoolean("dfs.namenode.fslock.fair", false);
fsNamesystem = new FSNamesystem(conf, fsImage);
assertFalse(fsNamesystem.getFsLockForTests().isFair());
}
}

View File

@@ -6049,7 +6049,7 @@
<command>-fs NAMENODE -mkdir /dir0</command>
<command>-fs NAMENODE -touchz /dir0/file0</command>
<command>-fs NAMENODE -touchz /dir0/file1</command>
<command>-fs NAMENODE -setrep -R 2 /dir0</command>
<command>-fs NAMENODE -setrep 2 /dir0</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -r /user</command>
@@ -6072,7 +6072,7 @@
<command>-fs NAMENODE -mkdir -p dir0</command>
<command>-fs NAMENODE -touchz dir0/file0</command>
<command>-fs NAMENODE -touchz dir0/file1</command>
<command>-fs NAMENODE -setrep -R 2 dir0</command>
<command>-fs NAMENODE -setrep 2 dir0</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -r /user</command>
@@ -6089,6 +6089,24 @@
</comparators>
</test>
<test> <!-- TESTED -->
<description>setrep: -R ignored for existing file</description>
<test-commands>
<command>-fs NAMENODE -mkdir -p dir0</command>
<command>-fs NAMENODE -touchz dir0/file0</command>
<command>-fs NAMENODE -setrep -R 2 dir0/file0</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -r /user</command>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^Replication 2 set: dir0/file0</expected-output>
</comparator>
</comparators>
</test>
<test> <!-- TESTED -->
<description>setrep: non existent file (absolute path)</description>
<test-commands>
@@ -6145,7 +6163,7 @@
<command>-fs NAMENODE -mkdir hdfs:///dir0/</command>
<command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
<command>-fs NAMENODE -touchz hdfs:///dir0/file1</command>
<command>-fs NAMENODE -setrep -R 2 hdfs:///dir0</command>
<command>-fs NAMENODE -setrep 2 hdfs:///dir0</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -r hdfs:///*</command>
@@ -6203,7 +6221,7 @@
<command>-fs NAMENODE -mkdir -p NAMENODE/dir0</command>
<command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
<command>-fs NAMENODE -touchz NAMENODE/dir0/file1</command>
<command>-fs NAMENODE -setrep -R 2 NAMENODE/dir0</command>
<command>-fs NAMENODE -setrep 2 NAMENODE/dir0</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -r NAMENODE/*</command>

View File

@@ -178,6 +178,9 @@ Release 2.3.0 - UNRELEASED
MAPREDUCE-5404. HSAdminServer does not use ephemeral ports in minicluster
mode (Ted Yu via jlowe)
MAPREDUCE-5522. Incorrect order expected from JobQueueInfo (Jinghui Wang
via bobby)
Release 2.2.0 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@@ -128,10 +128,15 @@ public class TestQueue {
assertEquals(secondSubQueue.getJobQueueInfo().getChildren().size(), 0);
// test
assertEquals(manager.getSchedulerInfo("first"), "queueInfo");
assertEquals(manager.getJobQueueInfos()[0].getQueueName(), secondSubQueue
.getJobQueueInfo().getQueueName());
assertEquals(manager.getJobQueueInfos()[1].getQueueName(), firstSubQueue
.getJobQueueInfo().getQueueName());
Set<String> queueJobQueueInfos = new HashSet<String>();
for(JobQueueInfo jobInfo : manager.getJobQueueInfos()){
queueJobQueueInfos.add(jobInfo.getQueueName());
}
Set<String> rootJobQueueInfos = new HashSet<String>();
for(Queue queue : root.getChildren()){
rootJobQueueInfos.add(queue.getJobQueueInfo().getQueueName());
}
assertEquals(queueJobQueueInfos, rootJobQueueInfos);
// test getJobQueueInfoMapping
assertEquals(
manager.getJobQueueInfoMapping().get("first").getQueueName(), "first");

View File

@@ -43,6 +43,9 @@ Release 2.3.0 - UNRELEASED
YARN-1060. Two tests in TestFairScheduler are missing @Test annotation
(Niranjan Singh via Sandy Ryza)
YARN-1188. The context of QueueMetrics becomes default when using
FairScheduler (Tsuyoshi Ozawa via Sandy Ryza)
Release 2.2.0 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@@ -21,12 +21,14 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@Metrics(context="yarn")
public class FSQueueMetrics extends QueueMetrics {
@Metric("Fair share of memory in MB") MutableGaugeInt fairShareMB;