diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 5fcb938d232..a501799ea15 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -139,6 +139,17 @@
true
+ <plugin>
+   <groupId>org.apache.maven.plugins</groupId>
+   <artifactId>maven-jar-plugin</artifactId>
+   <executions>
+     <execution>
+       <goals>
+         <goal>test-jar</goal>
+       </goals>
+     </execution>
+   </executions>
+ </plugin>
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c0ccd0a58e1..9e3e826ea77 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -36,10 +36,6 @@ Trunk (Unreleased)
HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
- HADOOP-7664. Remove warmings when overriding final parameter configuration
- if the override value is same as the final parameter value.
- (Ravi Prakash via suresh)
-
HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin
Jetly via jitendra)
@@ -162,9 +158,6 @@ Trunk (Unreleased)
HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
- HADOOP-10607. Create API to separate credential/password storage from
- applications. (Larry McCay via omalley)
-
HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata.
(tucu)
@@ -182,6 +175,8 @@ Trunk (Unreleased)
HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)
+ HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)
+
BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -379,6 +374,16 @@ Trunk (Unreleased)
NativeAzureFileSystem#NativeAzureFsInputStream#close().
(Chen He via cnauroth)
+ HADOOP-10831. UserProvider is not thread safe. (Benoy Antony via umamahesh)
+
+ HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)
+
+ HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
+ (Mike Yoder via wang)
+
+ HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
+ System. (Shanyu Zhao via cnauroth)
+
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -397,6 +402,30 @@ Release 2.6.0 - UNRELEASED
HADOOP-10815. Implement Windows equivalent of mlock. (cnauroth)
+ HADOOP-7664. Remove warmings when overriding final parameter configuration
+ if the override value is same as the final parameter value.
+ (Ravi Prakash via suresh)
+
+ HADOOP-10673. Update rpc metrics when the call throws an exception. (Ming Ma
+ via jing9)
+
+ HADOOP-10845. Add common tests for ACLs in combination with viewfs.
+ (Stephen Chu via cnauroth)
+
+ HADOOP-10839. Add unregisterSource() to MetricsSystem API.
+ (Shanyu Zhao via cnauroth)
+
+ HADOOP-10607. Create an API to separate credentials/password storage
+ from applications (Larry McCay via omalley)
+
+ HADOOP-10732. Fix locking in credential update. (Ted Yu via omalley)
+
+ HADOOP-10733. Fix potential null dereference in CredShell. (Ted Yu via
+ omalley)
+
+ HADOOP-10610. Upgrade S3n s3.fs.buffer.dir to support multi directories.
+ (Ted Malaska via atm)
+
OPTIMIZATIONS
BUG FIXES
@@ -412,6 +441,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-10810. Clean up native code compilation warnings. (cnauroth)
+ HADOOP-9921. daemon scripts should remove pid file on stop call after stop
+ or process is found not running ( vinayakumarb )
+
+ HADOOP-10591. Compression codecs must used pooled direct buffers or
+ deallocate direct buffers when stream is closed (cmccabe)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
index bb6ed8690ef..6a4cd69152e 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
@@ -198,6 +198,7 @@ case $startStop in
else
echo no $command to stop
fi
+ rm -f $pid
else
echo no $command to stop
fi
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
index 80dd9a0326d..fb01e5f7c5b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
@@ -57,6 +57,16 @@ public class KeyShell extends Configured implements Tool {
private boolean userSuppliedProvider = false;
+ /**
+ * Primary entry point for the KeyShell; called via main().
+ *
+ * @param args Command line arguments.
+ * @return 0 on success and 1 on failure. This value is passed back to
+ * the unix shell, so we must follow shell return code conventions:
+ * the return code is an unsigned character, and 0 means success, and
+ * small positive integers mean failure.
+ * @throws Exception
+ */
@Override
public int run(String[] args) throws Exception {
int exitCode = 0;
@@ -68,11 +78,11 @@ public class KeyShell extends Configured implements Tool {
if (command.validate()) {
command.execute();
} else {
- exitCode = -1;
+ exitCode = 1;
}
} catch (Exception e) {
e.printStackTrace(err);
- return -1;
+ return 1;
}
return exitCode;
}
@@ -86,8 +96,8 @@ public class KeyShell extends Configured implements Tool {
* % hadoop key list [-provider providerPath]
* % hadoop key delete keyName [--provider providerPath] [-i]
*
- * @param args
- * @return
+ * @param args Command line arguments.
+ * @return 0 on success, 1 on failure.
* @throws IOException
*/
private int init(String[] args) throws IOException {
@@ -105,7 +115,7 @@ public class KeyShell extends Configured implements Tool {
command = new CreateCommand(keyName, options);
if ("--help".equals(keyName)) {
printKeyShellUsage();
- return -1;
+ return 1;
}
} else if (args[i].equals("delete")) {
String keyName = "--help";
@@ -116,7 +126,7 @@ public class KeyShell extends Configured implements Tool {
command = new DeleteCommand(keyName);
if ("--help".equals(keyName)) {
printKeyShellUsage();
- return -1;
+ return 1;
}
} else if (args[i].equals("roll")) {
String keyName = "--help";
@@ -127,7 +137,7 @@ public class KeyShell extends Configured implements Tool {
command = new RollCommand(keyName);
if ("--help".equals(keyName)) {
printKeyShellUsage();
- return -1;
+ return 1;
}
} else if ("list".equals(args[i])) {
command = new ListCommand();
@@ -145,13 +155,13 @@ public class KeyShell extends Configured implements Tool {
out.println("\nAttributes must be in attribute=value form, " +
"or quoted\nlike \"attribute = value\"\n");
printKeyShellUsage();
- return -1;
+ return 1;
}
if (attributes.containsKey(attr)) {
out.println("\nEach attribute must correspond to only one value:\n" +
"atttribute \"" + attr + "\" was repeated\n" );
printKeyShellUsage();
- return -1;
+ return 1;
}
attributes.put(attr, val);
} else if ("--provider".equals(args[i]) && moreTokens) {
@@ -163,17 +173,17 @@ public class KeyShell extends Configured implements Tool {
interactive = true;
} else if ("--help".equals(args[i])) {
printKeyShellUsage();
- return -1;
+ return 1;
} else {
printKeyShellUsage();
ToolRunner.printGenericCommandUsage(System.err);
- return -1;
+ return 1;
}
}
if (command == null) {
printKeyShellUsage();
- return -1;
+ return 1;
}
if (!attributes.isEmpty()) {
@@ -491,10 +501,11 @@ public class KeyShell extends Configured implements Tool {
}
/**
- * Main program.
+ * main() entry point for the KeyShell. While strictly speaking the
+ * return is void, it will System.exit() with a return code: 0 is for
+ * success and 1 for failure.
*
- * @param args
- * Command line arguments
+ * @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
index e978e7067ef..acc5500d7f5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.s3.S3Exception;
@@ -225,6 +226,7 @@ public class NativeS3FileSystem extends FileSystem {
private OutputStream backupStream;
private MessageDigest digest;
private boolean closed;
+ private LocalDirAllocator lDirAlloc;
public NativeS3FsOutputStream(Configuration conf,
NativeFileSystemStore store, String key, Progressable progress,
@@ -246,11 +248,10 @@ public class NativeS3FileSystem extends FileSystem {
}
private File newBackupFile() throws IOException {
- File dir = new File(conf.get("fs.s3.buffer.dir"));
- if (!dir.mkdirs() && !dir.exists()) {
- throw new IOException("Cannot create S3 buffer directory: " + dir);
+ if (lDirAlloc == null) {
+ lDirAlloc = new LocalDirAllocator("fs.s3.buffer.dir");
}
- File result = File.createTempFile("output-", ".tmp", dir);
+ File result = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
result.deleteOnExit();
return result;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index 2c184f6bb05..f1975eae1b2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
@@ -279,6 +281,38 @@ class ChRootedFs extends AbstractFileSystem {
myFs.setTimes(fullPath(f), mtime, atime);
}
+ @Override
+ public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ myFs.modifyAclEntries(fullPath(path), aclSpec);
+ }
+
+ @Override
+ public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ myFs.removeAclEntries(fullPath(path), aclSpec);
+ }
+
+ @Override
+ public void removeDefaultAcl(Path path) throws IOException {
+ myFs.removeDefaultAcl(fullPath(path));
+ }
+
+ @Override
+ public void removeAcl(Path path) throws IOException {
+ myFs.removeAcl(fullPath(path));
+ }
+
+ @Override
+ public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+ myFs.setAcl(fullPath(path), aclSpec);
+ }
+
+ @Override
+ public AclStatus getAclStatus(Path path) throws IOException {
+ return myFs.getAclStatus(fullPath(path));
+ }
+
@Override
public void setVerifyChecksum(final boolean verifyChecksum)
throws IOException, UnresolvedLinkException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index b2f2bed5a28..34a9afc5499 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -871,5 +872,46 @@ public class ViewFileSystem extends FileSystem {
public short getDefaultReplication(Path f) {
throw new NotInMountpointException(f, "getDefaultReplication");
}
+
+ @Override
+ public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("modifyAclEntries", path);
+ }
+
+ @Override
+ public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("removeAclEntries", path);
+ }
+
+ @Override
+ public void removeDefaultAcl(Path path) throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("removeDefaultAcl", path);
+ }
+
+ @Override
+ public void removeAcl(Path path) throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("removeAcl", path);
+ }
+
+ @Override
+ public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("setAcl", path);
+ }
+
+ @Override
+ public AclStatus getAclStatus(Path path) throws IOException {
+ checkPathIsSlash(path);
+ return new AclStatus.Builder().owner(ugi.getUserName())
+ .group(ugi.getGroupNames()[0])
+ .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
+ .stickyBit(false).build();
+ }
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index f2a433b95f8..232fcbbb409 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -49,6 +49,9 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.local.LocalConfigKeys;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -603,6 +606,51 @@ public class ViewFs extends AbstractFileSystem {
return true;
}
+ @Override
+ public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ InodeTree.ResolveResult<AbstractFileSystem> res =
+ fsState.resolve(getUriPath(path), true);
+ res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec);
+ }
+
+ @Override
+ public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ InodeTree.ResolveResult<AbstractFileSystem> res =
+ fsState.resolve(getUriPath(path), true);
+ res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec);
+ }
+
+ @Override
+ public void removeDefaultAcl(Path path)
+ throws IOException {
+ InodeTree.ResolveResult<AbstractFileSystem> res =
+ fsState.resolve(getUriPath(path), true);
+ res.targetFileSystem.removeDefaultAcl(res.remainingPath);
+ }
+
+ @Override
+ public void removeAcl(Path path)
+ throws IOException {
+ InodeTree.ResolveResult<AbstractFileSystem> res =
+ fsState.resolve(getUriPath(path), true);
+ res.targetFileSystem.removeAcl(res.remainingPath);
+ }
+
+ @Override
+ public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+ InodeTree.ResolveResult<AbstractFileSystem> res =
+ fsState.resolve(getUriPath(path), true);
+ res.targetFileSystem.setAcl(res.remainingPath, aclSpec);
+ }
+
+ @Override
+ public AclStatus getAclStatus(Path path) throws IOException {
+ InodeTree.ResolveResult<AbstractFileSystem> res =
+ fsState.resolve(getUriPath(path), true);
+ return res.targetFileSystem.getAclStatus(res.remainingPath);
+ }
/*
@@ -832,5 +880,46 @@ public class ViewFs extends AbstractFileSystem {
throws AccessControlException {
throw readOnlyMountTable("setVerifyChecksum", "");
}
+
+ @Override
+ public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("modifyAclEntries", path);
+ }
+
+ @Override
+ public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+ throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("removeAclEntries", path);
+ }
+
+ @Override
+ public void removeDefaultAcl(Path path) throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("removeDefaultAcl", path);
+ }
+
+ @Override
+ public void removeAcl(Path path) throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("removeAcl", path);
+ }
+
+ @Override
+ public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("setAcl", path);
+ }
+
+ @Override
+ public AclStatus getAclStatus(Path path) throws IOException {
+ checkPathIsSlash(path);
+ return new AclStatus.Builder().owner(ugi.getUserName())
+ .group(ugi.getGroupNames()[0])
+ .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
+ .stickyBit(false).build();
+ }
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index 42e96cfdc50..37b97f2a641 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -100,7 +100,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
- return createOutputStream(out, createCompressor());
+ return CompressionCodec.Util.
+ createOutputStreamWithCodecPool(this, conf, out);
}
/**
@@ -153,7 +154,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
- return createInputStream(in, createDecompressor());
+ return CompressionCodec.Util.
+ createInputStreamWithCodecPool(this, conf, in);
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodec.java
index af2ff20b39d..f37aadfcb57 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodec.java
@@ -24,6 +24,7 @@ import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
/**
* This class encapsulates a streaming compression/decompression pair.
@@ -113,4 +114,58 @@ public interface CompressionCodec {
* @return the extension including the '.'
*/
String getDefaultExtension();
+
+ static class Util {
+ /**
+ * Create an output stream with a codec taken from the global CodecPool.
+ *
+ * @param codec The codec to use to create the output stream.
+ * @param conf The configuration to use if we need to create a new codec.
+ * @param out The output stream to wrap.
+ * @return The new output stream
+ * @throws IOException
+ */
+ static CompressionOutputStream createOutputStreamWithCodecPool(
+ CompressionCodec codec, Configuration conf, OutputStream out)
+ throws IOException {
+ Compressor compressor = CodecPool.getCompressor(codec, conf);
+ CompressionOutputStream stream = null;
+ try {
+ stream = codec.createOutputStream(out, compressor);
+ } finally {
+ if (stream == null) {
+ CodecPool.returnCompressor(compressor);
+ } else {
+ stream.setTrackedCompressor(compressor);
+ }
+ }
+ return stream;
+ }
+
+ /**
+ * Create an input stream with a codec taken from the global CodecPool.
+ *
+ * @param codec The codec to use to create the input stream.
+ * @param conf The configuration to use if we need to create a new codec.
+ * @param in The input stream to wrap.
+ * @return The new input stream
+ * @throws IOException
+ */
+ static CompressionInputStream createInputStreamWithCodecPool(
+ CompressionCodec codec, Configuration conf, InputStream in)
+ throws IOException {
+ Decompressor decompressor = CodecPool.getDecompressor(codec);
+ CompressionInputStream stream = null;
+ try {
+ stream = codec.createInputStream(in, decompressor);
+ } finally {
+ if (stream == null) {
+ CodecPool.returnDecompressor(decompressor);
+ } else {
+ stream.setTrackedDecompressor(decompressor);
+ }
+ }
+ return stream;
+ }
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
index 4491819d72c..cf3ac401cdd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionInputStream.java
@@ -41,6 +41,8 @@ public abstract class CompressionInputStream extends InputStream implements Seek
protected final InputStream in;
protected long maxAvailableData = 0L;
+ private Decompressor trackedDecompressor;
+
/**
* Create a compression input stream that reads
* the decompressed bytes from the given stream.
@@ -58,6 +60,10 @@ public abstract class CompressionInputStream extends InputStream implements Seek
@Override
public void close() throws IOException {
in.close();
+ if (trackedDecompressor != null) {
+ CodecPool.returnDecompressor(trackedDecompressor);
+ trackedDecompressor = null;
+ }
}
/**
@@ -112,4 +118,8 @@ public abstract class CompressionInputStream extends InputStream implements Seek
public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}
+
+ void setTrackedDecompressor(Decompressor decompressor) {
+ trackedDecompressor = decompressor;
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
index 9bd6b84f988..00e272a9cc5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionOutputStream.java
@@ -34,7 +34,13 @@ public abstract class CompressionOutputStream extends OutputStream {
* The output stream to be compressed.
*/
protected final OutputStream out;
-
+
+ /**
+ * If non-null, this is the Compressor object that we should call
+ * CodecPool#returnCompressor on when this stream is closed.
+ */
+ private Compressor trackedCompressor;
+
/**
* Create a compression output stream that writes
* the compressed bytes to the given stream.
@@ -43,11 +49,19 @@ public abstract class CompressionOutputStream extends OutputStream {
protected CompressionOutputStream(OutputStream out) {
this.out = out;
}
-
+
+ void setTrackedCompressor(Compressor compressor) {
+ trackedCompressor = compressor;
+ }
+
@Override
public void close() throws IOException {
finish();
out.close();
+ if (trackedCompressor != null) {
+ CodecPool.returnCompressor(trackedCompressor);
+ trackedCompressor = null;
+ }
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
index dc02dcaf429..0e6f02cc9f4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
@@ -51,14 +51,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
- // This may leak memory if called in a loop. The createCompressor() call
- // may cause allocation of an untracked direct-backed buffer if native
- // libs are being used (even if you close the stream). A Compressor
- // object should be reused between successive calls.
- LOG.warn("DefaultCodec.createOutputStream() may leak memory. "
- + "Create a compressor first.");
- return new CompressorStream(out, createCompressor(),
- conf.getInt("io.file.buffer.size", 4*1024));
+ return CompressionCodec.Util.
+ createOutputStreamWithCodecPool(this, conf, out);
}
@Override
@@ -82,8 +76,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
- return new DecompressorStream(in, createDecompressor(),
- conf.getInt("io.file.buffer.size", 4*1024));
+ return CompressionCodec.Util.
+ createInputStreamWithCodecPool(this, conf, in);
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
index 487f29bec6d..c493f1705dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
@@ -159,10 +159,11 @@ public class GzipCodec extends DefaultCodec {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
- return (ZlibFactory.isNativeZlibLoaded(conf)) ?
- new CompressorStream(out, createCompressor(),
- conf.getInt("io.file.buffer.size", 4*1024)) :
- new GzipOutputStream(out);
+ if (!ZlibFactory.isNativeZlibLoaded(conf)) {
+ return new GzipOutputStream(out);
+ }
+ return CompressionCodec.Util.
+ createOutputStreamWithCodecPool(this, conf, out);
}
@Override
@@ -192,8 +193,9 @@ public class GzipCodec extends DefaultCodec {
@Override
public CompressionInputStream createInputStream(InputStream in)
- throws IOException {
- return createInputStream(in, null);
+ throws IOException {
+ return CompressionCodec.Util.
+ createInputStreamWithCodecPool(this, conf, in);
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
index 4b0ea796b71..61462c08ddc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Lz4Codec.java
@@ -84,7 +84,8 @@ public class Lz4Codec implements Configurable, CompressionCodec {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
- return createOutputStream(out, createCompressor());
+ return CompressionCodec.Util.
+ createOutputStreamWithCodecPool(this, conf, out);
}
/**
@@ -157,7 +158,8 @@ public class Lz4Codec implements Configurable, CompressionCodec {
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
- return createInputStream(in, createDecompressor());
+ return CompressionCodec.Util.
+ createInputStreamWithCodecPool(this, conf, in);
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 402f8c8e99f..8d2fa1a6fb4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -95,7 +95,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
- return createOutputStream(out, createCompressor());
+ return CompressionCodec.Util.
+ createOutputStreamWithCodecPool(this, conf, out);
}
/**
@@ -158,7 +159,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
- return createInputStream(in, createDecompressor());
+ return CompressionCodec.Util.
+ createInputStreamWithCodecPool(this, conf, in);
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 3bdcbd9856b..64615d22f85 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -599,24 +599,35 @@ public class ProtobufRpcEngine implements RpcEngine {
.mergeFrom(request.theRequestRead).build();
Message result;
+ long startTime = Time.now();
+ int qTime = (int) (startTime - receiveTime);
+ Exception exception = null;
try {
- long startTime = Time.now();
server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
result = service.callBlockingMethod(methodDescriptor, null, param);
- int processingTime = (int) (Time.now() - startTime);
- int qTime = (int) (startTime - receiveTime);
- if (LOG.isDebugEnabled()) {
- LOG.info("Served: " + methodName + " queueTime= " + qTime +
- " procesingTime= " + processingTime);
- }
- server.rpcMetrics.addRpcQueueTime(qTime);
- server.rpcMetrics.addRpcProcessingTime(processingTime);
- server.rpcDetailedMetrics.addProcessingTime(methodName,
- processingTime);
} catch (ServiceException e) {
+ exception = (Exception) e.getCause();
throw (Exception) e.getCause();
} catch (Exception e) {
+ exception = e;
throw e;
+ } finally {
+ int processingTime = (int) (Time.now() - startTime);
+ if (LOG.isDebugEnabled()) {
+ String msg = "Served: " + methodName + " queueTime= " + qTime +
+ " procesingTime= " + processingTime;
+ if (exception != null) {
+ msg += " exception= " + exception.getClass().getSimpleName();
+ }
+ LOG.debug(msg);
+ }
+ String detailedMetricsName = (exception == null) ?
+ methodName :
+ exception.getClass().getSimpleName();
+ server.rpcMetrics.addRpcQueueTime(qTime);
+ server.rpcMetrics.addRpcProcessingTime(processingTime);
+ server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
+ processingTime);
}
return new RpcResponseWrapper(result);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 0f11c97c9eb..24dd0c21b82 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -355,8 +355,8 @@ public abstract class Server {
private int readThreads; // number of read threads
private int readerPendingConnectionQueue; // number of connections to queue per read thread
private Class<? extends Writable> rpcRequestClass; // class used for deserializing the rpc request
- protected RpcMetrics rpcMetrics;
- protected RpcDetailedMetrics rpcDetailedMetrics;
+ final protected RpcMetrics rpcMetrics;
+ final protected RpcDetailedMetrics rpcDetailedMetrics;
private Configuration conf;
private String portRangeConfig = null;
@@ -2494,12 +2494,8 @@ public abstract class Server {
listener.doStop();
responder.interrupt();
notifyAll();
- if (this.rpcMetrics != null) {
- this.rpcMetrics.shutdown();
- }
- if (this.rpcDetailedMetrics != null) {
- this.rpcDetailedMetrics.shutdown();
- }
+ this.rpcMetrics.shutdown();
+ this.rpcDetailedMetrics.shutdown();
}
/** Wait for the server to be stopped.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index 34823b34d1f..04ab4dc2699 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -471,37 +471,29 @@ public class WritableRpcEngine implements RpcEngine {
// Invoke the protocol method
+ long startTime = Time.now();
+ int qTime = (int) (startTime-receivedTime);
+ Exception exception = null;
try {
- long startTime = Time.now();
- Method method =
+ Method method =
protocolImpl.protocolClass.getMethod(call.getMethodName(),
call.getParameterClasses());
method.setAccessible(true);
server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
Object value =
method.invoke(protocolImpl.protocolImpl, call.getParameters());
- int processingTime = (int) (Time.now() - startTime);
- int qTime = (int) (startTime-receivedTime);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Served: " + call.getMethodName() +
- " queueTime= " + qTime +
- " procesingTime= " + processingTime);
- }
- server.rpcMetrics.addRpcQueueTime(qTime);
- server.rpcMetrics.addRpcProcessingTime(processingTime);
- server.rpcDetailedMetrics.addProcessingTime(call.getMethodName(),
- processingTime);
if (server.verbose) log("Return: "+value);
-
return new ObjectWritable(method.getReturnType(), value);
} catch (InvocationTargetException e) {
Throwable target = e.getTargetException();
if (target instanceof IOException) {
+ exception = (IOException)target;
throw (IOException)target;
} else {
IOException ioe = new IOException(target.toString());
ioe.setStackTrace(target.getStackTrace());
+ exception = ioe;
throw ioe;
}
} catch (Throwable e) {
@@ -510,8 +502,27 @@ public class WritableRpcEngine implements RpcEngine {
}
IOException ioe = new IOException(e.toString());
ioe.setStackTrace(e.getStackTrace());
+ exception = ioe;
throw ioe;
- }
+ } finally {
+ int processingTime = (int) (Time.now() - startTime);
+ if (LOG.isDebugEnabled()) {
+ String msg = "Served: " + call.getMethodName() +
+ " queueTime= " + qTime +
+ " procesingTime= " + processingTime;
+ if (exception != null) {
+ msg += " exception= " + exception.getClass().getSimpleName();
+ }
+ LOG.debug(msg);
+ }
+ String detailedMetricsName = (exception == null) ?
+ call.getMethodName() :
+ exception.getClass().getSimpleName();
+ server.rpcMetrics.addRpcQueueTime(qTime);
+ server.rpcMetrics.addRpcProcessingTime(processingTime);
+ server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
+ processingTime);
+ }
}
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
index e853319c4e8..a277abd6e13 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsSystem.java
@@ -54,6 +54,12 @@ public abstract class MetricsSystem implements MetricsSystemMXBean {
*/
public abstract <T> T register(String name, String desc, T source);
+ /**
+ * Unregister a metrics source
+ * @param name of the source. This is the name you use to call register()
+ */
+ public abstract void unregisterSource(String name);
+
/**
* Register a metrics source (deriving name and description from the object)
* @param <T> the actual type of the source object
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index 2c236bc533f..e4b5580536b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -85,7 +85,7 @@ class MetricsConfig extends SubsetConfiguration {
private ClassLoader pluginLoader;
MetricsConfig(Configuration c, String prefix) {
- super(c, prefix, ".");
+ super(c, prefix.toLowerCase(Locale.US), ".");
}
static MetricsConfig create(String prefix) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index cf2dda4e380..722abd95c4a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -232,6 +232,17 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
return source;
}
+ @Override public synchronized
+ void unregisterSource(String name) {
+ if (sources.containsKey(name)) {
+ sources.get(name).stop();
+ sources.remove(name);
+ }
+ if (allSources.containsKey(name)) {
+ allSources.remove(name);
+ }
+ }
+
synchronized
void registerSource(String name, String desc, MetricsSource source) {
checkNotNull(config, "config");
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProvider.java
index bded4b972af..63c1cb48bfe 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProvider.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceStability;
* abstraction to separate credential storage from users of them. It
* is intended to support getting or storing passwords in a variety of ways,
* including third party bindings.
+ *
+ * <code>CredentialProvider</code> implementations must be thread safe.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index 02f4f751129..bb35ce51d48 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -264,7 +264,7 @@ public class CredentialShell extends Configured implements Tool {
alias + " from CredentialProvider " + provider.toString() +
". Continue?:");
if (!cont) {
- out.println("Nothing has been be deleted.");
+ out.println("Nothing has been deleted.");
}
return cont;
} catch (IOException e) {
@@ -373,12 +373,12 @@ public class CredentialShell extends Configured implements Tool {
char[] newPassword2 = c.readPassword("Enter password again: ");
noMatch = !Arrays.equals(newPassword1, newPassword2);
if (noMatch) {
- Arrays.fill(newPassword1, ' ');
+ if (newPassword1 != null) Arrays.fill(newPassword1, ' ');
c.format("Passwords don't match. Try again.%n");
} else {
cred = newPassword1;
}
- Arrays.fill(newPassword2, ' ');
+ if (newPassword2 != null) Arrays.fill(newPassword2, ' ');
} while (noMatch);
return cred;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
index 61958fe413e..551c4ca14ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
@@ -230,6 +230,7 @@ public class JavaKeyStoreProvider extends CredentialProvider {
CredentialEntry innerSetCredential(String alias, char[] material)
throws IOException {
+ writeLock.lock();
try {
keyStore.setKeyEntry(alias, new SecretKeySpec(
new String(material).getBytes("UTF-8"), "AES"),
@@ -237,6 +238,8 @@ public class JavaKeyStoreProvider extends CredentialProvider {
} catch (KeyStoreException e) {
throw new IOException("Can't store credential " + alias + " in " + this,
e);
+ } finally {
+ writeLock.unlock();
}
changed = true;
return new CredentialEntry(alias, material);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
index 9e724c070bf..99d6d0060d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
@@ -55,7 +55,7 @@ public class UserProvider extends CredentialProvider {
}
@Override
- public CredentialEntry getCredentialEntry(String alias) {
+ public synchronized CredentialEntry getCredentialEntry(String alias) {
byte[] bytes = credentials.getSecretKey(new Text(alias));
if (bytes == null) {
return null;
@@ -64,7 +64,7 @@ public class UserProvider extends CredentialProvider {
}
@Override
- public CredentialEntry createCredentialEntry(String name, char[] credential)
+ public synchronized CredentialEntry createCredentialEntry(String name, char[] credential)
throws IOException {
Text nameT = new Text(name);
if (credentials.getSecretKey(nameT) != null) {
@@ -77,7 +77,7 @@ public class UserProvider extends CredentialProvider {
}
@Override
- public void deleteCredentialEntry(String name) throws IOException {
+ public synchronized void deleteCredentialEntry(String name) throws IOException {
byte[] cred = credentials.getSecretKey(new Text(name));
if (cred != null) {
credentials.removeSecretKey(new Text(name));
@@ -95,7 +95,7 @@ public class UserProvider extends CredentialProvider {
}
@Override
- public void flush() {
+ public synchronized void flush() {
user.addCredentials(credentials);
}
@@ -112,7 +112,7 @@ public class UserProvider extends CredentialProvider {
}
@Override
- public List<CredentialEntry> getAliases() throws IOException {
+ public synchronized List<CredentialEntry> getAliases() throws IOException {
List<CredentialEntry> list = new ArrayList<CredentialEntry>();
List<Text> aliases = credentials.getAllSecretKeys();
for (Text key : aliases) {
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
index f4fabab7262..149c2202506 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
@@ -127,7 +127,7 @@ User Commands
Runs a HDFS filesystem checking utility.
See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info.
- Usage: <<<hdfs fsck <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]]>>>
+ Usage: <<<hdfs fsck <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]] [-showprogress]>>>
*------------------+---------------------------------------------+
|| COMMAND_OPTION || Description
@@ -148,6 +148,8 @@ User Commands
*------------------+---------------------------------------------+
| -racks | Print out network topology for data-node locations.
*------------------+---------------------------------------------+
+| -showprogress | Print out show progress in output. Default is OFF (no progress).
+*------------------+---------------------------------------------+
* <<>>
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
index b1882a660f4..154579b567d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
@@ -161,7 +161,7 @@ public class TestKeyShell {
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
- assertEquals(-1, rc);
+ assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created."));
}
@@ -174,7 +174,7 @@ public class TestKeyShell {
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
- assertEquals(-1, rc);
+ assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created."));
}
@@ -187,7 +187,7 @@ public class TestKeyShell {
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
- assertEquals(-1, rc);
+ assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured."));
}
@@ -216,7 +216,7 @@ public class TestKeyShell {
config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
ks.setConf(config);
rc = ks.run(args1);
- assertEquals(-1, rc);
+ assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured."));
}
@@ -262,19 +262,19 @@ public class TestKeyShell {
final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider,
"--attr", "=bar"};
rc = ks.run(args2);
- assertEquals(-1, rc);
+ assertEquals(1, rc);
/* Not in attribute = value form */
outContent.reset();
args2[5] = "foo";
rc = ks.run(args2);
- assertEquals(-1, rc);
+ assertEquals(1, rc);
/* No attribute or value */
outContent.reset();
args2[5] = "=";
rc = ks.run(args2);
- assertEquals(-1, rc);
+ assertEquals(1, rc);
/* Legal: attribute is a, value is b=c */
outContent.reset();
@@ -308,7 +308,7 @@ public class TestKeyShell {
"--attr", "foo=bar",
"--attr", "foo=glarch"};
rc = ks.run(args4);
- assertEquals(-1, rc);
+ assertEquals(1, rc);
/* Clean up to be a good citizen */
deleteKey(ks, "keyattr1");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 2d3cb270f11..e1a440d0614 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.viewfs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
+import java.util.ArrayList;
import java.util.List;
@@ -28,9 +29,16 @@ import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
+import org.apache.hadoop.fs.permission.AclEntry;
+import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
@@ -38,6 +46,7 @@ import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Assert;
@@ -96,7 +105,6 @@ public class ViewFileSystemBaseTest {
// in the test root
// Set up the defaultMT in the config with our mount point links
- //Configuration conf = new Configuration();
conf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
@@ -720,4 +728,49 @@ public class ViewFileSystemBaseTest {
Assert.assertTrue("Other-readable permission not set!",
perms.getOtherAction().implies(FsAction.READ));
}
+
+ /**
+ * Verify the behavior of ACL operations on paths above the root of
+ * any mount table entry.
+ */
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalModifyAclEntries() throws IOException {
+ fsView.modifyAclEntries(new Path("/internalDir"),
+ new ArrayList<AclEntry>());
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveAclEntries() throws IOException {
+ fsView.removeAclEntries(new Path("/internalDir"),
+ new ArrayList<AclEntry>());
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveDefaultAcl() throws IOException {
+ fsView.removeDefaultAcl(new Path("/internalDir"));
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveAcl() throws IOException {
+ fsView.removeAcl(new Path("/internalDir"));
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalSetAcl() throws IOException {
+ fsView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
+ }
+
+ @Test
+ public void testInternalGetAclStatus() throws IOException {
+ final UserGroupInformation currentUser =
+ UserGroupInformation.getCurrentUser();
+ AclStatus aclStatus = fsView.getAclStatus(new Path("/internalDir"));
+ assertEquals(aclStatus.getOwner(), currentUser.getUserName());
+ assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
+ assertEquals(aclStatus.getEntries(),
+ AclUtil.getMinimalAcl(PERMISSION_555));
+ assertFalse(aclStatus.isStickyBit());
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index 0f771cd3ba5..2813c34bef4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -22,10 +22,14 @@ import static org.apache.hadoop.fs.FileContextTestHelper.checkFileStatus;
import static org.apache.hadoop.fs.FileContextTestHelper.exists;
import static org.apache.hadoop.fs.FileContextTestHelper.isDir;
import static org.apache.hadoop.fs.FileContextTestHelper.isFile;
+import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
+import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
@@ -39,8 +43,12 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Assert;
@@ -695,4 +703,48 @@ public class ViewFsBaseTest {
public void testInternalSetOwner() throws IOException {
fcView.setOwner(new Path("/internalDir"), "foo", "bar");
}
+
+ /**
+ * Verify the behavior of ACL operations on paths above the root of
+ * any mount table entry.
+ */
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalModifyAclEntries() throws IOException {
+ fcView.modifyAclEntries(new Path("/internalDir"),
+ new ArrayList<AclEntry>());
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveAclEntries() throws IOException {
+ fcView.removeAclEntries(new Path("/internalDir"),
+ new ArrayList<AclEntry>());
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveDefaultAcl() throws IOException {
+ fcView.removeDefaultAcl(new Path("/internalDir"));
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveAcl() throws IOException {
+ fcView.removeAcl(new Path("/internalDir"));
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalSetAcl() throws IOException {
+ fcView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
+ }
+
+ @Test
+ public void testInternalGetAclStatus() throws IOException {
+ final UserGroupInformation currentUser =
+ UserGroupInformation.getCurrentUser();
+ AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
+ assertEquals(aclStatus.getOwner(), currentUser.getUserName());
+ assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
+ assertEquals(aclStatus.getEntries(),
+ AclUtil.getMinimalAcl(PERMISSION_555));
+ assertFalse(aclStatus.isStickyBit());
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 45499f5c98f..dfbc91c43a6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -496,6 +496,8 @@ public class TestRPC {
caught = true;
}
assertTrue(caught);
+ rb = getMetrics(server.rpcDetailedMetrics.name());
+ assertCounter("IOExceptionNumOps", 1L, rb);
proxy.testServerGet();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
index 178719589bf..e097a0f8738 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGangliaMetrics.java
@@ -60,12 +60,12 @@ public class TestGangliaMetrics {
@Test
public void testTagsForPrefix() throws Exception {
ConfigBuilder cb = new ConfigBuilder()
- .add("Test.sink.ganglia.tagsForPrefix.all", "*")
- .add("Test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
+ .add("test.sink.ganglia.tagsForPrefix.all", "*")
+ .add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
"NumActiveSources")
- .add("Test.sink.ganglia.tagsForPrefix.none", "");
+ .add("test.sink.ganglia.tagsForPrefix.none", "");
GangliaSink30 sink = new GangliaSink30();
- sink.init(cb.subset("Test.sink.ganglia"));
+ sink.init(cb.subset("test.sink.ganglia"));
List<MetricsTag> tags = new ArrayList<MetricsTag>();
tags.add(new MetricsTag(MsInfo.Context, "all"));
@@ -98,8 +98,8 @@ public class TestGangliaMetrics {
@Test public void testGangliaMetrics2() throws Exception {
ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
- .add("Test.sink.gsink30.context", "test") // filter out only "test"
- .add("Test.sink.gsink31.context", "test") // filter out only "test"
+ .add("test.sink.gsink30.context", "test") // filter out only "test"
+ .add("test.sink.gsink31.context", "test") // filter out only "test"
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
index 564214bba65..0122045d383 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
@@ -88,11 +88,11 @@ public class TestMetricsSystemImpl {
DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
- .add("Test.sink.test.class", TestSink.class.getName())
- .add("Test.*.source.filter.exclude", "s0")
- .add("Test.source.s1.metric.filter.exclude", "X*")
- .add("Test.sink.sink1.metric.filter.exclude", "Y*")
- .add("Test.sink.sink2.metric.filter.exclude", "Y*")
+ .add("test.sink.test.class", TestSink.class.getName())
+ .add("test.*.source.filter.exclude", "s0")
+ .add("test.source.s1.metric.filter.exclude", "X*")
+ .add("test.sink.sink1.metric.filter.exclude", "Y*")
+ .add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
@@ -130,11 +130,11 @@ public class TestMetricsSystemImpl {
DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
- .add("Test.sink.test.class", TestSink.class.getName())
- .add("Test.*.source.filter.exclude", "s0")
- .add("Test.source.s1.metric.filter.exclude", "X*")
- .add("Test.sink.sink1.metric.filter.exclude", "Y*")
- .add("Test.sink.sink2.metric.filter.exclude", "Y*")
+ .add("test.sink.test.class", TestSink.class.getName())
+ .add("test.*.source.filter.exclude", "s0")
+ .add("test.source.s1.metric.filter.exclude", "X*")
+ .add("test.sink.sink1.metric.filter.exclude", "Y*")
+ .add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
@@ -169,13 +169,14 @@ public class TestMetricsSystemImpl {
@Test public void testMultiThreadedPublish() throws Exception {
final int numThreads = 10;
new ConfigBuilder().add("*.period", 80)
- .add("Test.sink.Collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
+ .add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
numThreads)
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
+
final CollectingSink sink = new CollectingSink(numThreads);
- ms.registerSink("Collector",
+ ms.registerSink("collector",
"Collector of values from all threads.", sink);
final TestSource[] sources = new TestSource[numThreads];
final Thread[] threads = new Thread[numThreads];
@@ -280,10 +281,10 @@ public class TestMetricsSystemImpl {
@Test public void testHangingSink() {
new ConfigBuilder().add("*.period", 8)
- .add("Test.sink.test.class", TestSink.class.getName())
- .add("Test.sink.hanging.retry.delay", "1")
- .add("Test.sink.hanging.retry.backoff", "1.01")
- .add("Test.sink.hanging.retry.count", "0")
+ .add("test.sink.test.class", TestSink.class.getName())
+ .add("test.sink.hanging.retry.delay", "1")
+ .add("test.sink.hanging.retry.backoff", "1.01")
+ .add("test.sink.hanging.retry.count", "0")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
@@ -379,6 +380,23 @@ public class TestMetricsSystemImpl {
ms.shutdown();
}
+ @Test public void testUnregisterSource() {
+ MetricsSystem ms = new MetricsSystemImpl();
+ TestSource ts1 = new TestSource("ts1");
+ TestSource ts2 = new TestSource("ts2");
+ ms.register("ts1", "", ts1);
+ ms.register("ts2", "", ts2);
+ MetricsSource s1 = ms.getSource("ts1");
+ assertNotNull(s1);
+ // should work when metrics system is not started
+ ms.unregisterSource("ts1");
+ s1 = ms.getSource("ts1");
+ assertNull(s1);
+ MetricsSource s2 = ms.getSource("ts2");
+ assertNotNull(s2);
+ ms.shutdown();
+ }
+
private void checkMetricsRecords(List<MetricsRecord> recs) {
LOG.debug(recs);
MetricsRecord r = recs.get(0);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
index 34758be95e7..c48b69f2149 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
@@ -127,6 +127,22 @@ public class TestCredShell {
"CredentialProviders configured."));
}
+ @Test
+ public void testPromptForCredentialWithEmptyPasswd() throws Exception {
+ String[] args1 = {"create", "credential1", "--provider",
+ "jceks://file" + tmpDir + "/credstore.jceks"};
+ ArrayList<String> passwords = new ArrayList<String>();
+ passwords.add(null);
+ passwords.add("p@ssw0rd");
+ int rc = 0;
+ CredentialShell shell = new CredentialShell();
+ shell.setConf(new Configuration());
+ shell.setPasswordReader(new MockPasswordReader(passwords));
+ rc = shell.run(args1);
+ assertEquals(outContent.toString(), -1, rc);
+ assertTrue(outContent.toString().contains("Passwords don't match"));
+ }
+
@Test
public void testPromptForCredential() throws Exception {
String[] args1 = {"create", "credential1", "--provider",
@@ -142,7 +158,7 @@ public class TestCredShell {
assertEquals(0, rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " +
"created."));
-
+
String[] args2 = {"delete", "credential1", "--provider",
"jceks://file" + tmpDir + "/credstore.jceks"};
rc = shell.run(args2);
@@ -162,7 +178,7 @@ public class TestCredShell {
public char[] readPassword(String prompt) {
if (passwords.size() == 0) return null;
String pass = passwords.remove(0);
- return pass.toCharArray();
+ return pass == null ? null : pass.toCharArray();
}
@Override
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index d04a7142bd0..e3e6ce09007 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -28,8 +28,6 @@ import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Provides access to the AccessControlLists used by KMS,
@@ -52,13 +50,11 @@ public class KMSACLs implements Runnable {
public static final int RELOADER_SLEEP_MILLIS = 1000;
- Map<Type, AccessControlList> acls;
- private ReadWriteLock lock;
+ private volatile Map<Type, AccessControlList> acls;
private ScheduledExecutorService executorService;
private long lastReload;
KMSACLs(Configuration conf) {
- lock = new ReentrantReadWriteLock();
if (conf == null) {
conf = loadACLs();
}
@@ -70,17 +66,13 @@ public class KMSACLs implements Runnable {
}
private void setACLs(Configuration conf) {
- lock.writeLock().lock();
- try {
- acls = new HashMap<Type, AccessControlList>();
- for (Type aclType : Type.values()) {
- String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
- acls.put(aclType, new AccessControlList(aclStr));
- LOG.info("'{}' ACL '{}'", aclType, aclStr);
- }
- } finally {
- lock.writeLock().unlock();
+ Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
+ for (Type aclType : Type.values()) {
+ String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
+ tempAcls.put(aclType, new AccessControlList(aclStr));
+ LOG.info("'{}' ACL '{}'", aclType, aclStr);
}
+ acls = tempAcls;
}
@Override
@@ -120,14 +112,7 @@ public class KMSACLs implements Runnable {
public boolean hasAccess(Type type, String user) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
- AccessControlList acl = null;
- lock.readLock().lock();
- try {
- acl = acls.get(type);
- } finally {
- lock.readLock().unlock();
- }
- return acl.isUserAllowed(ugi);
+ return acls.get(type).isUserAllowed(ugi);
}
}
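
The KMSACLs change above swaps a ReadWriteLock for a copy-then-publish scheme: the reloader builds a fresh map off to the side and publishes it with a single volatile write, so readers never block. A minimal sketch of that pattern, not part of the patch, with hypothetical class and field names:

import java.util.HashMap;
import java.util.Map;

class VolatileSnapshotConfig {
  private volatile Map<String, String> snapshot = new HashMap<String, String>();

  // Writer: build the new state on the side, then swap the reference in one
  // volatile write. Readers see either the complete old map or the new one.
  void reload(Map<String, String> freshValues) {
    Map<String, String> temp = new HashMap<String, String>(freshValues);
    snapshot = temp;
  }

  // Reader: a single volatile read, no locking.
  String lookup(String key) {
    return snapshot.get(key);
  }
}
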
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index d3893471613..8d7d6dc83c7 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -19,12 +19,16 @@ package org.apache.hadoop.mount;
import java.io.IOException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.oncrpc.SimpleUdpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
/**
* Main class for starting mountd daemon. This daemon implements the NFS
* mount protocol. When receiving a MOUNT request from an NFS client, it checks
@@ -33,6 +37,7 @@ import org.apache.hadoop.util.ShutdownHookManager;
* handle for requested directory and returns it to the client.
*/
abstract public class MountdBase {
+ public static final Log LOG = LogFactory.getLog(MountdBase.class);
private final RpcProgram rpcProgram;
private int udpBoundPort; // Will set after server starts
private int tcpBoundPort; // Will set after server starts
@@ -40,11 +45,11 @@ abstract public class MountdBase {
public RpcProgram getRpcProgram() {
return rpcProgram;
}
-
+
/**
* Constructor
* @param program
- * @throws IOException
+ * @throws IOException
*/
public MountdBase(RpcProgram program) throws IOException {
rpcProgram = program;
@@ -74,11 +79,16 @@ abstract public class MountdBase {
if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY);
- rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
- rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
+ try {
+ rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
+ rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
+ } catch (Throwable e) {
+ LOG.fatal("Failed to start the server. Cause:", e);
+ terminate(1, e);
+ }
}
}
-
+
/**
* Priority of the mountd shutdown hook.
*/
@@ -91,5 +101,5 @@ abstract public class MountdBase {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
}
}
-
+
}
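
The MountdBase change above (and the matching one in Nfs3Base further down) wraps portmap registration so that a failure is logged and the daemon exits instead of limping on half-started. A minimal sketch of the fail-fast idea, not part of the patch; registerWithPortmap() is a placeholder for the real RPC call, and System.exit stands in for ExitUtil.terminate:

class FailFastStartup {
  public static void main(String[] args) {
    try {
      registerWithPortmap();
    } catch (Throwable t) {
      System.err.println("Failed to start the server. Cause: " + t);
      System.exit(1);
    }
  }

  // Placeholder: simulates portmap/rpcbind being unreachable.
  static void registerWithPortmap() {
    throw new RuntimeException("portmap/rpcbind not reachable");
  }
}
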
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
index bf77dcd43cb..96286865385 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
@@ -71,7 +71,16 @@ public class NfsExports {
private static final Pattern CIDR_FORMAT_LONG =
Pattern.compile(SLASH_FORMAT_LONG);
-
+
+ // Hostnames are composed of series of 'labels' concatenated with dots.
+ // Labels can be between 1-63 characters long, and can only take
+ // letters, digits & hyphens. They cannot start and end with hyphens. For
+ // more details, refer RFC-1123 & http://en.wikipedia.org/wiki/Hostname
+ private static final String LABEL_FORMAT =
+ "[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?";
+ private static final Pattern HOSTNAME_FORMAT =
+ Pattern.compile("^(" + LABEL_FORMAT + "\\.)*" + LABEL_FORMAT + "$");
+
static class AccessCacheEntry implements LightWeightCache.Entry{
private final String hostAddr;
private AccessPrivilege access;
@@ -381,10 +390,14 @@ public class NfsExports {
LOG.debug("Using Regex match for '" + host + "' and " + privilege);
}
return new RegexMatch(privilege, host);
+ } else if (HOSTNAME_FORMAT.matcher(host).matches()) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Using exact match for '" + host + "' and " + privilege);
+ }
+ return new ExactMatch(privilege, host);
+ } else {
+ throw new IllegalArgumentException("Invalid hostname provided '" + host
+ + "'");
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Using exact match for '" + host + "' and " + privilege);
- }
- return new ExactMatch(privilege, host);
}
-}
\ No newline at end of file
+}
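
The HOSTNAME_FORMAT pattern added above validates each dot-separated label per RFC 1123 before NfsExports falls back to an exact host match, so malformed entries such as "foo#bar" are rejected up front. A small standalone sketch of the same check, not part of the patch (class name is hypothetical):

import java.util.regex.Pattern;

class HostnameCheck {
  // 1-63 letters, digits or hyphens per label; no leading/trailing hyphen.
  private static final String LABEL =
      "[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?";
  private static final Pattern HOSTNAME =
      Pattern.compile("^(" + LABEL + "\\.)*" + LABEL + "$");

  static boolean isValid(String host) {
    return HOSTNAME.matcher(host).matches();
  }

  public static void main(String[] args) {
    System.out.println(isValid("node-1.example.com")); // true
    System.out.println(isValid("foo#bar"));            // false
  }
}
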
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index d1f87cc1e12..b166330c79e 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
/**
* Nfs server. Supports NFS v3 using {@link RpcProgram}.
* Currently Mountd program is also started inside this class.
@@ -34,7 +36,7 @@ public abstract class Nfs3Base {
public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
private final RpcProgram rpcProgram;
private int nfsBoundPort; // Will set after server starts
-
+
public RpcProgram getRpcProgram() {
return rpcProgram;
}
@@ -46,11 +48,16 @@ public abstract class Nfs3Base {
public void start(boolean register) {
startTCPServer(); // Start TCP server
-
+
if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY);
- rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
+ try {
+ rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
+ } catch (Throwable e) {
+ LOG.fatal("Failed to start the server. Cause:", e);
+ terminate(1, e);
+ }
}
}
@@ -61,7 +68,7 @@ public abstract class Nfs3Base {
tcpServer.run();
nfsBoundPort = tcpServer.getBoundPort();
}
-
+
/**
* Priority of the nfsd shutdown hook.
*/
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
index 89e7173a469..31906035939 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
@@ -131,7 +131,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
} catch (IOException e) {
String request = set ? "Registration" : "Unregistration";
LOG.error(request + " failure with " + host + ":" + port
- + ", portmap entry: " + mapEntry, e);
+ + ", portmap entry: " + mapEntry);
throw new RuntimeException(request + " failure", e);
}
}
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java
index a2214554462..40ecdf5b8d4 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java
@@ -60,6 +60,7 @@ public class SimpleUdpClient {
DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length,
IPAddress, port);
socket.send(sendPacket);
+ socket.setSoTimeout(500);
DatagramPacket receivePacket = new DatagramPacket(receiveData,
receiveData.length);
socket.receive(receivePacket);
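
The single setSoTimeout(500) call added above is what lets registration fail instead of hanging forever when portmap/rpcbind is unreachable (HDFS-6378). A minimal sketch of the behaviour, not part of the patch, assuming a portmapper on 127.0.0.1:111 that never answers:

import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketTimeoutException;

class UdpWithTimeout {
  public static void main(String[] args) throws Exception {
    DatagramSocket socket = new DatagramSocket();
    byte[] request = new byte[] { 0 };
    socket.send(new DatagramPacket(request, request.length,
        InetAddress.getByName("127.0.0.1"), 111));
    socket.setSoTimeout(500); // wait at most 500 ms for a reply
    byte[] reply = new byte[512];
    try {
      socket.receive(new DatagramPacket(reply, reply.length));
    } catch (SocketTimeoutException e) {
      // Without the timeout this receive() would block indefinitely.
      System.err.println("No reply within 500 ms: " + e.getMessage());
    } finally {
      socket.close();
    }
  }
}
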
diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
index bf8a227c82e..349e82adbad 100644
--- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
+++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
@@ -194,4 +194,16 @@ public class TestNfsExports {
} while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
Assert.assertEquals(AccessPrivilege.NONE, ap);
}
+
+ @Test(expected=IllegalArgumentException.class)
+ public void testInvalidHost() {
+ NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
+ "foo#bar");
+ }
+
+ @Test(expected=IllegalArgumentException.class)
+ public void testInvalidSeparator() {
+ NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
+ "foo ro : bar rw");
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
index 5c30f16bc97..71b018506e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
@@ -154,6 +154,8 @@ public class Nfs3Utils {
if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
if (type == NfsFileType.NFSREG.toValue()) {
rtn |= Nfs3Constant.ACCESS3_EXECUTE;
+ } else {
+ rtn |= Nfs3Constant.ACCESS3_LOOKUP;
}
}
return rtn;
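
The else branch added above reports LOOKUP rather than EXECUTE when the execute bit applies to a directory, which is what the new TestNfs3Utils case below (mode 0711, i.e. decimal 457) asserts. A tiny sketch of that distinction, not part of the patch, with the NFSv3 access constants inlined (assumed to match Nfs3Constant):

class DirAccessBits {
  static final int ACCESS3_LOOKUP = 0x02;  // directories
  static final int ACCESS3_EXECUTE = 0x20; // regular files

  static int accessForExecuteBit(boolean isRegularFile) {
    return isRegularFile ? ACCESS3_EXECUTE : ACCESS3_LOOKUP;
  }

  public static void main(String[] args) {
    // A directory with the execute bit granted: the caller gets LOOKUP (2),
    // matching the "2 /* Lookup */" assertion in the test below.
    System.out.println(accessForExecuteBit(false)); // 2
    System.out.println(accessForExecuteBit(true));  // 32
  }
}
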
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
index b5f0cd4c539..77646af2c5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
@@ -68,5 +68,12 @@ public class TestNfs3Utils {
0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr));
assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",
0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr));
+
+ Mockito.when(attr.getUid()).thenReturn(2);
+ Mockito.when(attr.getGid()).thenReturn(10);
+ Mockito.when(attr.getMode()).thenReturn(457); // 711
+ Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
+ assertEquals("Access should be allowed for dir as mode is 711 and GID matches",
+ 2 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 11}, attr));
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 09cc4116504..6bd8392539b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -23,6 +23,8 @@ Trunk (Unreleased)
HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs.
(wheat9)
+ HDFS-2538. option to disable fsck dots (Mohammad Kamrul Islam via aw)
+
NEW FEATURES
HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@@ -298,8 +300,13 @@ Release 2.6.0 - UNRELEASED
HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc.
(cnauroth)
+ HDFS-5624. Add HDFS tests for ACLs in combination with viewfs.
+ (Stephen Chu via cnauroth)
+
OPTIMIZATIONS
+ HDFS-6690. Deduplicate xattr names in memory. (wang)
+
BUG FIXES
HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
@@ -314,6 +321,25 @@ Release 2.6.0 - UNRELEASED
HADOOP-8158. Interrupting hadoop fs -put from the command line
causes a LeaseExpiredException. (daryn via harsh)
+ HDFS-6678. MiniDFSCluster may still be partially running after initialization
+ fails. (cnauroth)
+
+ HDFS-5809. BlockPoolSliceScanner and high speed hdfs appending make
+ datanode to drop into infinite loop (cmccabe)
+
+ HDFS-6456. NFS should throw error for invalid entry in
+ dfs.nfs.exports.allowed.hosts (Abhiraj Butala via brandonli)
+
+ HDFS-6689. NFS doesn't return correct lookup access for directories (brandonli)
+
+ HDFS-6478. RemoteException can't be retried properly for non-HA scenario.
+ (Ming Ma via jing9)
+
+ HDFS-6693. TestDFSAdminWithHA fails on windows ( vinayakumarb )
+
+ HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails
+ with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -836,6 +862,9 @@ Release 2.5.0 - UNRELEASED
HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted
file present in snapshot (kihwal)
+ HDFS-6378. NFS registration should timeout instead of hanging when
+ portmap/rpcbind is not available (Abhiraj Butala via brandonli)
+
BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 250d41c5cba..90acedea12c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -26,7 +26,6 @@ import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
-import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@@ -38,14 +37,13 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
@@ -259,12 +257,11 @@ public class HAUtil {
/**
* Parse the file system URI out of the provided token.
*/
- public static URI getServiceUriFromToken(final String scheme,
- Token<?> token) {
+ public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
String tokStr = token.getService().toString();
-
- if (tokStr.startsWith(HA_DT_SERVICE_PREFIX)) {
- tokStr = tokStr.replaceFirst(HA_DT_SERVICE_PREFIX, "");
+ final String prefix = buildTokenServicePrefixForLogicalUri(scheme);
+ if (tokStr.startsWith(prefix)) {
+ tokStr = tokStr.replaceFirst(prefix, "");
}
return URI.create(scheme + "://" + tokStr);
}
@@ -273,10 +270,13 @@ public class HAUtil {
* Get the service name used in the delegation token for the given logical
* HA service.
* @param uri the logical URI of the cluster
+ * @param scheme the scheme of the corresponding FileSystem
* @return the service name
*/
- public static Text buildTokenServiceForLogicalUri(URI uri) {
- return new Text(HA_DT_SERVICE_PREFIX + uri.getHost());
+ public static Text buildTokenServiceForLogicalUri(final URI uri,
+ final String scheme) {
+ return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+ + uri.getHost());
}
/**
@@ -286,7 +286,11 @@ public class HAUtil {
public static boolean isTokenForLogicalUri(Token<?> token) {
return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
}
-
+
+ public static String buildTokenServicePrefixForLogicalUri(String scheme) {
+ return HA_DT_SERVICE_PREFIX + scheme + ":";
+ }
+
/**
* Locate a delegation token associated with the given HA cluster URI, and if
* one is found, clone it to also represent the underlying namenode address.
@@ -298,7 +302,9 @@ public class HAUtil {
public static void cloneDelegationTokenForLogicalUri(
UserGroupInformation ugi, URI haUri,
Collection<InetSocketAddress> nnAddrs) {
- Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri);
+ // this cloning logic is only used by hdfs
+ Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+ HdfsConstants.HDFS_URI_SCHEME);
Token<DelegationTokenIdentifier> haToken =
tokenSelector.selectToken(haService, ugi.getTokens());
if (haToken != null) {
@@ -309,8 +315,9 @@ public class HAUtil {
Token<DelegationTokenIdentifier> specificToken =
new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
SecurityUtil.setTokenService(specificToken, singleNNAddr);
- Text alias =
- new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService());
+ Text alias = new Text(
+ buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
+ + "//" + specificToken.getService());
ugi.addToken(alias, specificToken);
LOG.debug("Mapped HA service delegation token for logical URI " +
haUri + " to namenode " + singleNNAddr);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index 2bcb2a16222..17653345ef9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -163,7 +163,8 @@ public class NameNodeProxies {
Text dtService;
if (failoverProxyProvider.useLogicalURI()) {
- dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+ dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+ HdfsConstants.HDFS_URI_SCHEME);
} else {
dtService = SecurityUtil.buildTokenService(
NameNode.getAddress(nameNodeUri));
@@ -224,7 +225,8 @@ public class NameNodeProxies {
new Class[] { xface }, dummyHandler);
Text dtService;
if (failoverProxyProvider.useLogicalURI()) {
- dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+ dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+ HdfsConstants.HDFS_URI_SCHEME);
} else {
dtService = SecurityUtil.buildTokenService(
NameNode.getAddress(nameNodeUri));
@@ -333,19 +335,18 @@ public class NameNodeProxies {
address, conf, ugi, NamenodeProtocolPB.class, 0);
if (withRetries) { // create the proxy with retries
RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
- TimeUnit.MILLISECONDS);
- Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
- = new HashMap<Class<? extends Exception>, RetryPolicy>();
- RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
- exceptionToPolicyMap);
- Map<String, RetryPolicy> methodNameToPolicyMap
- = new HashMap<String, RetryPolicy>();
- methodNameToPolicyMap.put("getBlocks", methodPolicy);
- methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
- proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
- proxy, methodNameToPolicyMap);
+ TimeUnit.MILLISECONDS);
+ Map<String, RetryPolicy> methodNameToPolicyMap
+ = new HashMap<String, RetryPolicy>();
+ methodNameToPolicyMap.put("getBlocks", timeoutPolicy);
+ methodNameToPolicyMap.put("getAccessKeys", timeoutPolicy);
+ NamenodeProtocol translatorProxy =
+ new NamenodeProtocolTranslatorPB(proxy);
+ return (NamenodeProtocol) RetryProxy.create(
+ NamenodeProtocol.class, translatorProxy, methodNameToPolicyMap);
+ } else {
+ return new NamenodeProtocolTranslatorPB(proxy);
}
- return new NamenodeProtocolTranslatorPB(proxy);
}
private static ClientProtocol createNNProxyWithClientProtocol(
@@ -379,29 +380,27 @@ public class NameNodeProxies {
= new HashMap<Class<? extends Exception>, RetryPolicy>();
remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
createPolicy);
-
- Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
- = new HashMap<Class<? extends Exception>, RetryPolicy>();
- exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
- .retryByRemoteException(defaultPolicy,
- remoteExceptionToPolicyMap));
- RetryPolicy methodPolicy = RetryPolicies.retryByException(
- defaultPolicy, exceptionToPolicyMap);
+
+ RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
+ defaultPolicy, remoteExceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("create", methodPolicy);
-
- proxy = (ClientNamenodeProtocolPB) RetryProxy.create(
- ClientNamenodeProtocolPB.class,
- new DefaultFailoverProxyProvider<ClientNamenodeProtocolPB>(
- ClientNamenodeProtocolPB.class, proxy),
+
+ ClientProtocol translatorProxy =
+ new ClientNamenodeProtocolTranslatorPB(proxy);
+ return (ClientProtocol) RetryProxy.create(
+ ClientProtocol.class,
+ new DefaultFailoverProxyProvider<ClientProtocol>(
+ ClientProtocol.class, translatorProxy),
methodNameToPolicyMap,
defaultPolicy);
+ } else {
+ return new ClientNamenodeProtocolTranslatorPB(proxy);
}
- return new ClientNamenodeProtocolTranslatorPB(proxy);
}
-
+
private static Object createNameNodeProxy(InetSocketAddress address,
Configuration conf, UserGroupInformation ugi, Class<?> xface,
int rpcTimeout) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 7cc8c318803..77fe543784b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -124,7 +124,7 @@ public class HdfsConstants {
* of a delgation token, indicating that the URI is a logical (HA)
* URI.
*/
- public static final String HA_DT_SERVICE_PREFIX = "ha-hdfs:";
+ public static final String HA_DT_SERVICE_PREFIX = "ha-";
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 2c039aed236..5775d6e2634 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -97,7 +97,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
ProtobufRpcEngine.class);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
+ rpcProxy = createNamenode(nameNodeAddr, conf, ugi);
}
private static DatanodeProtocolPB createNamenode(
@@ -109,33 +109,6 @@ public class DatanodeProtocolClientSideTranslatorPB implements
org.apache.hadoop.ipc.Client.getPingInterval(conf), null).getProxy();
}
- /** Create a {@link NameNode} proxy */
- static DatanodeProtocolPB createNamenodeWithRetry(
- DatanodeProtocolPB rpcNamenode) {
- RetryPolicy createPolicy = RetryPolicies
- .retryUpToMaximumCountWithFixedSleep(5,
- HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-
- Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
- new HashMap<Class<? extends Exception>, RetryPolicy>();
- remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
- createPolicy);
-
- Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
- new HashMap<Class<? extends Exception>, RetryPolicy>();
- exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
- .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
- remoteExceptionToPolicyMap));
- RetryPolicy methodPolicy = RetryPolicies.retryByException(
- RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
- Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
-
- methodNameToPolicyMap.put("create", methodPolicy);
-
- return (DatanodeProtocolPB) RetryProxy.create(DatanodeProtocolPB.class,
- rpcNamenode, methodNameToPolicyMap);
- }
-
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
index 87f105c7d8c..98e99f0e061 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
@@ -61,7 +62,7 @@ import com.google.protobuf.ServiceException;
@InterfaceAudience.Private
@InterfaceStability.Stable
public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
- ProtocolMetaInterface, Closeable {
+ ProtocolMetaInterface, Closeable, ProtocolTranslator {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
@@ -88,6 +89,11 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
RPC.stopProxy(rpcProxy);
}
+ @Override
+ public Object getUnderlyingProxyObject() {
+ return rpcProxy;
+ }
+
@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 5310c3df522..1039b4fe922 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -310,18 +310,11 @@ class BlockPoolSliceScanner {
}
}
- private synchronized void updateScanStatus(Block block,
+ private synchronized void updateScanStatus(BlockScanInfo info,
ScanType type,
boolean scanOk) {
- BlockScanInfo info = blockMap.get(block);
-
- if ( info != null ) {
- delBlockInfo(info);
- } else {
- // It might already be removed. Thats ok, it will be caught next time.
- info = new BlockScanInfo(block);
- }
-
+ delBlockInfo(info);
+
long now = Time.monotonicNow();
info.lastScanType = type;
info.lastScanTime = now;
@@ -334,8 +327,8 @@ class BlockPoolSliceScanner {
}
if (verificationLog != null) {
- verificationLog.append(now, block.getGenerationStamp(),
- block.getBlockId());
+ verificationLog.append(now, info.getGenerationStamp(),
+ info.getBlockId());
}
}
@@ -434,11 +427,13 @@ class BlockPoolSliceScanner {
totalTransientErrors++;
}
- updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, true);
+ updateScanStatus((BlockScanInfo)block.getLocalBlock(),
+ ScanType.VERIFICATION_SCAN, true);
return;
} catch (IOException e) {
- updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false);
+ updateScanStatus((BlockScanInfo)block.getLocalBlock(),
+ ScanType.VERIFICATION_SCAN, false);
// If the block does not exists anymore, then its not an error
if (!dataset.contains(block)) {
@@ -497,7 +492,7 @@ class BlockPoolSliceScanner {
// Picks one block and verifies it
private void verifyFirstBlock() {
- Block block = null;
+ BlockScanInfo block = null;
synchronized (this) {
if (!blockInfoSet.isEmpty()) {
block = blockInfoSet.first();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
index 83de6ebe41b..51731c8d013 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
@@ -128,7 +128,8 @@ public class DatanodeWebHdfsMethods {
"://" + nnId);
boolean isLogical = HAUtil.isLogicalUri(conf, nnUri);
if (isLogical) {
- token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri));
+ token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri,
+ HdfsConstants.HDFS_URI_SCHEME));
} else {
token.setService(SecurityUtil.buildTokenService(nnUri));
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 542e60e4016..5cc8a4797e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -126,6 +126,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
private boolean showBlocks = false;
private boolean showLocations = false;
private boolean showRacks = false;
+ private boolean showprogress = false;
private boolean showCorruptFileBlocks = false;
/**
@@ -203,6 +204,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
else if (key.equals("blocks")) { this.showBlocks = true; }
else if (key.equals("locations")) { this.showLocations = true; }
else if (key.equals("racks")) { this.showRacks = true; }
+ else if (key.equals("showprogress")) { this.showprogress = true; }
else if (key.equals("openforwrite")) {this.showOpenFiles = true; }
else if (key.equals("listcorruptfileblocks")) {
this.showCorruptFileBlocks = true;
@@ -381,10 +383,13 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
} else if (showFiles) {
out.print(path + " " + fileLen + " bytes, " +
blocks.locatedBlockCount() + " block(s): ");
- } else {
+ } else if (showprogress) {
out.print('.');
}
- if (res.totalFiles % 100 == 0) { out.println(); out.flush(); }
+ if ((showprogress) && res.totalFiles % 100 == 0) {
+ out.println();
+ out.flush();
+ }
int missing = 0;
int corrupt = 0;
long missize = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
index fdb549648f2..7e843d207ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
@@ -19,24 +19,30 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.util.List;
+import java.util.Map;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.INode;
-
-import com.google.common.collect.ImmutableList;
/**
* XAttrStorage is used to read and set xattrs for an inode.
*/
@InterfaceAudience.Private
public class XAttrStorage {
-
+
+ private static final Map<String, String> internedNames = Maps.newHashMap();
+
/**
* Reads the existing extended attributes of an inode. If the
* inode does not have an XAttr, then this method
* returns an empty list.
+ *
+ * Must be called while holding the FSDirectory read lock.
+ *
* @param inode INode to read
* @param snapshotId
* @return List<XAttr> list.
@@ -48,6 +54,9 @@ public class XAttrStorage {
/**
* Reads the existing extended attributes of an inode.
+ *
+ * Must be called while holding the FSDirectory read lock.
+ *
* @param inode INode to read.
* @return List<XAttr> list.
*/
@@ -58,6 +67,9 @@ public class XAttrStorage {
/**
* Update xattrs of inode.
+ *
+ * Must be called while holding the FSDirectory write lock.
+ *
* @param inode INode to update
* @param xAttrs to update xAttrs.
* @param snapshotId id of the latest snapshot of the inode
@@ -70,8 +82,24 @@ public class XAttrStorage {
}
return;
}
-
- ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(xAttrs);
+ // Dedupe the xAttr name and save them into a new interned list
+ List<XAttr> internedXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
+ for (XAttr xAttr : xAttrs) {
+ final String name = xAttr.getName();
+ String internedName = internedNames.get(name);
+ if (internedName == null) {
+ internedName = name;
+ internedNames.put(internedName, internedName);
+ }
+ XAttr internedXAttr = new XAttr.Builder()
+ .setName(internedName)
+ .setNameSpace(xAttr.getNameSpace())
+ .setValue(xAttr.getValue())
+ .build();
+ internedXAttrs.add(internedXAttr);
+ }
+ // Save the list of interned xattrs
+ ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(internedXAttrs);
if (inode.getXAttrFeature() != null) {
inode.removeXAttrFeature(snapshotId);
}
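
The XAttrStorage change above dedupes extended-attribute names by interning them in a shared map, so the NameNode holds one String instance per distinct name (HDFS-6690). A minimal sketch of that interning idea, not part of the patch; the plain HashMap below stands in for the real map, which the patched code only touches under the FSDirectory write lock:

import java.util.HashMap;
import java.util.Map;

class NameInterner {
  private final Map<String, String> interned = new HashMap<String, String>();

  String intern(String name) {
    String existing = interned.get(name);
    if (existing == null) {
      interned.put(name, name);
      existing = name;
    }
    return existing;
  }

  public static void main(String[] args) {
    NameInterner interner = new NameInterner();
    String a = interner.intern(new String("user.owner"));
    String b = interner.intern(new String("user.owner"));
    System.out.println(a == b); // true: both point at the same instance
  }
}
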
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index db253b6f270..b91090d1677 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -77,7 +77,7 @@ public class DFSck extends Configured implements Tool {
private static final String USAGE = "Usage: DFSck "
+ "[-list-corruptfileblocks | "
+ "[-move | -delete | -openforwrite] "
- + "[-files [-blocks [-locations | -racks]]]]\n"
+ + "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
+ "\t\tstart checking from this path\n"
+ "\t-move\tmove corrupted files to /lost+found\n"
+ "\t-delete\tdelete corrupted files\n"
@@ -90,7 +90,8 @@ public class DFSck extends Configured implements Tool {
+ "blocks and files they belong to\n"
+ "\t-blocks\tprint out block report\n"
+ "\t-locations\tprint out locations for every block\n"
- + "\t-racks\tprint out network topology for data-node locations\n\n"
+ + "\t-racks\tprint out network topology for data-node locations\n"
+ + "\t-showprogress\tshow progress in output. Default is OFF (no progress)\n\n"
+ "Please Note:\n"
+ "\t1. By default fsck ignores files opened for write, "
+ "use -openforwrite to report such files. They are usually "
@@ -270,6 +271,7 @@ public class DFSck extends Configured implements Tool {
else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); }
else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
+ else if (args[idx].equals("-showprogress")) { url.append("&showprogress=1"); }
else if (args[idx].equals("-list-corruptfileblocks")) {
url.append("&listcorruptfileblocks=1");
doListCorruptFileBlocks = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 94c666a3a11..6eb09f61340 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -158,7 +158,7 @@ public class WebHdfsFileSystem extends FileSystem
// getCanonicalUri() in order to handle the case where no port is
// specified in the URI
this.tokenServiceName = isLogicalUri ?
- HAUtil.buildTokenServiceForLogicalUri(uri)
+ HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
: SecurityUtil.buildTokenService(getCanonicalUri());
if (!isHA) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
new file mode 100644
index 00000000000..68dd7f2239f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verify ACL through ViewFileSystem functionality.
+ */
+public class TestViewFileSystemWithAcls {
+
+ private static MiniDFSCluster cluster;
+ private static Configuration clusterConf = new Configuration();
+ private static FileSystem fHdfs;
+ private static FileSystem fHdfs2;
+ private FileSystem fsView;
+ private Configuration fsViewConf;
+ private FileSystem fsTarget, fsTarget2;
+ private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
+ private FileSystemTestHelper fileSystemTestHelper =
+ new FileSystemTestHelper("/tmp/TestViewFileSystemWithAcls");
+
+ @BeforeClass
+ public static void clusterSetupAtBeginning() throws IOException {
+ clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ cluster = new MiniDFSCluster.Builder(clusterConf)
+ .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+ .numDataNodes(2)
+ .build();
+ cluster.waitClusterUp();
+
+ fHdfs = cluster.getFileSystem(0);
+ fHdfs2 = cluster.getFileSystem(1);
+ }
+
+ @AfterClass
+ public static void ClusterShutdownAtEnd() throws Exception {
+ cluster.shutdown();
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ fsTarget = fHdfs;
+ fsTarget2 = fHdfs2;
+ targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+ targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
+
+ fsTarget.delete(targetTestRoot, true);
+ fsTarget2.delete(targetTestRoot2, true);
+ fsTarget.mkdirs(targetTestRoot);
+ fsTarget2.mkdirs(targetTestRoot2);
+
+ fsViewConf = ViewFileSystemTestSetup.createConfig();
+ setupMountPoints();
+ fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
+ }
+
+ private void setupMountPoints() {
+ mountOnNn1 = new Path("/mountOnNn1");
+ mountOnNn2 = new Path("/mountOnNn2");
+ ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
+ ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
+ fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
+ }
+
+ /**
+ * Verify a ViewFs wrapped over multiple federated NameNodes will
+ * dispatch the ACL operations to the correct NameNode.
+ */
+ @Test
+ public void testAclOnMountEntry() throws Exception {
+ // Set ACLs on the first namespace and verify they are correct
+ List<AclEntry> aclSpec = Lists.newArrayList(
+ aclEntry(ACCESS, USER, READ_WRITE),
+ aclEntry(ACCESS, USER, "foo", READ),
+ aclEntry(ACCESS, GROUP, READ),
+ aclEntry(ACCESS, OTHER, NONE));
+ fsView.setAcl(mountOnNn1, aclSpec);
+
+ AclEntry[] expected = new AclEntry[] {
+ aclEntry(ACCESS, USER, "foo", READ),
+ aclEntry(ACCESS, GROUP, READ) };
+ assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
+ // Double-check by getting ACL status using FileSystem
+ // instead of ViewFs
+ assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
+
+ // Modify the ACL entries on the first namespace
+ aclSpec = Lists.newArrayList(
+ aclEntry(DEFAULT, USER, "foo", READ));
+ fsView.modifyAclEntries(mountOnNn1, aclSpec);
+ expected = new AclEntry[] {
+ aclEntry(ACCESS, USER, "foo", READ),
+ aclEntry(ACCESS, GROUP, READ),
+ aclEntry(DEFAULT, USER, READ_WRITE),
+ aclEntry(DEFAULT, USER, "foo", READ),
+ aclEntry(DEFAULT, GROUP, READ),
+ aclEntry(DEFAULT, MASK, READ),
+ aclEntry(DEFAULT, OTHER, NONE) };
+ assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
+
+ fsView.removeDefaultAcl(mountOnNn1);
+ expected = new AclEntry[] {
+ aclEntry(ACCESS, USER, "foo", READ),
+ aclEntry(ACCESS, GROUP, READ) };
+ assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
+ assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
+
+ // Paranoid check: verify the other namespace does not
+ // have ACLs set on the same path.
+ assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
+ assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
+
+ // Remove the ACL entries on the first namespace
+ fsView.removeAcl(mountOnNn1);
+ assertEquals(0, fsView.getAclStatus(mountOnNn1).getEntries().size());
+ assertEquals(0, fHdfs.getAclStatus(targetTestRoot).getEntries().size());
+
+ // Now set ACLs on the second namespace
+ aclSpec = Lists.newArrayList(
+ aclEntry(ACCESS, USER, "bar", READ));
+ fsView.modifyAclEntries(mountOnNn2, aclSpec);
+ expected = new AclEntry[] {
+ aclEntry(ACCESS, USER, "bar", READ),
+ aclEntry(ACCESS, GROUP, READ_EXECUTE) };
+ assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn2)));
+ assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
+
+ // Remove the ACL entries on the second namespace
+ fsView.removeAclEntries(mountOnNn2, Lists.newArrayList(
+ aclEntry(ACCESS, USER, "bar", READ)
+ ));
+ expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
+ assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
+ fsView.removeAcl(mountOnNn2);
+ assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
+ assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
+ }
+
+ private AclEntry[] aclEntryArray(AclStatus aclStatus) {
+ return aclStatus.getEntries().toArray(new AclEntry[0]);
+ }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
new file mode 100644
index 00000000000..70918e9d2b5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import java.util.List;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.fs.permission.FsAction.NONE;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verify ACL through ViewFs functionality.
+ */
+public class TestViewFsWithAcls {
+
+ private static MiniDFSCluster cluster;
+ private static Configuration clusterConf = new Configuration();
+ private static FileContext fc, fc2;
+ private FileContext fcView, fcTarget, fcTarget2;
+ private Configuration fsViewConf;
+ private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
+ private FileContextTestHelper fileContextTestHelper =
+ new FileContextTestHelper("/tmp/TestViewFsWithAcls");
+
+ @BeforeClass
+ public static void clusterSetupAtBeginning() throws IOException {
+ clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ cluster = new MiniDFSCluster.Builder(clusterConf)
+ .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+ .numDataNodes(2)
+ .build();
+ cluster.waitClusterUp();
+
+ fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
+ fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
+ }
+
+ @AfterClass
+ public static void ClusterShutdownAtEnd() throws Exception {
+ cluster.shutdown();
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ fcTarget = fc;
+ fcTarget2 = fc2;
+ targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
+ targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);
+
+ fcTarget.delete(targetTestRoot, true);
+ fcTarget2.delete(targetTestRoot2, true);
+ fcTarget.mkdir(targetTestRoot, new FsPermission((short)0750), true);
+ fcTarget2.mkdir(targetTestRoot2, new FsPermission((short)0750), true);
+
+ fsViewConf = ViewFileSystemTestSetup.createConfig();
+ setupMountPoints();
+ fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
+ }
+
+ private void setupMountPoints() {
+ mountOnNn1 = new Path("/mountOnNn1");
+ mountOnNn2 = new Path("/mountOnNn2");
+ ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
+ ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
+ fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
+ }
+
+ /**
+ * Verify a ViewFs wrapped over multiple federated NameNodes will
+ * dispatch the ACL operations to the correct NameNode.
+ */
+ @Test
+ public void testAclOnMountEntry() throws Exception {
+ // Set ACLs on the first namespace and verify they are correct
+ List<AclEntry> aclSpec = Lists.newArrayList(
+ aclEntry(ACCESS, USER, READ_WRITE),
+ aclEntry(ACCESS, USER, "foo", READ),
+ aclEntry(ACCESS, GROUP, READ),
+ aclEntry(ACCESS, OTHER, NONE));
+ fcView.setAcl(mountOnNn1, aclSpec);
+
+ AclEntry[] expected = new AclEntry[] {
+ aclEntry(ACCESS, USER, "foo", READ),
+ aclEntry(ACCESS, GROUP, READ) };
+ assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
+ // Double-check by getting ACL status using FileSystem
+ // instead of ViewFs
+ assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));
+
+ // Modify the ACL entries on the first namespace
+ aclSpec = Lists.newArrayList(
+ aclEntry(DEFAULT, USER, "foo", READ));
+ fcView.modifyAclEntries(mountOnNn1, aclSpec);
+ expected = new AclEntry[] {
+ aclEntry(ACCESS, USER, "foo", READ),
+ aclEntry(ACCESS, GROUP, READ),
+ aclEntry(DEFAULT, USER, READ_WRITE),
+ aclEntry(DEFAULT, USER, "foo", READ),
+ aclEntry(DEFAULT, GROUP, READ),
+ aclEntry(DEFAULT, MASK, READ),
+ aclEntry(DEFAULT, OTHER, NONE) };
+ assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
+
+ fcView.removeDefaultAcl(mountOnNn1);
+ expected = new AclEntry[] {
+ aclEntry(ACCESS, USER, "foo", READ),
+ aclEntry(ACCESS, GROUP, READ) };
+ assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
+ assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));
+
+ // Paranoid check: verify the other namespace does not
+ // have ACLs set on the same path.
+ assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
+ assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());
+
+ // Remove the ACL entries on the first namespace
+ fcView.removeAcl(mountOnNn1);
+ assertEquals(0, fcView.getAclStatus(mountOnNn1).getEntries().size());
+ assertEquals(0, fc.getAclStatus(targetTestRoot).getEntries().size());
+
+ // Now set ACLs on the second namespace
+ aclSpec = Lists.newArrayList(
+ aclEntry(ACCESS, USER, "bar", READ));
+ fcView.modifyAclEntries(mountOnNn2, aclSpec);
+ expected = new AclEntry[] {
+ aclEntry(ACCESS, USER, "bar", READ),
+ aclEntry(ACCESS, GROUP, READ_EXECUTE) };
+ assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn2)));
+ assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
+
+ // Remove the ACL entries on the second namespace
+ fcView.removeAclEntries(mountOnNn2, Lists.newArrayList(
+ aclEntry(ACCESS, USER, "bar", READ)
+ ));
+ expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
+ assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
+ fcView.removeAcl(mountOnNn2);
+ assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
+ assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());
+ }
+
+ private AclEntry[] aclEntryArray(AclStatus aclStatus) {
+ return aclStatus.getEntries().toArray(new AclEntry[0]);
+ }
+
+}
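
The test above mounts one directory from each federated namespace into a single
viewfs namespace via ConfigUtil.addLink. As a rough sketch of what those calls
amount to, the equivalent raw mount-table properties would look like the
following; the key-name pattern is my reading of ConfigUtil, and the class and
method names are illustrative only.

    import org.apache.hadoop.conf.Configuration;

    public class ViewFsMountSketch {
      // Map one viewfs path to the root of each federated namespace.
      // The property-name pattern is assumed from ConfigUtil.addLink's behavior.
      public static Configuration mountTwoNamespaces(String nn1TargetUri,
                                                     String nn2TargetUri) {
        Configuration conf = new Configuration();
        conf.set("fs.viewfs.mounttable.default.link./mountOnNn1", nn1TargetUri);
        conf.set("fs.viewfs.mounttable.default.link./mountOnNn2", nn2TargetUri);
        return conf;
      }
    }
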
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index db4f2878370..c316684138b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -663,73 +663,81 @@ public class MiniDFSCluster {
boolean checkDataNodeHostConfig,
Configuration[] dnConfOverlays)
throws IOException {
- ExitUtil.disableSystemExit();
-
- synchronized (MiniDFSCluster.class) {
- instanceId = instanceCount++;
- }
-
- this.conf = conf;
- base_dir = new File(determineDfsBaseDir());
- data_dir = new File(base_dir, "data");
- this.waitSafeMode = waitSafeMode;
- this.checkExitOnShutdown = checkExitOnShutdown;
-
- int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
- conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
- int safemodeExtension = conf.getInt(
- DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
- conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
- conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
- conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
- StaticMapping.class, DNSToSwitchMapping.class);
-
- // In an HA cluster, in order for the StandbyNode to perform checkpoints,
- // it needs to know the HTTP port of the Active. So, if ephemeral ports
- // are chosen, disable checkpoints for the test.
- if (!nnTopology.allHttpPortsSpecified() &&
- nnTopology.isHA()) {
- LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
- "since no HTTP ports have been specified.");
- conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
- }
- if (!nnTopology.allIpcPortsSpecified() &&
- nnTopology.isHA()) {
- LOG.info("MiniDFSCluster disabling log-roll triggering in the "
- + "Standby node since no IPC ports have been specified.");
- conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
- }
-
- federation = nnTopology.isFederated();
+ boolean success = false;
try {
- createNameNodesAndSetConf(
- nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
- enableManagedDfsDirsRedundancy,
- format, startOpt, clusterId, conf);
- } catch (IOException ioe) {
- LOG.error("IOE creating namenodes. Permissions dump:\n" +
- createPermissionsDiagnosisString(data_dir));
- throw ioe;
- }
- if (format) {
- if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
- throw new IOException("Cannot remove data directory: " + data_dir +
+ ExitUtil.disableSystemExit();
+
+ synchronized (MiniDFSCluster.class) {
+ instanceId = instanceCount++;
+ }
+
+ this.conf = conf;
+ base_dir = new File(determineDfsBaseDir());
+ data_dir = new File(base_dir, "data");
+ this.waitSafeMode = waitSafeMode;
+ this.checkExitOnShutdown = checkExitOnShutdown;
+
+ int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
+ conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
+ int safemodeExtension = conf.getInt(
+ DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
+ conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
+ conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
+ conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+ StaticMapping.class, DNSToSwitchMapping.class);
+
+ // In an HA cluster, in order for the StandbyNode to perform checkpoints,
+ // it needs to know the HTTP port of the Active. So, if ephemeral ports
+ // are chosen, disable checkpoints for the test.
+ if (!nnTopology.allHttpPortsSpecified() &&
+ nnTopology.isHA()) {
+ LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
+ "since no HTTP ports have been specified.");
+ conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
+ }
+ if (!nnTopology.allIpcPortsSpecified() &&
+ nnTopology.isHA()) {
+ LOG.info("MiniDFSCluster disabling log-roll triggering in the "
+ + "Standby node since no IPC ports have been specified.");
+ conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
+ }
+
+ federation = nnTopology.isFederated();
+ try {
+ createNameNodesAndSetConf(
+ nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
+ enableManagedDfsDirsRedundancy,
+ format, startOpt, clusterId, conf);
+ } catch (IOException ioe) {
+ LOG.error("IOE creating namenodes. Permissions dump:\n" +
createPermissionsDiagnosisString(data_dir));
+ throw ioe;
+ }
+ if (format) {
+ if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
+ throw new IOException("Cannot remove data directory: " + data_dir +
+ createPermissionsDiagnosisString(data_dir));
+ }
+ }
+
+ if (startOpt == StartupOption.RECOVER) {
+ return;
+ }
+
+ // Start the DataNodes
+ startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
+ dnStartOpt != null ? dnStartOpt : startOpt,
+ racks, hosts, simulatedCapacities, setupHostsFile,
+ checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
+ waitClusterUp();
+ //make sure ProxyUsers uses the latest conf
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+ success = true;
+ } finally {
+ if (!success) {
+ shutdown();
}
}
-
- if (startOpt == StartupOption.RECOVER) {
- return;
- }
-
- // Start the DataNodes
- startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
- dnStartOpt != null ? dnStartOpt : startOpt,
- racks, hosts, simulatedCapacities, setupHostsFile,
- checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
- waitClusterUp();
- //make sure ProxyUsers uses the latest conf
- ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
/**
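
The MiniDFSCluster change above wraps the whole cluster-initialization method in
try/finally so that a failure partway through startup shuts down whatever was
already started instead of leaking daemons into later tests. A minimal sketch of
the same guard, where the ClusterLike interface and StartupGuard class are
hypothetical stand-ins rather than Hadoop APIs:

    import java.io.IOException;

    public final class StartupGuard {
      public interface ClusterLike {
        void start() throws IOException;
        void shutdown();
      }

      public static void startOrCleanUp(ClusterLike cluster) throws IOException {
        boolean success = false;
        try {
          cluster.start();        // may fail after some daemons are running
          success = true;
        } finally {
          if (!success) {
            cluster.shutdown();   // release whatever was already started
          }
        }
      }
    }
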
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 64697bf9d82..809e592db7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -30,6 +30,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHEC
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -79,6 +81,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
@@ -97,6 +100,8 @@ public class TestFileCreation {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
+ private static final String RPC_DETAILED_METRICS =
+ "RpcDetailedActivityForPort";
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
@@ -371,7 +376,7 @@ public class TestFileCreation {
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
-
+
UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
"testuser", new String[]{"testgroup"});
FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@@ -380,12 +385,16 @@ public class TestFileCreation {
return FileSystem.get(cluster.getConfiguration(0));
}
});
-
+
+ String metricsName = RPC_DETAILED_METRICS + cluster.getNameNodePort();
+
try {
Path p = new Path("/testfile");
FSDataOutputStream stm1 = fs.create(p);
stm1.write(1);
+ assertCounter("CreateNumOps", 1L, getMetrics(metricsName));
+
// Create file again without overwrite
try {
fs2.create(p, false);
@@ -394,7 +403,9 @@ public class TestFileCreation {
GenericTestUtils.assertExceptionContains("already being created by",
abce);
}
-
+ // NameNodeProxies' createNNProxyWithClientProtocol has 5 retries.
+ assertCounter("AlreadyBeingCreatedExceptionNumOps",
+ 6L, getMetrics(metricsName));
FSDataOutputStream stm2 = fs2.create(p, true);
stm2.write(2);
stm2.close();
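
The new assertions above read the NameNode's per-method RPC counters. As I
understand the metrics layout, the record is named "RpcDetailedActivityForPort"
followed by the IPC port, and every RPC method gets a <Method>NumOps counter.
A small hypothetical helper capturing that pattern (not part of the test):

    import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;

    public class RpcMetricsCheck {
      // Assert how many times a given ClientProtocol method was called on the
      // NameNode listening on nnPort, e.g. assertRpcCalls(port, "Create", 1L).
      public static void assertRpcCalls(int nnPort, String method, long expectedOps) {
        MetricsRecordBuilder rb = getMetrics("RpcDetailedActivityForPort" + nnPort);
        assertCounter(method + "NumOps", expectedOps, rb);
      }
    }
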
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
index c6bd7ba2848..2e4a08bf0ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
@@ -25,14 +25,16 @@ import java.net.InetSocketAddress;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
@@ -76,16 +78,22 @@ public class TestIsMethodSupported {
@Test
public void testNamenodeProtocol() throws IOException {
- NamenodeProtocolTranslatorPB translator =
- (NamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(conf,
+ NamenodeProtocol np =
+ NameNodeProxies.createNonHAProxy(conf,
nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
- boolean exists = translator.isMethodSupported("rollEditLog");
+
+ boolean exists = RpcClientUtil.isMethodSupported(np,
+ NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+ RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog");
+
assertTrue(exists);
- exists = translator.isMethodSupported("bogusMethod");
+ exists = RpcClientUtil.isMethodSupported(np,
+ NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+ RPC.getProtocolVersion(NamenodeProtocolPB.class), "bogusMethod");
assertFalse(exists);
}
-
+
@Test
public void testDatanodeProtocol() throws IOException {
DatanodeProtocolClientSideTranslatorPB translator =
@@ -107,16 +115,18 @@ public class TestIsMethodSupported {
NetUtils.getDefaultSocketFactory(conf));
assertTrue(translator.isMethodSupported("refreshNamenodes"));
}
-
+
@Test
public void testClientNamenodeProtocol() throws IOException {
- ClientNamenodeProtocolTranslatorPB translator =
- (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
+ ClientProtocol cp =
+ NameNodeProxies.createNonHAProxy(
conf, nnAddress, ClientProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
- assertTrue(translator.isMethodSupported("mkdirs"));
+ RpcClientUtil.isMethodSupported(cp,
+ ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+ RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
}
-
+
@Test
public void tesJournalProtocol() throws IOException {
JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 2cf8f6d51b2..f50afd46325 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -116,7 +116,8 @@ public class DataNodeTestUtils {
public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b);
- bpScanner.verifyBlock(b);
+ bpScanner.verifyBlock(new ExtendedBlock(b.getBlockPoolId(),
+ new BlockPoolSliceScanner.BlockScanInfo(b.getLocalBlock())));
}
private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index 46059520f88..b2cc9197aa8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -299,7 +300,8 @@ public class TestDelegationTokensWithHA {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
URI haUri = new URI("hdfs://my-ha-uri/");
- token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
+ token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,
+ HdfsConstants.HDFS_URI_SCHEME));
ugi.addToken(token);
Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
@@ -355,7 +357,8 @@ public class TestDelegationTokensWithHA {
@Test
public void testDFSGetCanonicalServiceName() throws Exception {
URI hAUri = HATestUtil.getLogicalUri(cluster);
- String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
+ String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
+ HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, dfs.getCanonicalServiceName());
final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
final Token<DelegationTokenIdentifier> token =
@@ -371,7 +374,8 @@ public class TestDelegationTokensWithHA {
Configuration conf = dfs.getConf();
URI haUri = HATestUtil.getLogicalUri(cluster);
AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
- String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
+ String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+ HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, afs.getCanonicalServiceName());
Token<?> token = afs.getDelegationTokens(
UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index 40826134d96..6859e436a4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -21,6 +21,7 @@ import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import com.google.common.base.Charsets;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -46,6 +47,7 @@ public class TestDFSAdminWithHA {
private PrintStream originErr;
private static final String NSID = "ns1";
+ private static String newLine = System.getProperty("line.separator");
private void assertOutputMatches(String string) {
String errOutput = new String(out.toByteArray(), Charsets.UTF_8);
@@ -99,6 +101,14 @@ public class TestDFSAdminWithHA {
System.err.flush();
System.setOut(originOut);
System.setErr(originErr);
+ if (admin != null) {
+ admin.close();
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ out.reset();
+ err.reset();
}
@Test(timeout = 30000)
@@ -108,25 +118,25 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Leave safemode
exitCode = admin.run(new String[] {"-safemode", "leave"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -136,12 +146,12 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-saveNamespace"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Save namespace successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -151,17 +161,17 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "restoreFailedStorage is set to false for.*";
// Default is false
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to true for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to false for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -170,7 +180,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshNodes"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh nodes successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -179,7 +189,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Balancer bandwidth is set to 10 for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -189,7 +199,7 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Created metasave file dfs.meta in the log directory"
+ " of namenode.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -198,7 +208,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh service acl successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -207,7 +217,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh user to groups mapping successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -217,7 +227,7 @@ public class TestDFSAdminWithHA {
new String[] {"-refreshSuperUserGroupsConfiguration"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh super user groups configuration successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
@Test (timeout = 30000)
@@ -226,6 +236,6 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshCallQueue"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh call queue successful for.*";
- assertOutputMatches(message + "\n" + message + "\n");
+ assertOutputMatches(message + newLine + message + newLine);
}
}
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 61f9b78f99d..7da3de0f8ff 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -17,6 +17,9 @@ Trunk (Unreleased)
MAPREDUCE-5232. Add a configuration to be able to log classpath and other
system properties on mapreduce JVMs startup. (Sangjin Lee via vinodkv)
+ MAPREDUCE-5910. Make MR AM resync with RM in case of work-preserving
+ RM-restart. (Rohith via jianhe)
+
IMPROVEMENTS
MAPREDUCE-3481. [Gridmix] Improve Gridmix STRESS mode. (amarrk)
@@ -153,6 +156,9 @@ Release 2.6.0 - UNRELEASED
IMPROVEMENTS
+ MAPREDUCE-5971. Move the default options for distcp -p to
+ DistCpOptionSwitch. (clamb via wang)
+
OPTIMIZATIONS
BUG FIXES
@@ -237,6 +243,9 @@ Release 2.5.0 - UNRELEASED
MAPREDUCE-5844. Add a configurable delay to reducer-preemption.
(Maysam Yabandeh via kasha)
+ MAPREDUCE-5790. Made it easier to enable hprof profile options by default.
+ (Gera Shegalov via vinodkv)
+
OPTIMIZATIONS
BUG FIXES
@@ -304,6 +313,9 @@ Release 2.5.0 - UNRELEASED
resource configuration for deciding uber-mode on map-only jobs. (Siqi Li via
vinodkv)
+ MAPREDUCE-5952. LocalContainerLauncher#renameMapOutputForReduce incorrectly
+ assumes a single dir for mapOutIndex. (Gera Shegalov via kasha)
+
Release 2.4.1 - 2014-06-23
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
index 9ef3d454d1e..7585c9a81e8 100644
--- a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
+++ b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
@@ -133,6 +133,7 @@ case $startStop in
else
echo no $command to stop
fi
+ rm -f $pid
else
echo no $command to stop
fi
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 6425144b6b5..c7898ed966f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -30,6 +30,7 @@ import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSError;
@@ -437,43 +438,6 @@ public class LocalContainerLauncher extends AbstractService implements
}
}
- /**
- * Within the _local_ filesystem (not HDFS), all activity takes place within
- * a single subdir (${local.dir}/usercache/$user/appcache/$appId/$contId/),
- * and all sub-MapTasks create the same filename ("file.out"). Rename that
- * to something unique (e.g., "map_0.out") to avoid collisions.
- *
- * Longer-term, we'll modify [something] to use TaskAttemptID-based
- * filenames instead of "file.out". (All of this is entirely internal,
- * so there are no particular compatibility issues.)
- */
- private MapOutputFile renameMapOutputForReduce(JobConf conf,
- TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException {
- FileSystem localFs = FileSystem.getLocal(conf);
- // move map output to reduce input
- Path mapOut = subMapOutputFile.getOutputFile();
- FileStatus mStatus = localFs.getFileStatus(mapOut);
- Path reduceIn = subMapOutputFile.getInputFileForWrite(
- TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen());
- Path mapOutIndex = new Path(mapOut.toString() + ".index");
- Path reduceInIndex = new Path(reduceIn.toString() + ".index");
- if (LOG.isDebugEnabled()) {
- LOG.debug("Renaming map output file for task attempt "
- + mapId.toString() + " from original location " + mapOut.toString()
- + " to destination " + reduceIn.toString());
- }
- if (!localFs.mkdirs(reduceIn.getParent())) {
- throw new IOException("Mkdirs failed to create "
- + reduceIn.getParent().toString());
- }
- if (!localFs.rename(mapOut, reduceIn))
- throw new IOException("Couldn't rename " + mapOut);
- if (!localFs.rename(mapOutIndex, reduceInIndex))
- throw new IOException("Couldn't rename " + mapOutIndex);
-
- return new RenamedMapOutputFile(reduceIn);
- }
-
/**
* Also within the local filesystem, we need to restore the initial state
* of the directory as much as possible. Compare current contents against
@@ -506,7 +470,46 @@ public class LocalContainerLauncher extends AbstractService implements
}
} // end EventHandler
-
+
+ /**
+ * Within the _local_ filesystem (not HDFS), all activity takes place within
+ * a subdir inside one of the LOCAL_DIRS
+ * (${local.dir}/usercache/$user/appcache/$appId/$contId/),
+ * and all sub-MapTasks create the same filename ("file.out"). Rename that
+ * to something unique (e.g., "map_0.out") to avoid possible collisions.
+ *
+ * Longer-term, we'll modify [something] to use TaskAttemptID-based
+ * filenames instead of "file.out". (All of this is entirely internal,
+ * so there are no particular compatibility issues.)
+ */
+ @VisibleForTesting
+ protected static MapOutputFile renameMapOutputForReduce(JobConf conf,
+ TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException {
+ FileSystem localFs = FileSystem.getLocal(conf);
+ // move map output to reduce input
+ Path mapOut = subMapOutputFile.getOutputFile();
+ FileStatus mStatus = localFs.getFileStatus(mapOut);
+ Path reduceIn = subMapOutputFile.getInputFileForWrite(
+ TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen());
+ Path mapOutIndex = subMapOutputFile.getOutputIndexFile();
+ Path reduceInIndex = new Path(reduceIn.toString() + ".index");
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Renaming map output file for task attempt "
+ + mapId.toString() + " from original location " + mapOut.toString()
+ + " to destination " + reduceIn.toString());
+ }
+ if (!localFs.mkdirs(reduceIn.getParent())) {
+ throw new IOException("Mkdirs failed to create "
+ + reduceIn.getParent().toString());
+ }
+ if (!localFs.rename(mapOut, reduceIn))
+ throw new IOException("Couldn't rename " + mapOut);
+ if (!localFs.rename(mapOutIndex, reduceInIndex))
+ throw new IOException("Couldn't rename " + mapOutIndex);
+
+ return new RenamedMapOutputFile(reduceIn);
+ }
+
private static class RenamedMapOutputFile extends MapOutputFile {
private Path path;
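
The key change in renameMapOutputForReduce is that the index path now comes from
MapOutputFile.getOutputIndexFile() instead of being derived by appending
".index" to the output path. The plain-JDK sketch below (not Hadoop code, with
made-up directory names) shows why the string-derived path goes wrong once a
local-dir allocator places file.out and file.out.index under different
LOCAL_DIRS:

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class IndexPathPitfall {
      public static void main(String[] args) {
        Path mapOut  = Paths.get("/local-0/appcache/app_1/c_1/output/file.out");
        Path realIdx = Paths.get("/local-1/appcache/app_1/c_1/output/file.out.index");

        // Old approach: assume the index file sits next to the output file.
        Path guessedIdx = Paths.get(mapOut.toString() + ".index");

        System.out.println("guessed: " + guessedIdx); // under /local-0, wrong dir
        System.out.println("actual : " + realIdx);    // under /local-1
        // Renaming the guessed path would fail; asking the MapOutputFile
        // implementation for getOutputIndexFile() avoids the assumption.
      }
    }
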
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
index 426dc212f52..110e9c850d7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
@@ -64,6 +64,7 @@ public class LocalContainerAllocator extends RMCommunicator
private int nmPort;
private int nmHttpPort;
private ContainerId containerId;
+ protected int lastResponseID;
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
@@ -119,6 +120,11 @@ public class LocalContainerAllocator extends RMCommunicator
if (allocateResponse.getAMCommand() != null) {
switch(allocateResponse.getAMCommand()) {
case AM_RESYNC:
+ LOG.info("ApplicationMaster is out of sync with ResourceManager,"
+ + " hence resyncing.");
+ this.lastResponseID = 0;
+ register();
+ break;
case AM_SHUTDOWN:
LOG.info("Event from RM: shutting down Application Master");
// This can happen if the RM has been restarted. If it is in that state,
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index e435009c6a7..4b32c045238 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -216,20 +217,27 @@ public abstract class RMCommunicator extends AbstractService
FinishApplicationMasterRequest request =
FinishApplicationMasterRequest.newInstance(finishState,
sb.toString(), historyUrl);
- while (true) {
- FinishApplicationMasterResponse response =
- scheduler.finishApplicationMaster(request);
- if (response.getIsUnregistered()) {
- // When excepting ClientService, other services are already stopped,
- // it is safe to let clients know the final states. ClientService
- // should wait for some time so clients have enough time to know the
- // final states.
- RunningAppContext raContext = (RunningAppContext) context;
- raContext.markSuccessfulUnregistration();
- break;
+ try {
+ while (true) {
+ FinishApplicationMasterResponse response =
+ scheduler.finishApplicationMaster(request);
+ if (response.getIsUnregistered()) {
+ // When excepting ClientService, other services are already stopped,
+ // it is safe to let clients know the final states. ClientService
+ // should wait for some time so clients have enough time to know the
+ // final states.
+ RunningAppContext raContext = (RunningAppContext) context;
+ raContext.markSuccessfulUnregistration();
+ break;
+ }
+ LOG.info("Waiting for application to be successfully unregistered.");
+ Thread.sleep(rmPollInterval);
}
- LOG.info("Waiting for application to be successfully unregistered.");
- Thread.sleep(rmPollInterval);
+ } catch (ApplicationMasterNotRegisteredException e) {
+ // RM might have restarted or failed over and so lost the fact that AM had
+ // registered before.
+ register();
+ doUnregistration();
}
}
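
doUnregistration now tolerates an RM that restarted and lost this AM's
registration: the ApplicationMasterNotRegisteredException is caught, the AM
registers again, and the unregistration loop is retried. A compact sketch of
that shape, where RMClient and NotRegisteredException are placeholder types
rather than MRAppMaster classes:

    final class UnregisterWithRetry {
      interface RMClient {
        void register() throws Exception;
        // Returns true once the RM confirms the application is unregistered.
        boolean finishAndCheckUnregistered() throws Exception;
      }

      static class NotRegisteredException extends Exception {}

      static void unregister(RMClient rm, long pollIntervalMs) throws Exception {
        boolean reRegistered = false;
        while (true) {
          try {
            while (!rm.finishAndCheckUnregistered()) {
              Thread.sleep(pollIntervalMs); // RM has not recorded the finish yet
            }
            return;
          } catch (NotRegisteredException e) {
            if (reRegistered) {
              throw e;                      // give up after one re-registration
            }
            // RM restarted or failed over and forgot the AM: register again,
            // then retry the unregistration loop.
            rm.register();
            reRegistered = true;
          }
        }
      }
    }
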
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 64872cfe671..307cdfe759c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -389,6 +389,7 @@ public class RMContainerAllocator extends RMContainerRequestor
removed = true;
assignedRequests.remove(aId);
containersReleased++;
+ pendingRelease.add(containerId);
release(containerId);
}
}
@@ -641,6 +642,15 @@ public class RMContainerAllocator extends RMContainerRequestor
if (response.getAMCommand() != null) {
switch(response.getAMCommand()) {
case AM_RESYNC:
+ LOG.info("ApplicationMaster is out of sync with ResourceManager,"
+ + " hence resyncing.");
+ lastResponseID = 0;
+
+ // Registering to allow RM to discover an active AM for this
+ // application
+ register();
+ addOutstandingRequestOnResync();
+ break;
case AM_SHUTDOWN:
// This can happen if the RM has been restarted. If it is in that state,
// this application must clean itself up.
@@ -700,6 +710,7 @@ public class RMContainerAllocator extends RMContainerRequestor
LOG.error("Container complete event for unknown container id "
+ cont.getContainerId());
} else {
+ pendingRelease.remove(cont.getContainerId());
assignedRequests.remove(attemptID);
// send the container completed event to Task attempt
@@ -991,6 +1002,7 @@ public class RMContainerAllocator extends RMContainerRequestor
private void containerNotAssigned(Container allocated) {
containersReleased++;
+ pendingRelease.add(allocated.getId());
release(allocated.getId());
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
index 18242119451..943c0af0d95 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.AMCommand;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -58,7 +59,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
private static final Log LOG = LogFactory.getLog(RMContainerRequestor.class);
- private int lastResponseID;
+ protected int lastResponseID;
private Resource availableResources;
private final RecordFactory recordFactory =
@@ -77,8 +78,11 @@ public abstract class RMContainerRequestor extends RMCommunicator {
// numContainers dont end up as duplicates
private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(
new org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator());
- private final Set<ContainerId> release = new TreeSet<ContainerId>();
-
+ private final Set<ContainerId> release = new TreeSet<ContainerId>();
+ // pendingRelease holds the history of release requests; a request is removed
+ // only when the RM sends back a completedContainer for it.
+ // How is it different from release? release is per allocate() request.
+ protected Set<ContainerId> pendingRelease = new TreeSet<ContainerId>();
private boolean nodeBlacklistingEnabled;
private int blacklistDisablePercent;
private AtomicBoolean ignoreBlacklisting = new AtomicBoolean(false);
@@ -186,6 +190,10 @@ public abstract class RMContainerRequestor extends RMCommunicator {
} catch (YarnException e) {
throw new IOException(e);
}
+
+ if (isResyncCommand(allocateResponse)) {
+ return allocateResponse;
+ }
lastResponseID = allocateResponse.getResponseId();
availableResources = allocateResponse.getAvailableResources();
lastClusterNmCount = clusterNmCount;
@@ -214,6 +222,28 @@ public abstract class RMContainerRequestor extends RMCommunicator {
return allocateResponse;
}
+ protected boolean isResyncCommand(AllocateResponse allocateResponse) {
+ return allocateResponse.getAMCommand() != null
+ && allocateResponse.getAMCommand() == AMCommand.AM_RESYNC;
+ }
+
+ protected void addOutstandingRequestOnResync() {
+ for (Map<String, Map<Resource, ResourceRequest>> rr : remoteRequestsTable
+ .values()) {
+ for (Map<Resource, ResourceRequest> capabilities : rr.values()) {
+ for (ResourceRequest request : capabilities.values()) {
+ addResourceRequestToAsk(request);
+ }
+ }
+ }
+ if (!ignoreBlacklisting.get()) {
+ blacklistAdditions.addAll(blacklistedNodes);
+ }
+ if (!pendingRelease.isEmpty()) {
+ release.addAll(pendingRelease);
+ }
+ }
+
// May be incorrect if there's multiple NodeManagers running on a single host.
// knownNodeCount is based on node managers, not hosts. blacklisting is
// currently based on hosts.
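
pendingRelease and addOutstandingRequestOnResync exist because a work-preserving
RM restart wipes the scheduler's view of this AM: on resync the AM has to replay
every unsatisfied ask, every blacklist addition, and every release that has not
yet been acknowledged by a completed-container event. A stripped-down sketch of
that bookkeeping, using plain String ids in place of the YARN record types:

    import java.util.HashSet;
    import java.util.Set;

    final class ResyncState {
      // Durable AM-side state that survives across heartbeats.
      final Set<String> outstandingAsks = new HashSet<>();
      final Set<String> blacklistedNodes = new HashSet<>();
      // Releases requested but not yet acknowledged via a completed container.
      final Set<String> pendingRelease = new HashSet<>();

      // Per-heartbeat buffers, normally cleared after each allocate() call.
      final Set<String> ask = new HashSet<>();
      final Set<String> release = new HashSet<>();
      final Set<String> blacklistAdditions = new HashSet<>();

      void onResync() {
        ask.addAll(outstandingAsks);        // resend every unsatisfied request
        release.addAll(pendingRelease);     // resend every unacknowledged release
        blacklistAdditions.addAll(blacklistedNodes);
      }

      void onContainerCompleted(String containerId) {
        pendingRelease.remove(containerId); // RM has acted on the release
      }
    }
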
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestLocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestLocalContainerLauncher.java
index 9a0662ee2c4..28a891850e4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestLocalContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestLocalContainerLauncher.java
@@ -18,17 +18,26 @@
package org.apache.hadoop.mapred;
+import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+import java.io.File;
+import java.io.IOException;
+import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -46,6 +55,9 @@ import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -53,6 +65,36 @@ import org.mockito.stubbing.Answer;
public class TestLocalContainerLauncher {
private static final Log LOG =
LogFactory.getLog(TestLocalContainerLauncher.class);
+ private static File testWorkDir;
+ private static final String[] localDirs = new String[2];
+
+ private static void delete(File dir) throws IOException {
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.getLocal(conf);
+ Path p = fs.makeQualified(new Path(dir.getAbsolutePath()));
+ fs.delete(p, true);
+ }
+
+ @BeforeClass
+ public static void setupTestDirs() throws IOException {
+ testWorkDir = new File("target",
+ TestLocalContainerLauncher.class.getCanonicalName());
+ testWorkDir.delete();
+ testWorkDir.mkdirs();
+ testWorkDir = testWorkDir.getAbsoluteFile();
+ for (int i = 0; i < localDirs.length; i++) {
+ final File dir = new File(testWorkDir, "local-" + i);
+ dir.mkdirs();
+ localDirs[i] = dir.toString();
+ }
+ }
+
+ @AfterClass
+ public static void cleanupTestDirs() throws IOException {
+ if (testWorkDir != null) {
+ delete(testWorkDir);
+ }
+ }
@SuppressWarnings("rawtypes")
@Test(timeout=10000)
@@ -141,4 +183,35 @@ public class TestLocalContainerLauncher {
when(container.getNodeId()).thenReturn(nodeId);
return container;
}
+
+
+ @Test
+ public void testRenameMapOutputForReduce() throws Exception {
+ final JobConf conf = new JobConf();
+
+ final MROutputFiles mrOutputFiles = new MROutputFiles();
+ mrOutputFiles.setConf(conf);
+
+ // make sure both dirs are distinct
+ //
+ conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString());
+ final Path mapOut = mrOutputFiles.getOutputFileForWrite(1);
+ conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString());
+ final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1);
+ Assert.assertNotEquals("Paths must be different!",
+ mapOut.getParent(), mapOutIdx.getParent());
+
+ // make both dirs part of LOCAL_DIR
+ conf.setStrings(MRConfig.LOCAL_DIR, localDirs);
+
+ final FileContext lfc = FileContext.getLocalFSFileContext(conf);
+ lfc.create(mapOut, EnumSet.of(CREATE)).close();
+ lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();
+
+ final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
+ final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+ final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);
+
+ LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 74edce22777..e554281f37e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
@@ -87,6 +88,7 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.Event;
@@ -95,9 +97,13 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -618,6 +624,10 @@ public class TestRMContainerAllocator {
super(conf);
}
+ public MyResourceManager(Configuration conf, RMStateStore store) {
+ super(conf, store);
+ }
+
@Override
public void serviceStart() throws Exception {
super.serviceStart();
@@ -1426,6 +1436,13 @@ public class TestRMContainerAllocator {
rm.getMyFifoScheduler().lastBlacklistRemovals.size());
}
+ private static void assertAsksAndReleases(int expectedAsk,
+ int expectedRelease, MyResourceManager rm) {
+ Assert.assertEquals(expectedAsk, rm.getMyFifoScheduler().lastAsk.size());
+ Assert.assertEquals(expectedRelease,
+ rm.getMyFifoScheduler().lastRelease.size());
+ }
+
private static class MyFifoScheduler extends FifoScheduler {
public MyFifoScheduler(RMContext rmContext) {
@@ -1440,6 +1457,7 @@ public class TestRMContainerAllocator {
}
List<ResourceRequest> lastAsk = null;
+ List<ContainerId> lastRelease = null;
List lastBlacklistAdditions;
List lastBlacklistRemovals;
@@ -1458,6 +1476,7 @@ public class TestRMContainerAllocator {
askCopy.add(reqCopy);
}
lastAsk = ask;
+ lastRelease = release;
lastBlacklistAdditions = blacklistAdditions;
lastBlacklistRemovals = blacklistRemovals;
return super.allocate(
@@ -1505,6 +1524,20 @@ public class TestRMContainerAllocator {
return new ContainerFailedEvent(attemptId, host);
}
+ private ContainerAllocatorEvent createDeallocateEvent(JobId jobId,
+ int taskAttemptId, boolean reduce) {
+ TaskId taskId;
+ if (reduce) {
+ taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
+ } else {
+ taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+ }
+ TaskAttemptId attemptId =
+ MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
+ return new ContainerAllocatorEvent(attemptId,
+ ContainerAllocator.EventType.CONTAINER_DEALLOCATE);
+ }
+
private void checkAssignments(ContainerRequestEvent[] requests,
List<TaskAttemptContainerAssignedEvent> assignments,
boolean checkHostMatch) {
@@ -1557,6 +1590,7 @@ public class TestRMContainerAllocator {
= new ArrayList();
private MyResourceManager rm;
private boolean isUnregistered = false;
+ private AllocateResponse allocateResponse;
private static AppContext createAppContext(
ApplicationAttemptId appAttemptId, Job job) {
AppContext context = mock(AppContext.class);
@@ -1668,6 +1702,10 @@ public class TestRMContainerAllocator {
super.handleEvent(f);
}
+ public void sendDeallocate(ContainerAllocatorEvent f) {
+ super.handleEvent(f);
+ }
+
// API to be used by tests
public List<TaskAttemptContainerAssignedEvent> schedule()
throws Exception {
@@ -1713,6 +1751,20 @@ public class TestRMContainerAllocator {
public boolean isUnregistered() {
return isUnregistered;
}
+
+ public void updateSchedulerProxy(MyResourceManager rm) {
+ scheduler = rm.getApplicationMasterService();
+ }
+
+ @Override
+ protected AllocateResponse makeRemoteRequest() throws IOException {
+ allocateResponse = super.makeRemoteRequest();
+ return allocateResponse;
+ }
+
+ public boolean isResyncCommand() {
+ return super.isResyncCommand(allocateResponse);
+ }
}
@Test
@@ -2022,6 +2074,198 @@ public class TestRMContainerAllocator {
Assert.assertTrue(allocator.isUnregistered());
}
+ // Step-1 : AM sends an allocate request for 2 ContainerRequests and 1
+ // blacklisted node
+ // Step-2 : 2 containers are allocated by RM.
+ // Step-3 : AM sends 1 containerRequest (event3) and 1 release request to
+ // RM
+ // Step-4 : On RM restart, AM (which does not know the RM restarted) sends an
+ // additional containerRequest (event4) and blacklisted nodes.
+ // In turn, RM sends the resync command
+ // Step-5 : On resync, AM sends all outstanding asks, releases, blacklist
+ // additions and another containerRequest (event5)
+ // Step-6 : RM allocates containers for event3, event4 and cRequest5
+ @Test
+ public void testRMContainerAllocatorResendsRequestsOnRMRestart()
+ throws Exception {
+
+ Configuration conf = new Configuration();
+ conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
+ conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+ conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+ YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
+ conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
+
+ conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
+ conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
+ conf.setInt(
+ MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
+
+ MemoryRMStateStore memStore = new MemoryRMStateStore();
+ memStore.init(conf);
+
+ MyResourceManager rm1 = new MyResourceManager(conf, memStore);
+ rm1.start();
+ DrainDispatcher dispatcher =
+ (DrainDispatcher) rm1.getRMContext().getDispatcher();
+
+ // Submit the application
+ RMApp app = rm1.submitApp(1024);
+ dispatcher.await();
+
+ MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
+ nm1.registerNode();
+ nm1.nodeHeartbeat(true); // Node heartbeat
+ dispatcher.await();
+
+ ApplicationAttemptId appAttemptId =
+ app.getCurrentAppAttempt().getAppAttemptId();
+ rm1.sendAMLaunched(appAttemptId);
+ dispatcher.await();
+
+ JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+ Job mockJob = mock(Job.class);
+ when(mockJob.getReport()).thenReturn(
+ MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+ 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
+ MyContainerAllocator allocator =
+ new MyContainerAllocator(rm1, conf, appAttemptId, mockJob);
+
+ // Step-1 : AM sends an allocate request for 2 ContainerRequests and 1
+ // blacklisted node
+ // create the container request
+ // send MAP request
+ ContainerRequestEvent event1 =
+ createReq(jobId, 1, 1024, new String[] { "h1" });
+ allocator.sendRequest(event1);
+
+ ContainerRequestEvent event2 =
+ createReq(jobId, 2, 2048, new String[] { "h1", "h2" });
+ allocator.sendRequest(event2);
+
+ // Send events to blacklist h2
+ ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h2", false);
+ allocator.sendFailure(f1);
+
+ // send allocate request and 1 blacklisted nodes
+ List<TaskAttemptContainerAssignedEvent> assignedContainers =
+ allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals("No of assignments must be 0", 0,
+ assignedContainers.size());
+ // Why is ask 3, not 4? The ask for blacklisted node h2 is removed
+ assertAsksAndReleases(3, 0, rm1);
+ assertBlacklistAdditionsAndRemovals(1, 0, rm1);
+
+ nm1.nodeHeartbeat(true); // Node heartbeat
+ dispatcher.await();
+
+ // Step-2 : 2 containers are allocated by RM.
+ assignedContainers = allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals("No of assignments must be 2", 2,
+ assignedContainers.size());
+ assertAsksAndReleases(0, 0, rm1);
+ assertBlacklistAdditionsAndRemovals(0, 0, rm1);
+
+ assignedContainers = allocator.schedule();
+ Assert.assertEquals("No of assignments must be 0", 0,
+ assignedContainers.size());
+ assertAsksAndReleases(3, 0, rm1);
+ assertBlacklistAdditionsAndRemovals(0, 0, rm1);
+
+ // Step-3 : AM sends 1 containerRequest (event3) and 1 release request to
+ // RM
+ // send container request
+ ContainerRequestEvent event3 =
+ createReq(jobId, 3, 1000, new String[] { "h1" });
+ allocator.sendRequest(event3);
+
+ // send deallocate request
+ ContainerAllocatorEvent deallocate1 =
+ createDeallocateEvent(jobId, 1, false);
+ allocator.sendDeallocate(deallocate1);
+
+ assignedContainers = allocator.schedule();
+ Assert.assertEquals("No of assignments must be 0", 0,
+ assignedContainers.size());
+ assertAsksAndReleases(3, 1, rm1);
+ assertBlacklistAdditionsAndRemovals(0, 0, rm1);
+
+ // Phase-2 : the second RM starts up
+ MyResourceManager rm2 = new MyResourceManager(conf, memStore);
+ rm2.start();
+ nm1.setResourceTrackerService(rm2.getResourceTrackerService());
+ allocator.updateSchedulerProxy(rm2);
+ dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();
+
+ // NM should be told to resync on heartbeat, even on the first heartbeat to rm2
+ NodeHeartbeatResponse hbResponse = nm1.nodeHeartbeat(true);
+ Assert.assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction());
+
+ // new NM to represent NM re-register
+ nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService());
+ nm1.registerNode();
+ nm1.nodeHeartbeat(true);
+ dispatcher.await();
+
+ // Step-4 : On RM restart, AM (which does not know the RM restarted) sends an
+ // additional containerRequest (event4) and blacklisted nodes.
+ // In turn, RM sends the resync command
+
+ // send deallocate request, release=1
+ ContainerAllocatorEvent deallocate2 =
+ createDeallocateEvent(jobId, 2, false);
+ allocator.sendDeallocate(deallocate2);
+
+ // Send events to blacklist node h3
+ ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h3", false);
+ allocator.sendFailure(f2);
+
+ ContainerRequestEvent event4 =
+ createReq(jobId, 4, 2000, new String[] { "h1", "h2" });
+ allocator.sendRequest(event4);
+
+ // send allocate request to 2nd RM and get resync command
+ allocator.schedule();
+ dispatcher.await();
+ Assert.assertTrue("Last allocate response is not RESYNC",
+ allocator.isResyncCommand());
+
+ // Step-5 : On resync, AM sends all outstanding asks, releases, blacklist
+ // additions and another containerRequest (event5)
+ ContainerRequestEvent event5 =
+ createReq(jobId, 5, 3000, new String[] { "h1", "h2", "h3" });
+ allocator.sendRequest(event5);
+
+ // send all outstanding requests again.
+ assignedContainers = allocator.schedule();
+ dispatcher.await();
+ assertAsksAndReleases(3, 2, rm2);
+ assertBlacklistAdditionsAndRemovals(2, 0, rm2);
+
+ nm1.nodeHeartbeat(true);
+ dispatcher.await();
+
+ // Step-6 : RM allocates containers for event3, event4 and cRequest5
+ assignedContainers = allocator.schedule();
+ dispatcher.await();
+
+ Assert.assertEquals("Number of container should be 3", 3,
+ assignedContainers.size());
+
+ for (TaskAttemptContainerAssignedEvent assig : assignedContainers) {
+ Assert.assertTrue("Assigned count not correct",
+ "h1".equals(assig.getContainer().getNodeId().getHost()));
+ }
+
+ rm1.stop();
+ rm2.stop();
+
+ }
+
public static void main(String[] args) throws Exception {
TestRMContainerAllocator t = new TestRMContainerAllocator();
t.testSimple();
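The test above drives the AM-side handling of the RM resync command during a work-preserving RM restart. A minimal sketch of that pattern, using illustrative helper names rather than the real RMContainerAllocator internals:

// Sketch only: react to a RESYNC command from a restarted RM by
// re-registering and replaying all outstanding requests.
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.AMCommand;

class ResyncAwareAllocator {
  // Hypothetical helpers; the real allocator keeps this state internally.
  void reRegisterWithRM() { /* register the AM with the new RM instance */ }
  void resendOutstandingRequests() { /* asks + releases + blacklist additions */ }
  void processAssignedContainers(AllocateResponse response) { /* handle allocations */ }

  void onAllocateResponse(AllocateResponse response) {
    if (response.getAMCommand() == AMCommand.AM_RESYNC) {
      reRegisterWithRM();
      resendOutstandingRequests();
    } else {
      processAssignedContainers(response);
    }
  }
}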
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 508b0331024..8e7e76c3442 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -671,7 +671,7 @@
mapreduce.task.profile.params
-
+ -agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s
JVM profiler parameters used to profile map and reduce task
attempts. This string may contain a single format specifier %s that will
be replaced by the path to profile.out in the task attempt log directory.
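The new default uses hprof with a single %s placeholder. A quick sketch of how that placeholder is typically expanded into the final JVM argument (the path below is illustrative, not the real task-attempt layout):

// Sketch: expanding the profile-params template into a concrete agent flag.
String template =
    "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s";
String profileOut = "/tmp/attempt_xyz/profile.out";   // assumed log-dir path
String jvmArg = String.format(template, profileOut);
// jvmArg -> -agentlib:hprof=...,file=/tmp/attempt_xyz/profile.out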
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
index 73f039d6366..b69f450ed35 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
@@ -29,11 +29,7 @@ public class TestJobConf {
@Test
public void testProfileParamsDefaults() {
JobConf configuration = new JobConf();
-
- Assert.assertNull(configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
-
String result = configuration.getProfileParams();
-
Assert.assertNotNull(result);
Assert.assertTrue(result.contains("file=%s"));
Assert.assertTrue(result.startsWith("-agentlib:hprof"));
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java
index bf7f85e0756..e91f5c98071 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithProfiler.java
@@ -24,6 +24,7 @@ import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.junit.AfterClass;
import org.junit.Assert;
import org.apache.commons.logging.Log;
@@ -39,8 +40,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Test;
public class TestMRJobsWithProfiler {
@@ -51,6 +51,8 @@ public class TestMRJobsWithProfiler {
private static final EnumSet TERMINAL_RM_APP_STATES =
EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED);
+ private static final int PROFILED_TASK_ID = 1;
+
private static MiniMRYarnCluster mrCluster;
private static final Configuration CONF = new Configuration();
@@ -69,8 +71,8 @@ public class TestMRJobsWithProfiler {
private static final Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
- @Before
- public void setup() throws InterruptedException, IOException {
+ @BeforeClass
+ public static void setup() throws InterruptedException, IOException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
@@ -79,7 +81,7 @@ public class TestMRJobsWithProfiler {
}
if (mrCluster == null) {
- mrCluster = new MiniMRYarnCluster(getClass().getName());
+ mrCluster = new MiniMRYarnCluster(TestMRJobsWithProfiler.class.getName());
mrCluster.init(CONF);
mrCluster.start();
}
@@ -90,8 +92,8 @@ public class TestMRJobsWithProfiler {
localFs.setPermission(APP_JAR, new FsPermission("700"));
}
- @After
- public void tearDown() {
+ @AfterClass
+ public static void tearDown() {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
@@ -103,10 +105,19 @@ public class TestMRJobsWithProfiler {
}
}
+ @Test (timeout = 150000)
+ public void testDefaultProfiler() throws Exception {
+ LOG.info("Starting testDefaultProfiler");
+ testProfilerInternal(true);
+ }
@Test (timeout = 150000)
- public void testProfiler() throws IOException, InterruptedException,
- ClassNotFoundException {
+ public void testDifferentProfilers() throws Exception {
+ LOG.info("Starting testDefaultProfiler");
+ testProfilerInternal(false);
+ }
+
+ private void testProfilerInternal(boolean useDefault) throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");
@@ -117,18 +128,19 @@ public class TestMRJobsWithProfiler {
final JobConf sleepConf = new JobConf(mrCluster.getConfig());
sleepConf.setProfileEnabled(true);
- // profile map split 1
- sleepConf.setProfileTaskRange(true, "1");
- // profile reduce of map output partitions 1
- sleepConf.setProfileTaskRange(false, "1");
+ sleepConf.setProfileTaskRange(true, String.valueOf(PROFILED_TASK_ID));
+ sleepConf.setProfileTaskRange(false, String.valueOf(PROFILED_TASK_ID));
- // use hprof for map to profile.out
- sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS,
- "-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n,"
- + "file=%s");
+ if (!useDefault) {
+ // use hprof for map to profile.out
+ sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS,
+ "-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n,"
+ + "file=%s");
+
+ // use Xprof for reduce to stdout
+ sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof");
+ }
- // use Xprof for reduce to stdout
- sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof");
sleepJob.setConf(sleepConf);
// 2-map-2-reduce SleepJob
@@ -205,8 +217,8 @@ public class TestMRJobsWithProfiler {
TaskLog.LogName.PROFILE.toString());
final Path stdoutPath = new Path(dirEntry.getValue(),
TaskLog.LogName.STDOUT.toString());
- if (tid.getTaskType() == TaskType.MAP) {
- if (tid.getTaskID().getId() == 1) {
+ if (useDefault || tid.getTaskType() == TaskType.MAP) {
+ if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
// verify profile.out
final BufferedReader br = new BufferedReader(new InputStreamReader(
localFs.open(profilePath)));
@@ -222,7 +234,8 @@ public class TestMRJobsWithProfiler {
} else {
Assert.assertFalse("hprof file should not exist",
localFs.exists(profilePath));
- if (tid.getTaskID().getId() == 1) {
+ if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
+ // reducer is profiled with Xprof
final BufferedReader br = new BufferedReader(new InputStreamReader(
localFs.open(stdoutPath)));
boolean flatProfFound = false;
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index f9d7377dfae..577711f4ce0 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -373,6 +373,8 @@ public class NativeAzureFileSystem extends FileSystem {
private Path workingDir;
private long blockSize = MAX_AZURE_BLOCK_SIZE;
private AzureFileSystemInstrumentation instrumentation;
+ private String metricsSourceName;
+ private boolean isClosed = false;
private static boolean suppressRetryPolicy = false;
// A counter to create unique (within-process) names for my metrics sources.
private static AtomicInteger metricsSourceNameCounter = new AtomicInteger();
@@ -482,11 +484,10 @@ public class NativeAzureFileSystem extends FileSystem {
// Make sure the metrics system is available before interacting with Azure
AzureFileSystemMetricsSystem.fileSystemStarted();
- String sourceName = newMetricsSourceName(),
- sourceDesc = "Azure Storage Volume File System metrics";
- instrumentation = DefaultMetricsSystem.instance().register(sourceName,
- sourceDesc, new AzureFileSystemInstrumentation(conf));
- AzureFileSystemMetricsSystem.registerSource(sourceName, sourceDesc,
+ metricsSourceName = newMetricsSourceName();
+ String sourceDesc = "Azure Storage Volume File System metrics";
+ instrumentation = new AzureFileSystemInstrumentation(conf);
+ AzureFileSystemMetricsSystem.registerSource(metricsSourceName, sourceDesc,
instrumentation);
store.initialize(uri, conf, instrumentation);
@@ -502,7 +503,6 @@ public class NativeAzureFileSystem extends FileSystem {
LOG.debug(" blockSize = "
+ conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME, MAX_AZURE_BLOCK_SIZE));
}
-
}
private NativeFileSystemStore createDefaultStore(Configuration conf) {
@@ -1337,7 +1337,11 @@ public class NativeAzureFileSystem extends FileSystem {
}
@Override
- public void close() throws IOException {
+ public synchronized void close() throws IOException {
+ if (isClosed) {
+ return;
+ }
+
// Call the base close() to close any resources there.
super.close();
// Close the store
@@ -1349,12 +1353,14 @@ public class NativeAzureFileSystem extends FileSystem {
long startTime = System.currentTimeMillis();
+ AzureFileSystemMetricsSystem.unregisterSource(metricsSourceName);
AzureFileSystemMetricsSystem.fileSystemClosed();
if (LOG.isDebugEnabled()) {
LOG.debug("Submitting metrics when file system closed took "
+ (System.currentTimeMillis() - startTime) + " ms.");
}
+ isClosed = true;
}
/**
@@ -1498,6 +1504,13 @@ public class NativeAzureFileSystem extends FileSystem {
handleFilesWithDanglingTempData(root, new DanglingFileDeleter());
}
+ @Override
+ protected void finalize() throws Throwable {
+ LOG.debug("finalize() called.");
+ close();
+ super.finalize();
+ }
+
/**
* Encode the key with a random prefix for load balancing in Azure storage.
* Upload data to a random temporary file then do storage side renaming to
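The close() change above is the usual idempotent-close pattern with finalize() as a safety net; a standalone sketch under assumed names, not the Azure code itself:

// Sketch: close() guarded by a flag so repeated calls are harmless, with
// finalize() invoking close() as a best-effort backstop.
import java.io.Closeable;
import java.io.IOException;

class IdempotentCloseResource implements Closeable {
  private boolean isClosed = false;

  @Override
  public synchronized void close() throws IOException {
    if (isClosed) {
      return;                       // second and later calls are no-ops
    }
    // ... release underlying resources, unregister the metrics source ...
    isClosed = true;
  }

  @Override
  protected void finalize() throws Throwable {
    close();                        // cleanup if the caller forgot to close
    super.finalize();
  }
}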
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemMetricsSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemMetricsSystem.java
index a5f29c1f33d..322795ab827 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemMetricsSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/AzureFileSystemMetricsSystem.java
@@ -44,21 +44,26 @@ public final class AzureFileSystemMetricsSystem {
}
public static synchronized void fileSystemClosed() {
- if (instance != null) {
- instance.publishMetricsNow();
- }
if (numFileSystems == 1) {
+ instance.publishMetricsNow();
instance.stop();
instance.shutdown();
instance = null;
}
numFileSystems--;
}
-
+
public static void registerSource(String name, String desc,
MetricsSource source) {
- // Register the source with the name appended with -WasbSystem
- // so that the name is globally unique.
- instance.register(name + "-WasbSystem", desc, source);
+ // Callers must register each source under a globally unique name
+ instance.register(name, desc, source);
+ }
+
+ public static synchronized void unregisterSource(String name) {
+ if (instance != null) {
+ // Publish metrics before unregistering the metrics source
+ instance.publishMetricsNow();
+ instance.unregisterSource(name);
+ }
}
}
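With unregisterSource() available, each file system instance can register a uniquely named source on startup and remove it again on close(), so long-lived processes do not accumulate stale sources. A rough sketch of that lifecycle against the metrics system (the source object and name are placeholders):

// Sketch: per-instance metrics source lifecycle.
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

class MetricsLifecycleSketch {
  void run(Object instrumentation /* e.g. an AzureFileSystemInstrumentation */) {
    String sourceName = "AzureFileSystemMetrics-42";      // must be unique
    DefaultMetricsSystem.instance().register(
        sourceName, "Azure Storage Volume File System metrics", instrumentation);
    // ... file system in use ...
    DefaultMetricsSystem.instance().unregisterSource(sourceName);  // newly added API
  }
}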
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
index 02738e7efb5..80e8e4351a5 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
@@ -324,9 +324,7 @@ public final class AzureBlobStorageTestAccount {
String sourceName = NativeAzureFileSystem.newMetricsSourceName();
String sourceDesc = "Azure Storage Volume File System metrics";
- AzureFileSystemInstrumentation instrumentation =
- DefaultMetricsSystem.instance().register(sourceName,
- sourceDesc, new AzureFileSystemInstrumentation(conf));
+ AzureFileSystemInstrumentation instrumentation = new AzureFileSystemInstrumentation(conf);
AzureFileSystemMetricsSystem.registerSource(
sourceName, sourceDesc, instrumentation);
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
index bc7e344540a..e731b21d506 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java
@@ -516,6 +516,13 @@ public abstract class NativeAzureFileSystemBaseTest {
assertNotNull(status);
}
+ @Test
+ public void testCloseFileSystemTwice() throws Exception {
+ // Make sure close() can be called multiple times without doing any harm
+ fs.close();
+ fs.close();
+ }
+
private boolean testModifiedTime(Path testPath, long time) throws Exception {
FileStatus fileStatus = fs.getFileStatus(testPath);
final long errorMargin = modifiedTimeErrorMargin;
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
index 2f2eb7c838c..e77b6e183f0 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
@@ -162,6 +162,7 @@ public enum DistCpOptionSwitch {
BANDWIDTH(DistCpConstants.CONF_LABEL_BANDWIDTH_MB,
new Option("bandwidth", true, "Specify bandwidth per map in MB"));
+ static final String PRESERVE_STATUS_DEFAULT = "-prbugpc";
private final String confLabel;
private final Option option;
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
index 09e85505227..4bbc30dea29 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
@@ -50,7 +50,7 @@ public class OptionsParser {
protected String[] flatten(Options options, String[] arguments, boolean stopAtNonOption) {
for (int index = 0; index < arguments.length; index++) {
if (arguments[index].equals("-" + DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
- arguments[index] = "-prbugpc";
+ arguments[index] = DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT;
}
}
return super.flatten(options, arguments, stopAtNonOption);
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bfe257558f0..4665703b286 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -43,6 +43,12 @@ Release 2.6.0 - UNRELEASED
YARN-2274. FairScheduler: Add debug information about cluster capacity,
availability and reservations. (kasha)
+ YARN-2228. Augmented TimelineServer to load pseudo authentication filter when
+ authentication = simple. (Zhijie Shen via vinodkv)
+
+ YARN-1341. Recover NMTokens upon nodemanager restart. (Jason Lowe via
+ junping_du)
+
OPTIMIZATIONS
BUG FIXES
@@ -53,6 +59,16 @@ Release 2.6.0 - UNRELEASED
YARN-2088. Fixed a bug in GetApplicationsRequestPBImpl#mergeLocalToBuilder.
(Binglin Chang via jianhe)
+ YARN-2260. Fixed ResourceManager's RMNode to correctly remember containers
+ when nodes resync during work-preserving RM restart. (Jian He via vinodkv)
+
+ YARN-2264. Fixed a race condition in DrainDispatcher which may cause random
+ test failures. (Li Lu via jianhe)
+
+ YARN-2219. Changed ResourceManager to avoid AMs and NMs getting exceptions
+ after RM recovery but before scheduler learns about apps and app-attempts.
+ (Jian He via vinodkv)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -89,6 +105,9 @@ Release 2.5.0 - UNRELEASED
YARN-1713. Added get-new-app and submit-app functionality to RM web services.
(Varun Vasudev via vinodkv)
+ YARN-2233. Implemented ResourceManager web-services to create, renew and
+ cancel delegation tokens. (Varun Vasudev via vinodkv)
+
IMPROVEMENTS
YARN-1479. Invalid NaN values in Hadoop REST API JSON response (Chen He via
@@ -253,6 +272,9 @@ Release 2.5.0 - UNRELEASED
YARN-2241. ZKRMStateStore: On startup, show nicer messages if znodes already
exist. (Robert Kanter via kasha)
+ YARN-1408. Preemption caused Invalid State Event: ACQUIRED at KILLED and
+ caused a task timeout for 30 mins. (Sunil G via mayank)
+
OPTIMIZATIONS
BUG FIXES
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemon.sh b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemon.sh
index 527ae42cfd3..fbfa71d80df 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemon.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn-daemon.sh
@@ -145,6 +145,7 @@ case $startStop in
else
echo no $command to stop
fi
+ rm -f $pid
else
echo no $command to stop
fi
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index 5ffe17a24a6..daf25eafeb7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -72,6 +72,7 @@ public class TimelineClientImpl extends TimelineClient {
private static final Log LOG = LogFactory.getLog(TimelineClientImpl.class);
private static final String RESOURCE_URI_STR = "/ws/v1/timeline/";
+ private static final String URL_PARAM_USER_NAME = "user.name";
private static final Joiner JOINER = Joiner.on("");
private static Options opts;
static {
@@ -84,17 +85,18 @@ public class TimelineClientImpl extends TimelineClient {
private Client client;
private URI resURI;
private boolean isEnabled;
- private TimelineAuthenticatedURLConnectionFactory urlFactory;
+ private KerberosAuthenticatedURLConnectionFactory urlFactory;
public TimelineClientImpl() {
super(TimelineClientImpl.class.getName());
ClientConfig cc = new DefaultClientConfig();
cc.getClasses().add(YarnJacksonJaxbJsonProvider.class);
if (UserGroupInformation.isSecurityEnabled()) {
- urlFactory = new TimelineAuthenticatedURLConnectionFactory();
+ urlFactory = new KerberosAuthenticatedURLConnectionFactory();
client = new Client(new URLConnectionClientHandler(urlFactory), cc);
} else {
- client = Client.create(cc);
+ client = new Client(new URLConnectionClientHandler(
+ new PseudoAuthenticatedURLConnectionFactory()), cc);
}
}
@@ -177,7 +179,23 @@ public class TimelineClientImpl extends TimelineClient {
.post(ClientResponse.class, entities);
}
- private static class TimelineAuthenticatedURLConnectionFactory
+ private static class PseudoAuthenticatedURLConnectionFactory
+ implements HttpURLConnectionFactory {
+
+ @Override
+ public HttpURLConnection getHttpURLConnection(URL url) throws IOException {
+ Map params = new HashMap();
+ params.put(URL_PARAM_USER_NAME,
+ UserGroupInformation.getCurrentUser().getShortUserName());
+ url = TimelineAuthenticator.appendParams(url, params);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("URL with delegation token: " + url);
+ }
+ return (HttpURLConnection) url.openConnection();
+ }
+
+ }
+ private static class KerberosAuthenticatedURLConnectionFactory
implements HttpURLConnectionFactory {
private AuthenticatedURL.Token token;
@@ -185,7 +203,7 @@ public class TimelineClientImpl extends TimelineClient {
private Token dToken;
private Text service;
- public TimelineAuthenticatedURLConnectionFactory() {
+ public KerberosAuthenticatedURLConnectionFactory() {
token = new AuthenticatedURL.Token();
authenticator = new TimelineAuthenticator();
}
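In simple-auth mode the new factory just tags each request with the caller's short user name; a simplified sketch of that behavior (plain URL string handling, not the actual helper used in the patch):

// Sketch: append a "user.name" query parameter before opening the connection.
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.UserGroupInformation;

class PseudoAuthConnectionSketch {
  HttpURLConnection open(URL url) throws IOException {
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    String sep = (url.getQuery() == null) ? "?" : "&";
    URL withUser = new URL(url.toExternalForm() + sep + "user.name=" + user);
    return (HttpURLConnection) withUser.openConnection();
  }
}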
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ForbiddenException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ForbiddenException.java
new file mode 100644
index 00000000000..83e0c7d3d70
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ForbiddenException.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.webapp;
+
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+
+@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
+public class ForbiddenException extends WebApplicationException {
+
+ private static final long serialVersionUID = 1L;
+
+ public ForbiddenException() {
+ super(Status.FORBIDDEN);
+ }
+
+ public ForbiddenException(java.lang.Throwable cause) {
+ super(cause, Status.FORBIDDEN);
+ }
+
+ public ForbiddenException(String msg) {
+ super(new Exception(msg), Status.FORBIDDEN);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java
index 1e53e022dc4..8946e2d9ca1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java
@@ -81,6 +81,8 @@ public class GenericExceptionHandler implements ExceptionMapper {
s = Response.Status.NOT_FOUND;
} else if (e instanceof IOException) {
s = Response.Status.NOT_FOUND;
+ } else if (e instanceof ForbiddenException) {
+ s = Response.Status.FORBIDDEN;
} else if (e instanceof UnsupportedOperationException) {
s = Response.Status.BAD_REQUEST;
} else if (e instanceof IllegalArgumentException) {
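Together with the handler change, web services can signal a 403 simply by throwing the new exception; a small usage sketch (the method and message are illustrative):

// Sketch: reject an unidentified caller; GenericExceptionHandler maps
// ForbiddenException to HTTP 403.
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.webapp.ForbiddenException;

class CallerCheckSketch {
  void requireCaller(UserGroupInformation callerUGI) {
    if (callerUGI == null) {
      throw new ForbiddenException("The caller of this request is not set");
    }
  }
}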
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0c1628e5ca9..8bc49e69769 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1217,6 +1217,24 @@
10
+
+ yarn.timeline-service.http-authentication.type
+ simple
+
+ Defines authentication used for the timeline server HTTP endpoint.
+ Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
+
+
+
+
+ yarn.timeline-service.http-authentication.simple.anonymous.allowed
+ true
+
+ Indicates if anonymous requests are allowed by the timeline server when using
+ 'simple' authentication.
+
+
+
The Kerberos principal for the timeline server.
yarn.timeline-service.principal
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
index e79e7b360ef..803b2bb2b3b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
@@ -28,6 +28,7 @@ public class DrainDispatcher extends AsyncDispatcher {
// and similar grotesqueries
private volatile boolean drained = false;
private final BlockingQueue queue;
+ final Object mutex;
public DrainDispatcher() {
this(new LinkedBlockingQueue());
@@ -36,6 +37,7 @@ public class DrainDispatcher extends AsyncDispatcher {
private DrainDispatcher(BlockingQueue eventQueue) {
super(eventQueue);
this.queue = eventQueue;
+ this.mutex = this;
}
/**
@@ -53,8 +55,10 @@ public class DrainDispatcher extends AsyncDispatcher {
@Override
public void run() {
while (!Thread.currentThread().isInterrupted()) {
- // !drained if dispatch queued new events on this dispatcher
- drained = queue.isEmpty();
+ synchronized (mutex) {
+ // !drained if dispatch queued new events on this dispatcher
+ drained = queue.isEmpty();
+ }
Event event;
try {
event = queue.take();
@@ -75,8 +79,10 @@ public class DrainDispatcher extends AsyncDispatcher {
return new EventHandler() {
@Override
public void handle(Event event) {
- drained = false;
- actual.handle(event);
+ synchronized (mutex) {
+ actual.handle(event);
+ drained = false;
+ }
}
};
}
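The mutex closes a race where the dispatcher thread could observe an empty queue and set drained = true while a handler was concurrently enqueuing an event, letting await() return with work still pending. A stripped-down sketch of the invariant, with field names mirroring the test dispatcher but the rest simplified:

// Sketch: "enqueue + drained = false" and "drained = queue.isEmpty()" are
// made mutually exclusive so await() cannot see a stale drained flag.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

class DrainSketch {
  private final Object mutex = this;
  private volatile boolean drained = false;
  private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>();

  void enqueue(Runnable event) {
    synchronized (mutex) {
      queue.add(event);
      drained = false;            // paired with the check in dispatchOnce()
    }
  }

  void dispatchOnce() throws InterruptedException {
    synchronized (mutex) {
      drained = queue.isEmpty();  // cannot interleave with enqueue()
    }
    queue.take().run();
  }

  void await() throws InterruptedException {
    while (!drained) {
      Thread.sleep(50);
    }
  }
}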
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index dfd8c29651c..02a3bb12fc0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.util.ExitUtil;
@@ -178,23 +177,20 @@ public class ApplicationHistoryServer extends CompositeService {
protected void startWebApp() {
Configuration conf = getConfig();
- // Play trick to make the customized filter will only be loaded by the
- // timeline server when security is enabled and Kerberos authentication
- // is used.
- if (UserGroupInformation.isSecurityEnabled()
- && conf
- .get(TimelineAuthenticationFilterInitializer.PREFIX + "type", "")
- .equals("kerberos")) {
- String initializers = conf.get("hadoop.http.filter.initializers");
- initializers =
- initializers == null || initializers.length() == 0 ? "" : ","
- + initializers;
- if (!initializers.contains(
- TimelineAuthenticationFilterInitializer.class.getName())) {
- conf.set("hadoop.http.filter.initializers",
- TimelineAuthenticationFilterInitializer.class.getName()
- + initializers);
- }
+ // Always load the pseudo authentication filter to parse "user.name" in a URL
+ // and identify an HTTP request's user in insecure mode.
+ // When Kerberos authentication type is set (i.e., secure mode is turned on),
+ // the customized filter will be loaded by the timeline server to do Kerberos
+ // + DT authentication.
+ String initializers = conf.get("hadoop.http.filter.initializers");
+ initializers =
+ initializers == null || initializers.length() == 0 ? "" : ","
+ + initializers;
+ if (!initializers.contains(
+ TimelineAuthenticationFilterInitializer.class.getName())) {
+ conf.set("hadoop.http.filter.initializers",
+ TimelineAuthenticationFilterInitializer.class.getName()
+ + initializers);
}
String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(conf);
LOG.info("Instantiating AHSWebApp at " + bindAddress);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
index 848ad0be243..10e62d21035 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
@@ -51,7 +51,8 @@ public class TimelineACLsManager {
public boolean checkAccess(UserGroupInformation callerUGI,
TimelineEntity entity) throws YarnException, IOException {
if (LOG.isDebugEnabled()) {
- LOG.debug("Verifying the access of " + callerUGI.getShortUserName()
+ LOG.debug("Verifying the access of "
+ + (callerUGI == null ? null : callerUGI.getShortUserName())
+ " on the timeline entity "
+ new EntityIdentifier(entity.getEntityId(), entity.getEntityType()));
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
index e6690a6d96a..8e313620b1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
@@ -38,7 +38,8 @@ public class TimelineAuthenticationFilter extends AuthenticationFilter {
// to replace the name here to use the customized Kerberos + DT service
// instead of the standard Kerberos handler.
Properties properties = super.getConfiguration(configPrefix, filterConfig);
- if (properties.getProperty(AUTH_TYPE).equals("kerberos")) {
+ String authType = properties.getProperty(AUTH_TYPE);
+ if (authType != null && authType.equals("kerberos")) {
properties.setProperty(
AUTH_TYPE, TimelineClientAuthenticationService.class.getName());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
index 8aeb4388338..29e6c34b017 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
@@ -47,9 +47,9 @@ import org.apache.hadoop.security.SecurityUtil;
public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
/**
- * The configuration prefix of timeline Kerberos + DT authentication
+ * The configuration prefix of timeline HTTP authentication
*/
- public static final String PREFIX = "yarn.timeline-service.http.authentication.";
+ public static final String PREFIX = "yarn.timeline-service.http-authentication.";
private static final String SIGNATURE_SECRET_FILE =
TimelineAuthenticationFilter.SIGNATURE_SECRET + ".file";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
index a4e8d58c4ce..ad739c94c6f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
@@ -62,11 +62,12 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.timeline.EntityIdentifier;
import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
import org.apache.hadoop.yarn.server.timeline.NameValuePair;
-import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.apache.hadoop.yarn.webapp.ForbiddenException;
import org.apache.hadoop.yarn.webapp.NotFoundException;
import com.google.inject.Inject;
@@ -336,6 +337,11 @@ public class TimelineWebServices {
return new TimelinePutResponse();
}
UserGroupInformation callerUGI = getUser(req);
+ if (callerUGI == null) {
+ String msg = "The owner of the posted timeline entities is not set";
+ LOG.error(msg);
+ throw new ForbiddenException(msg);
+ }
try {
List entityIDs = new ArrayList();
TimelineEntities entitiesToPut = new TimelineEntities();
@@ -375,8 +381,7 @@ public class TimelineWebServices {
// the timeline data.
try {
if (existingEntity == null) {
- injectOwnerInfo(entity,
- callerUGI == null ? "" : callerUGI.getShortUserName());
+ injectOwnerInfo(entity, callerUGI.getShortUserName());
}
} catch (YarnException e) {
// Skip the entity which messes up the primary filter and record the
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
index 8637fdb4b4a..de561aac49a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestMemoryApplicationHistoryStore.java
@@ -198,7 +198,7 @@ public class TestMemoryApplicationHistoryStore extends
writeContainerFinishData(containerId);
}
long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb;
- Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 200);
+ Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 400);
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
index 832a79a927c..b34197ca9be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
@@ -19,26 +19,26 @@
package org.apache.hadoop.yarn.server.timeline.webapp;
import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
-import java.io.IOException;
+import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import javax.inject.Singleton;
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletRequestWrapper;
import javax.ws.rs.core.MediaType;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
@@ -46,12 +46,11 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AdminACLsManager;
import org.apache.hadoop.yarn.server.timeline.TestMemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
-import org.apache.hadoop.yarn.server.timeline.webapp.TimelineWebServices.AboutInfo;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.junit.Assert;
@@ -74,11 +73,11 @@ public class TestTimelineWebServices extends JerseyTest {
private static TimelineStore store;
private static TimelineACLsManager timelineACLsManager;
private static AdminACLsManager adminACLsManager;
- private static String remoteUser;
private long beforeTime;
private Injector injector = Guice.createInjector(new ServletModule() {
+ @SuppressWarnings("unchecked")
@Override
protected void configureServlets() {
bind(YarnJacksonJaxbJsonProvider.class);
@@ -98,7 +97,35 @@ public class TestTimelineWebServices extends JerseyTest {
adminACLsManager = new AdminACLsManager(conf);
bind(TimelineACLsManager.class).toInstance(timelineACLsManager);
serve("/*").with(GuiceContainer.class);
- filter("/*").through(TestFilter.class);
+ TimelineAuthenticationFilter taFilter = new TimelineAuthenticationFilter();
+ FilterConfig filterConfig = mock(FilterConfig.class);
+ when(filterConfig.getInitParameter(AuthenticationFilter.CONFIG_PREFIX))
+ .thenReturn(null);
+ when(filterConfig.getInitParameter(AuthenticationFilter.AUTH_TYPE))
+ .thenReturn("simple");
+ when(filterConfig.getInitParameter(
+ PseudoAuthenticationHandler.ANONYMOUS_ALLOWED)).thenReturn("true");
+ Enumeration