Merge r1609845 through r1611528 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-6584@1611531 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2014-07-18 02:21:21 +00:00
commit 04fd2012fd
131 changed files with 4555 additions and 691 deletions

View File

@ -139,6 +139,17 @@
<attach>true</attach> <attach>true</attach>
</configuration> </configuration>
</plugin> </plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins> </plugins>
</build> </build>

View File

@ -36,10 +36,6 @@ Trunk (Unreleased)
HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm) HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
HADOOP-7664. Remove warnings when overriding final parameter configuration
if the override value is the same as the final parameter value.
(Ravi Prakash via suresh)
HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin
Jetly via jitendra) Jetly via jitendra)
@ -162,9 +158,6 @@ Trunk (Unreleased)
HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9) HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
HADOOP-10607. Create API to separate credential/password storage from
applications. (Larry McCay via omalley)
HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata. HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata.
(tucu) (tucu)
@ -182,6 +175,8 @@ Trunk (Unreleased)
HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang) HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)
HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)
BUG FIXES BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled. HADOOP-9451. Fault single-layer config if node group topology is enabled.
@ -379,6 +374,16 @@ Trunk (Unreleased)
NativeAzureFileSystem#NativeAzureFsInputStream#close(). NativeAzureFileSystem#NativeAzureFsInputStream#close().
(Chen He via cnauroth) (Chen He via cnauroth)
HADOOP-10831. UserProvider is not thread safe. (Benoy Antony via umamahesh)
HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)
HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
(Mike Yoder via wang)
HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
System. (Shanyu Zhao via cnauroth)
OPTIMIZATIONS OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd) HADOOP-7761. Improve the performance of raw comparisons. (todd)
@ -397,6 +402,30 @@ Release 2.6.0 - UNRELEASED
HADOOP-10815. Implement Windows equivalent of mlock. (cnauroth) HADOOP-10815. Implement Windows equivalent of mlock. (cnauroth)
HADOOP-7664. Remove warnings when overriding final parameter configuration
if the override value is the same as the final parameter value.
(Ravi Prakash via suresh)
HADOOP-10673. Update rpc metrics when the call throws an exception. (Ming Ma
via jing9)
HADOOP-10845. Add common tests for ACLs in combination with viewfs.
(Stephen Chu via cnauroth)
HADOOP-10839. Add unregisterSource() to MetricsSystem API.
(Shanyu Zhao via cnauroth)
HADOOP-10607. Create an API to separate credentials/password storage
from applications (Larry McCay via omalley)
HADOOP-10732. Fix locking in credential update. (Ted Yu via omalley)
HADOOP-10733. Fix potential null dereference in CredShell. (Ted Yu via
omalley)
HADOOP-10610. Upgrade S3n s3.fs.buffer.dir to support multi directories.
(Ted Malaska via atm)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -412,6 +441,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-10810. Clean up native code compilation warnings. (cnauroth) HADOOP-10810. Clean up native code compilation warnings. (cnauroth)
HADOOP-9921. daemon scripts should remove pid file on stop call after stop
or process is found not running ( vinayakumarb )
HADOOP-10591. Compression codecs must use pooled direct buffers or
deallocate direct buffers when stream is closed (cmccabe)
Release 2.5.0 - UNRELEASED Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -198,6 +198,7 @@ case $startStop in
else else
echo no $command to stop echo no $command to stop
fi fi
rm -f $pid
else else
echo no $command to stop echo no $command to stop
fi fi

View File

@ -57,6 +57,16 @@ public class KeyShell extends Configured implements Tool {
private boolean userSuppliedProvider = false; private boolean userSuppliedProvider = false;
/**
* Primary entry point for the KeyShell; called via main().
*
* @param args Command line arguments.
* @return 0 on success and 1 on failure. This value is passed back to
* the unix shell, so we must follow shell return code conventions:
* the return code is an unsigned character, and 0 means success, and
* small positive integers mean failure.
* @throws Exception
*/
@Override @Override
public int run(String[] args) throws Exception { public int run(String[] args) throws Exception {
int exitCode = 0; int exitCode = 0;
@ -68,11 +78,11 @@ public int run(String[] args) throws Exception {
if (command.validate()) { if (command.validate()) {
command.execute(); command.execute();
} else { } else {
exitCode = -1; exitCode = 1;
} }
} catch (Exception e) { } catch (Exception e) {
e.printStackTrace(err); e.printStackTrace(err);
return -1; return 1;
} }
return exitCode; return exitCode;
} }
@ -86,8 +96,8 @@ public int run(String[] args) throws Exception {
* % hadoop key list [-provider providerPath] * % hadoop key list [-provider providerPath]
* % hadoop key delete keyName [--provider providerPath] [-i] * % hadoop key delete keyName [--provider providerPath] [-i]
* </pre> * </pre>
* @param args * @param args Command line arguments.
* @return * @return 0 on success, 1 on failure.
* @throws IOException * @throws IOException
*/ */
private int init(String[] args) throws IOException { private int init(String[] args) throws IOException {
@ -105,7 +115,7 @@ private int init(String[] args) throws IOException {
command = new CreateCommand(keyName, options); command = new CreateCommand(keyName, options);
if ("--help".equals(keyName)) { if ("--help".equals(keyName)) {
printKeyShellUsage(); printKeyShellUsage();
return -1; return 1;
} }
} else if (args[i].equals("delete")) { } else if (args[i].equals("delete")) {
String keyName = "--help"; String keyName = "--help";
@ -116,7 +126,7 @@ private int init(String[] args) throws IOException {
command = new DeleteCommand(keyName); command = new DeleteCommand(keyName);
if ("--help".equals(keyName)) { if ("--help".equals(keyName)) {
printKeyShellUsage(); printKeyShellUsage();
return -1; return 1;
} }
} else if (args[i].equals("roll")) { } else if (args[i].equals("roll")) {
String keyName = "--help"; String keyName = "--help";
@ -127,7 +137,7 @@ private int init(String[] args) throws IOException {
command = new RollCommand(keyName); command = new RollCommand(keyName);
if ("--help".equals(keyName)) { if ("--help".equals(keyName)) {
printKeyShellUsage(); printKeyShellUsage();
return -1; return 1;
} }
} else if ("list".equals(args[i])) { } else if ("list".equals(args[i])) {
command = new ListCommand(); command = new ListCommand();
@ -145,13 +155,13 @@ private int init(String[] args) throws IOException {
out.println("\nAttributes must be in attribute=value form, " + out.println("\nAttributes must be in attribute=value form, " +
"or quoted\nlike \"attribute = value\"\n"); "or quoted\nlike \"attribute = value\"\n");
printKeyShellUsage(); printKeyShellUsage();
return -1; return 1;
} }
if (attributes.containsKey(attr)) { if (attributes.containsKey(attr)) {
out.println("\nEach attribute must correspond to only one value:\n" + out.println("\nEach attribute must correspond to only one value:\n" +
"atttribute \"" + attr + "\" was repeated\n" ); "atttribute \"" + attr + "\" was repeated\n" );
printKeyShellUsage(); printKeyShellUsage();
return -1; return 1;
} }
attributes.put(attr, val); attributes.put(attr, val);
} else if ("--provider".equals(args[i]) && moreTokens) { } else if ("--provider".equals(args[i]) && moreTokens) {
@ -163,17 +173,17 @@ private int init(String[] args) throws IOException {
interactive = true; interactive = true;
} else if ("--help".equals(args[i])) { } else if ("--help".equals(args[i])) {
printKeyShellUsage(); printKeyShellUsage();
return -1; return 1;
} else { } else {
printKeyShellUsage(); printKeyShellUsage();
ToolRunner.printGenericCommandUsage(System.err); ToolRunner.printGenericCommandUsage(System.err);
return -1; return 1;
} }
} }
if (command == null) { if (command == null) {
printKeyShellUsage(); printKeyShellUsage();
return -1; return 1;
} }
if (!attributes.isEmpty()) { if (!attributes.isEmpty()) {
@ -491,10 +501,11 @@ public String getUsage() {
} }
/** /**
* Main program. * main() entry point for the KeyShell. While strictly speaking the
* return is void, it will System.exit() with a return code: 0 is for
* success and 1 for failure.
* *
* @param args * @param args Command line arguments.
* Command line arguments
* @throws Exception * @throws Exception
*/ */
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
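
The hunks above change KeyShell's error returns from -1 to 1 so the value survives conversion to the shell's unsigned-char exit status. A minimal sketch of how a caller sees the new convention, mirroring the TestKeyShell cases later in this change set (the bogus subcommand and class name are illustrative only, and the usual org.apache.hadoop.crypto.key package is assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyShell;

public class KeyShellExitCodeDemo {
  public static void main(String[] args) throws Exception {
    KeyShell ks = new KeyShell();
    ks.setConf(new Configuration());
    // An unrecognized subcommand prints usage and now returns 1 rather than -1,
    // matching the unsigned-char convention described in the run() javadoc.
    int rc = ks.run(new String[] {"bogus-subcommand"});
    System.exit(rc);
  }
}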

View File

@ -50,6 +50,7 @@
import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.s3.S3Exception; import org.apache.hadoop.fs.s3.S3Exception;
@ -225,6 +226,7 @@ private class NativeS3FsOutputStream extends OutputStream {
private OutputStream backupStream; private OutputStream backupStream;
private MessageDigest digest; private MessageDigest digest;
private boolean closed; private boolean closed;
private LocalDirAllocator lDirAlloc;
public NativeS3FsOutputStream(Configuration conf, public NativeS3FsOutputStream(Configuration conf,
NativeFileSystemStore store, String key, Progressable progress, NativeFileSystemStore store, String key, Progressable progress,
@ -246,11 +248,10 @@ public NativeS3FsOutputStream(Configuration conf,
} }
private File newBackupFile() throws IOException { private File newBackupFile() throws IOException {
File dir = new File(conf.get("fs.s3.buffer.dir")); if (lDirAlloc == null) {
if (!dir.mkdirs() && !dir.exists()) { lDirAlloc = new LocalDirAllocator("fs.s3.buffer.dir");
throw new IOException("Cannot create S3 buffer directory: " + dir);
} }
File result = File.createTempFile("output-", ".tmp", dir); File result = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
result.deleteOnExit(); result.deleteOnExit();
return result; return result;
} }
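
Switching to LocalDirAllocator above is what lets fs.s3.buffer.dir name several directories (HADOOP-10610). A minimal sketch of the allocator behaviour newBackupFile() now relies on; the directory paths are placeholders:

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;

public class S3BufferDirDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical buffer directories; the allocator picks one with enough
    // space and spreads temporary files across the list.
    conf.set("fs.s3.buffer.dir", "/data1/s3tmp,/data2/s3tmp");
    LocalDirAllocator alloc = new LocalDirAllocator("fs.s3.buffer.dir");
    // Size is unknown up front, exactly as in NativeS3FsOutputStream above.
    File tmp = alloc.createTmpFileForWrite("output-",
        LocalDirAllocator.SIZE_UNKNOWN, conf);
    System.out.println("buffering to " + tmp.getAbsolutePath());
  }
}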

View File

@ -37,6 +37,8 @@
import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Progressable;
@ -279,6 +281,38 @@ public void setTimes(final Path f, final long mtime, final long atime)
myFs.setTimes(fullPath(f), mtime, atime); myFs.setTimes(fullPath(f), mtime, atime);
} }
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
myFs.modifyAclEntries(fullPath(path), aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
myFs.removeAclEntries(fullPath(path), aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
myFs.removeDefaultAcl(fullPath(path));
}
@Override
public void removeAcl(Path path) throws IOException {
myFs.removeAcl(fullPath(path));
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
myFs.setAcl(fullPath(path), aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return myFs.getAclStatus(fullPath(path));
}
@Override @Override
public void setVerifyChecksum(final boolean verifyChecksum) public void setVerifyChecksum(final boolean verifyChecksum)
throws IOException, UnresolvedLinkException { throws IOException, UnresolvedLinkException {

View File

@ -50,6 +50,7 @@
import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode; import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink; import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@ -871,5 +872,46 @@ public long getDefaultBlockSize(Path f) {
public short getDefaultReplication(Path f) { public short getDefaultReplication(Path f) {
throw new NotInMountpointException(f, "getDefaultReplication"); throw new NotInMountpointException(f, "getDefaultReplication");
} }
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("modifyAclEntries", path);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAclEntries", path);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeDefaultAcl", path);
}
@Override
public void removeAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAcl", path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("setAcl", path);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
checkPathIsSlash(path);
return new AclStatus.Builder().owner(ugi.getUserName())
.group(ugi.getGroupNames()[0])
.addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
.stickyBit(false).build();
}
} }
} }
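
These overrides, together with the ViewFs and ChRootedFs changes around them, route ACL calls to the mount target while internal mount-table directories answer with a fixed read-only 555 ACL. A small sketch modeled on the new viewfs tests near the end of this change set; the /data mount is hypothetical and the local target filesystem does not itself implement ACLs:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsAclSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    ConfigUtil.addLink(conf, "/data", new URI("file:///tmp/viewfs-data"));
    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);

    // Above any mount point the answer is the synthetic minimal 555 ACL.
    AclStatus internal = viewFs.getAclStatus(new Path("/"));
    System.out.println("internal dir ACL owner: " + internal.getOwner());

    // Inside a mount point the call is delegated to the target filesystem,
    // which for file:// rejects ACL operations.
    try {
      viewFs.getAclStatus(new Path("/data"));
    } catch (UnsupportedOperationException e) {
      System.out.println("target does not support ACLs: " + e);
    }
  }
}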

View File

@ -49,6 +49,9 @@
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.local.LocalConfigKeys; import org.apache.hadoop.fs.local.LocalConfigKeys;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode; import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink; import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@ -603,6 +606,51 @@ public boolean isValidName(String src) {
return true; return true;
} }
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec);
}
@Override
public void removeDefaultAcl(Path path)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeDefaultAcl(res.remainingPath);
}
@Override
public void removeAcl(Path path)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeAcl(res.remainingPath);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.setAcl(res.remainingPath, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
return res.targetFileSystem.getAclStatus(res.remainingPath);
}
/* /*
@ -832,5 +880,46 @@ public void setVerifyChecksum(final boolean verifyChecksum)
throws AccessControlException { throws AccessControlException {
throw readOnlyMountTable("setVerifyChecksum", ""); throw readOnlyMountTable("setVerifyChecksum", "");
} }
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("modifyAclEntries", path);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAclEntries", path);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeDefaultAcl", path);
}
@Override
public void removeAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAcl", path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("setAcl", path);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
checkPathIsSlash(path);
return new AclStatus.Builder().owner(ugi.getUserName())
.group(ugi.getGroupNames()[0])
.addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
.stickyBit(false).build();
}
} }
} }

View File

@ -100,7 +100,8 @@ public BZip2Codec() { }
@Override @Override
public CompressionOutputStream createOutputStream(OutputStream out) public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException { throws IOException {
return createOutputStream(out, createCompressor()); return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
} }
/** /**
@ -153,7 +154,8 @@ public Compressor createCompressor() {
@Override @Override
public CompressionInputStream createInputStream(InputStream in) public CompressionInputStream createInputStream(InputStream in)
throws IOException { throws IOException {
return createInputStream(in, createDecompressor()); return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
} }
/** /**

View File

@ -24,6 +24,7 @@
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/** /**
* This class encapsulates a streaming compression/decompression pair. * This class encapsulates a streaming compression/decompression pair.
@ -113,4 +114,58 @@ CompressionInputStream createInputStream(InputStream in,
* @return the extension including the '.' * @return the extension including the '.'
*/ */
String getDefaultExtension(); String getDefaultExtension();
static class Util {
/**
* Create an output stream with a codec taken from the global CodecPool.
*
* @param codec The codec to use to create the output stream.
* @param conf The configuration to use if we need to create a new codec.
* @param out The output stream to wrap.
* @return The new output stream
* @throws IOException
*/
static CompressionOutputStream createOutputStreamWithCodecPool(
CompressionCodec codec, Configuration conf, OutputStream out)
throws IOException {
Compressor compressor = CodecPool.getCompressor(codec, conf);
CompressionOutputStream stream = null;
try {
stream = codec.createOutputStream(out, compressor);
} finally {
if (stream == null) {
CodecPool.returnCompressor(compressor);
} else {
stream.setTrackedCompressor(compressor);
}
}
return stream;
}
/**
* Create an input stream with a codec taken from the global CodecPool.
*
* @param codec The codec to use to create the input stream.
* @param conf The configuration to use if we need to create a new codec.
* @param in The input stream to wrap.
* @return The new input stream
* @throws IOException
*/
static CompressionInputStream createInputStreamWithCodecPool(
CompressionCodec codec, Configuration conf, InputStream in)
throws IOException {
Decompressor decompressor = CodecPool.getDecompressor(codec);
CompressionInputStream stream = null;
try {
stream = codec.createInputStream(in, decompressor);
} finally {
if (stream == null) {
CodecPool.returnDecompressor(decompressor);
} else {
stream.setTrackedDecompressor(decompressor);
}
}
return stream;
}
}
} }
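
These helpers give every codec a pooled default for the one-argument createOutputStream/createInputStream overloads: the Compressor or Decompressor is borrowed from CodecPool and handed back on close() through the tracking hooks added to CompressionOutputStream and CompressionInputStream below. A minimal caller-side sketch; the output path is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.DefaultCodec;

public class PooledCodecDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(conf);
    FileSystem fs = FileSystem.getLocal(conf);
    // The Compressor behind this stream now comes from CodecPool and is
    // returned automatically by close(), so calling this in a loop no longer
    // strands native direct buffers.
    CompressionOutputStream out =
        codec.createOutputStream(fs.create(new Path("/tmp/pooled-demo.deflate")));
    out.write("hello, pooled codec".getBytes("UTF-8"));
    out.close();
  }
}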

View File

@ -41,6 +41,8 @@ public abstract class CompressionInputStream extends InputStream implements Seek
protected final InputStream in; protected final InputStream in;
protected long maxAvailableData = 0L; protected long maxAvailableData = 0L;
private Decompressor trackedDecompressor;
/** /**
* Create a compression input stream that reads * Create a compression input stream that reads
* the decompressed bytes from the given stream. * the decompressed bytes from the given stream.
@ -58,6 +60,10 @@ protected CompressionInputStream(InputStream in) throws IOException {
@Override @Override
public void close() throws IOException { public void close() throws IOException {
in.close(); in.close();
if (trackedDecompressor != null) {
CodecPool.returnDecompressor(trackedDecompressor);
trackedDecompressor = null;
}
} }
/** /**
@ -112,4 +118,8 @@ public void seek(long pos) throws UnsupportedOperationException {
public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException { public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
void setTrackedDecompressor(Decompressor decompressor) {
trackedDecompressor = decompressor;
}
} }

View File

@ -34,7 +34,13 @@ public abstract class CompressionOutputStream extends OutputStream {
* The output stream to be compressed. * The output stream to be compressed.
*/ */
protected final OutputStream out; protected final OutputStream out;
/**
* If non-null, this is the Compressor object that we should call
* CodecPool#returnCompressor on when this stream is closed.
*/
private Compressor trackedCompressor;
/** /**
* Create a compression output stream that writes * Create a compression output stream that writes
* the compressed bytes to the given stream. * the compressed bytes to the given stream.
@ -43,11 +49,19 @@ public abstract class CompressionOutputStream extends OutputStream {
protected CompressionOutputStream(OutputStream out) { protected CompressionOutputStream(OutputStream out) {
this.out = out; this.out = out;
} }
void setTrackedCompressor(Compressor compressor) {
trackedCompressor = compressor;
}
@Override @Override
public void close() throws IOException { public void close() throws IOException {
finish(); finish();
out.close(); out.close();
if (trackedCompressor != null) {
CodecPool.returnCompressor(trackedCompressor);
trackedCompressor = null;
}
} }
@Override @Override

View File

@ -51,14 +51,8 @@ public Configuration getConf() {
@Override @Override
public CompressionOutputStream createOutputStream(OutputStream out) public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException { throws IOException {
// This may leak memory if called in a loop. The createCompressor() call return CompressionCodec.Util.
// may cause allocation of an untracked direct-backed buffer if native createOutputStreamWithCodecPool(this, conf, out);
// libs are being used (even if you close the stream). A Compressor
// object should be reused between successive calls.
LOG.warn("DefaultCodec.createOutputStream() may leak memory. "
+ "Create a compressor first.");
return new CompressorStream(out, createCompressor(),
conf.getInt("io.file.buffer.size", 4*1024));
} }
@Override @Override
@ -82,8 +76,8 @@ public Compressor createCompressor() {
@Override @Override
public CompressionInputStream createInputStream(InputStream in) public CompressionInputStream createInputStream(InputStream in)
throws IOException { throws IOException {
return new DecompressorStream(in, createDecompressor(), return CompressionCodec.Util.
conf.getInt("io.file.buffer.size", 4*1024)); createInputStreamWithCodecPool(this, conf, in);
} }
@Override @Override
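
With DefaultCodec now routed through the pool, the removed warning's advice remains relevant when a caller wants to manage the Compressor lifetime itself: borrow it from CodecPool once, pass it to the two-argument overload, and return it when finished. A sketch of that manual pattern:

import java.io.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.DefaultCodec;

public class ReuseCompressorDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(conf);
    Compressor compressor = CodecPool.getCompressor(codec, conf);
    try {
      for (int i = 0; i < 3; i++) {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        // Passing the pooled compressor explicitly keeps a single native
        // buffer alive across all three streams.
        CompressionOutputStream out = codec.createOutputStream(sink, compressor);
        out.write(("record-" + i).getBytes("UTF-8"));
        out.close();
        compressor.reset();
      }
    } finally {
      CodecPool.returnCompressor(compressor);
    }
  }
}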

View File

@ -159,10 +159,11 @@ public void resetState() throws IOException {
@Override @Override
public CompressionOutputStream createOutputStream(OutputStream out) public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException { throws IOException {
return (ZlibFactory.isNativeZlibLoaded(conf)) ? if (!ZlibFactory.isNativeZlibLoaded(conf)) {
new CompressorStream(out, createCompressor(), return new GzipOutputStream(out);
conf.getInt("io.file.buffer.size", 4*1024)) : }
new GzipOutputStream(out); return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
} }
@Override @Override
@ -192,8 +193,9 @@ public Class<? extends Compressor> getCompressorType() {
@Override @Override
public CompressionInputStream createInputStream(InputStream in) public CompressionInputStream createInputStream(InputStream in)
throws IOException { throws IOException {
return createInputStream(in, null); return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
} }
@Override @Override

View File

@ -84,7 +84,8 @@ public static String getLibraryName() {
@Override @Override
public CompressionOutputStream createOutputStream(OutputStream out) public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException { throws IOException {
return createOutputStream(out, createCompressor()); return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
} }
/** /**
@ -157,7 +158,8 @@ public Compressor createCompressor() {
@Override @Override
public CompressionInputStream createInputStream(InputStream in) public CompressionInputStream createInputStream(InputStream in)
throws IOException { throws IOException {
return createInputStream(in, createDecompressor()); return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
} }
/** /**

View File

@ -95,7 +95,8 @@ public static String getLibraryName() {
@Override @Override
public CompressionOutputStream createOutputStream(OutputStream out) public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException { throws IOException {
return createOutputStream(out, createCompressor()); return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
} }
/** /**
@ -158,7 +159,8 @@ public Compressor createCompressor() {
@Override @Override
public CompressionInputStream createInputStream(InputStream in) public CompressionInputStream createInputStream(InputStream in)
throws IOException { throws IOException {
return createInputStream(in, createDecompressor()); return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
} }
/** /**

View File

@ -599,24 +599,35 @@ public Writable call(RPC.Server server, String connectionProtocolName,
.mergeFrom(request.theRequestRead).build(); .mergeFrom(request.theRequestRead).build();
Message result; Message result;
long startTime = Time.now();
int qTime = (int) (startTime - receiveTime);
Exception exception = null;
try { try {
long startTime = Time.now();
server.rpcDetailedMetrics.init(protocolImpl.protocolClass); server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
result = service.callBlockingMethod(methodDescriptor, null, param); result = service.callBlockingMethod(methodDescriptor, null, param);
int processingTime = (int) (Time.now() - startTime);
int qTime = (int) (startTime - receiveTime);
if (LOG.isDebugEnabled()) {
LOG.info("Served: " + methodName + " queueTime= " + qTime +
" procesingTime= " + processingTime);
}
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(methodName,
processingTime);
} catch (ServiceException e) { } catch (ServiceException e) {
exception = (Exception) e.getCause();
throw (Exception) e.getCause(); throw (Exception) e.getCause();
} catch (Exception e) { } catch (Exception e) {
exception = e;
throw e; throw e;
} finally {
int processingTime = (int) (Time.now() - startTime);
if (LOG.isDebugEnabled()) {
String msg = "Served: " + methodName + " queueTime= " + qTime +
" procesingTime= " + processingTime;
if (exception != null) {
msg += " exception= " + exception.getClass().getSimpleName();
}
LOG.debug(msg);
}
String detailedMetricsName = (exception == null) ?
methodName :
exception.getClass().getSimpleName();
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
processingTime);
} }
return new RpcResponseWrapper(result); return new RpcResponseWrapper(result);
} }
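
Both RPC invokers now account for queue and processing time in a finally block, so a call that throws still updates rpcMetrics, and the detailed metric is keyed by the exception's simple class name (which the new TestRPC assertion on IOExceptionNumOps relies on). A self-contained sketch of that accounting pattern; the map below stands in for the real metrics registry and is not a Hadoop API:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class FinallyMetricsSketch {
  static final Map<String, Long> PROCESSING_TIME = new ConcurrentHashMap<>();

  static Object invoke(String methodName) throws Exception {
    long start = System.currentTimeMillis();
    Exception exception = null;
    try {
      return doCall(methodName);          // may throw
    } catch (Exception e) {
      exception = e;
      throw e;
    } finally {
      long elapsed = System.currentTimeMillis() - start;
      // Failed calls are recorded under the exception's simple class name,
      // successful ones under the method name.
      String key = (exception == null)
          ? methodName : exception.getClass().getSimpleName();
      PROCESSING_TIME.merge(key, elapsed, Long::sum);
    }
  }

  private static Object doCall(String methodName) throws Exception {
    if ("bad".equals(methodName)) {
      throw new java.io.IOException("simulated failure");
    }
    return methodName + "-ok";
  }

  public static void main(String[] args) {
    try { invoke("good"); } catch (Exception ignored) { }
    try { invoke("bad"); } catch (Exception ignored) { }
    System.out.println(PROCESSING_TIME);
  }
}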

View File

@ -355,8 +355,8 @@ public static boolean isRpcInvocation() {
private int readThreads; // number of read threads private int readThreads; // number of read threads
private int readerPendingConnectionQueue; // number of connections to queue per read thread private int readerPendingConnectionQueue; // number of connections to queue per read thread
private Class<? extends Writable> rpcRequestClass; // class used for deserializing the rpc request private Class<? extends Writable> rpcRequestClass; // class used for deserializing the rpc request
protected RpcMetrics rpcMetrics; final protected RpcMetrics rpcMetrics;
protected RpcDetailedMetrics rpcDetailedMetrics; final protected RpcDetailedMetrics rpcDetailedMetrics;
private Configuration conf; private Configuration conf;
private String portRangeConfig = null; private String portRangeConfig = null;
@ -2494,12 +2494,8 @@ public synchronized void stop() {
listener.doStop(); listener.doStop();
responder.interrupt(); responder.interrupt();
notifyAll(); notifyAll();
if (this.rpcMetrics != null) { this.rpcMetrics.shutdown();
this.rpcMetrics.shutdown(); this.rpcDetailedMetrics.shutdown();
}
if (this.rpcDetailedMetrics != null) {
this.rpcDetailedMetrics.shutdown();
}
} }
/** Wait for the server to be stopped. /** Wait for the server to be stopped.

View File

@ -471,37 +471,29 @@ public Writable call(org.apache.hadoop.ipc.RPC.Server server,
// Invoke the protocol method // Invoke the protocol method
long startTime = Time.now();
int qTime = (int) (startTime-receivedTime);
Exception exception = null;
try { try {
long startTime = Time.now(); Method method =
Method method =
protocolImpl.protocolClass.getMethod(call.getMethodName(), protocolImpl.protocolClass.getMethod(call.getMethodName(),
call.getParameterClasses()); call.getParameterClasses());
method.setAccessible(true); method.setAccessible(true);
server.rpcDetailedMetrics.init(protocolImpl.protocolClass); server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
Object value = Object value =
method.invoke(protocolImpl.protocolImpl, call.getParameters()); method.invoke(protocolImpl.protocolImpl, call.getParameters());
int processingTime = (int) (Time.now() - startTime);
int qTime = (int) (startTime-receivedTime);
if (LOG.isDebugEnabled()) {
LOG.debug("Served: " + call.getMethodName() +
" queueTime= " + qTime +
" procesingTime= " + processingTime);
}
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(call.getMethodName(),
processingTime);
if (server.verbose) log("Return: "+value); if (server.verbose) log("Return: "+value);
return new ObjectWritable(method.getReturnType(), value); return new ObjectWritable(method.getReturnType(), value);
} catch (InvocationTargetException e) { } catch (InvocationTargetException e) {
Throwable target = e.getTargetException(); Throwable target = e.getTargetException();
if (target instanceof IOException) { if (target instanceof IOException) {
exception = (IOException)target;
throw (IOException)target; throw (IOException)target;
} else { } else {
IOException ioe = new IOException(target.toString()); IOException ioe = new IOException(target.toString());
ioe.setStackTrace(target.getStackTrace()); ioe.setStackTrace(target.getStackTrace());
exception = ioe;
throw ioe; throw ioe;
} }
} catch (Throwable e) { } catch (Throwable e) {
@ -510,8 +502,27 @@ public Writable call(org.apache.hadoop.ipc.RPC.Server server,
} }
IOException ioe = new IOException(e.toString()); IOException ioe = new IOException(e.toString());
ioe.setStackTrace(e.getStackTrace()); ioe.setStackTrace(e.getStackTrace());
exception = ioe;
throw ioe; throw ioe;
} } finally {
int processingTime = (int) (Time.now() - startTime);
if (LOG.isDebugEnabled()) {
String msg = "Served: " + call.getMethodName() +
" queueTime= " + qTime +
" procesingTime= " + processingTime;
if (exception != null) {
msg += " exception= " + exception.getClass().getSimpleName();
}
LOG.debug(msg);
}
String detailedMetricsName = (exception == null) ?
call.getMethodName() :
exception.getClass().getSimpleName();
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
processingTime);
}
} }
} }
} }

View File

@ -54,6 +54,12 @@ public abstract class MetricsSystem implements MetricsSystemMXBean {
*/ */
public abstract <T> T register(String name, String desc, T source); public abstract <T> T register(String name, String desc, T source);
/**
* Unregister a metrics source
* @param name of the source. This is the name you use to call register()
*/
public abstract void unregisterSource(String name);
/** /**
* Register a metrics source (deriving name and description from the object) * Register a metrics source (deriving name and description from the object)
* @param <T> the actual type of the source object * @param <T> the actual type of the source object

View File

@ -85,7 +85,7 @@ class MetricsConfig extends SubsetConfiguration {
private ClassLoader pluginLoader; private ClassLoader pluginLoader;
MetricsConfig(Configuration c, String prefix) { MetricsConfig(Configuration c, String prefix) {
super(c, prefix, "."); super(c, prefix.toLowerCase(Locale.US), ".");
} }
static MetricsConfig create(String prefix) { static MetricsConfig create(String prefix) {
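
Because the prefix is lower-cased before the configuration subset is taken, metrics2 property keys are written with a lower-case prefix even when the metrics system itself is named with capitals, which is why the test configurations below move from "Test.sink..." to "test.sink...". A small sketch mirroring those tests; ConfigBuilder and TestMetricsConfig are test-scope helpers published in hadoop-common's new test-jar:

import org.apache.hadoop.metrics2.impl.ConfigBuilder;
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
import org.apache.hadoop.metrics2.impl.TestMetricsConfig;

public class LowercasePrefixDemo {
  public static void main(String[] args) {
    // The system below is created with the prefix "Test", but its keys are
    // looked up under "test.".
    new ConfigBuilder().add("*.period", 8)
        .add("test.*.source.filter.exclude", "s0")
        .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
    MetricsSystemImpl ms = new MetricsSystemImpl("Test");
    ms.start();
    ms.shutdown();
  }
}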

View File

@ -232,6 +232,17 @@ T register(String name, String desc, T source) {
return source; return source;
} }
@Override public synchronized
void unregisterSource(String name) {
if (sources.containsKey(name)) {
sources.get(name).stop();
sources.remove(name);
}
if (allSources.containsKey(name)) {
allSources.remove(name);
}
}
synchronized synchronized
void registerSource(String name, String desc, MetricsSource source) { void registerSource(String name, String desc, MetricsSource source) {
checkNotNull(config, "config"); checkNotNull(config, "config");
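
The new unregisterSource() lets a short-lived component remove its source under the same name it used at register() time, as exercised by the testUnregisterSource case later in this change set. A minimal sketch; the source class and names here are illustrative only:

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class UnregisterSourceDemo {
  // Just enough of a source to register and unregister.
  static class NoopSource implements MetricsSource {
    @Override
    public void getMetrics(MetricsCollector collector, boolean all) {
      collector.addRecord("noop");
    }
  }

  public static void main(String[] args) {
    MetricsSystem ms = DefaultMetricsSystem.initialize("Demo");
    ms.register("noopSource", "demo source", new NoopSource());
    // Must match the name passed to register(); works whether or not the
    // metrics system has been started.
    ms.unregisterSource("noopSource");
    ms.shutdown();
  }
}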

View File

@ -29,6 +29,8 @@
* abstraction to separate credential storage from users of them. It * abstraction to separate credential storage from users of them. It
* is intended to support getting or storing passwords in a variety of ways, * is intended to support getting or storing passwords in a variety of ways,
* including third party bindings. * including third party bindings.
*
* <code>CredentialProvider</code> implementations must be thread safe.
*/ */
@InterfaceAudience.Public @InterfaceAudience.Public
@InterfaceStability.Unstable @InterfaceStability.Unstable

View File

@ -264,7 +264,7 @@ public boolean validate() {
alias + " from CredentialProvider " + provider.toString() + alias + " from CredentialProvider " + provider.toString() +
". Continue?:"); ". Continue?:");
if (!cont) { if (!cont) {
out.println("Nothing has been be deleted."); out.println("Nothing has been deleted.");
} }
return cont; return cont;
} catch (IOException e) { } catch (IOException e) {
@ -373,12 +373,12 @@ protected char[] promptForCredential() throws IOException {
char[] newPassword2 = c.readPassword("Enter password again: "); char[] newPassword2 = c.readPassword("Enter password again: ");
noMatch = !Arrays.equals(newPassword1, newPassword2); noMatch = !Arrays.equals(newPassword1, newPassword2);
if (noMatch) { if (noMatch) {
Arrays.fill(newPassword1, ' '); if (newPassword1 != null) Arrays.fill(newPassword1, ' ');
c.format("Passwords don't match. Try again.%n"); c.format("Passwords don't match. Try again.%n");
} else { } else {
cred = newPassword1; cred = newPassword1;
} }
Arrays.fill(newPassword2, ' '); if (newPassword2 != null) Arrays.fill(newPassword2, ' ');
} while (noMatch); } while (noMatch);
return cred; return cred;
} }

View File

@ -230,6 +230,7 @@ public void deleteCredentialEntry(String name) throws IOException {
CredentialEntry innerSetCredential(String alias, char[] material) CredentialEntry innerSetCredential(String alias, char[] material)
throws IOException { throws IOException {
writeLock.lock();
try { try {
keyStore.setKeyEntry(alias, new SecretKeySpec( keyStore.setKeyEntry(alias, new SecretKeySpec(
new String(material).getBytes("UTF-8"), "AES"), new String(material).getBytes("UTF-8"), "AES"),
@ -237,6 +238,8 @@ CredentialEntry innerSetCredential(String alias, char[] material)
} catch (KeyStoreException e) { } catch (KeyStoreException e) {
throw new IOException("Can't store credential " + alias + " in " + this, throw new IOException("Can't store credential " + alias + " in " + this,
e); e);
} finally {
writeLock.unlock();
} }
changed = true; changed = true;
return new CredentialEntry(alias, material); return new CredentialEntry(alias, material);
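
The credential write path above now takes the provider's write lock and releases it in a finally block, in line with the thread-safety note added to the CredentialProvider javadoc earlier. A self-contained sketch of that locking discipline in plain Java (not a Hadoop API):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class WriteLockSketch {
  private final Map<String, char[]> store = new HashMap<>();
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  public void setCredential(String alias, char[] material) {
    lock.writeLock().lock();
    try {
      store.put(alias, material.clone());  // may throw; the lock is still released
    } finally {
      lock.writeLock().unlock();
    }
  }

  public char[] getCredential(String alias) {
    lock.readLock().lock();
    try {
      char[] value = store.get(alias);
      return value == null ? null : value.clone();
    } finally {
      lock.readLock().unlock();
    }
  }
}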

View File

@ -55,7 +55,7 @@ public boolean isTransient() {
} }
@Override @Override
public CredentialEntry getCredentialEntry(String alias) { public synchronized CredentialEntry getCredentialEntry(String alias) {
byte[] bytes = credentials.getSecretKey(new Text(alias)); byte[] bytes = credentials.getSecretKey(new Text(alias));
if (bytes == null) { if (bytes == null) {
return null; return null;
@ -64,7 +64,7 @@ public CredentialEntry getCredentialEntry(String alias) {
} }
@Override @Override
public CredentialEntry createCredentialEntry(String name, char[] credential) public synchronized CredentialEntry createCredentialEntry(String name, char[] credential)
throws IOException { throws IOException {
Text nameT = new Text(name); Text nameT = new Text(name);
if (credentials.getSecretKey(nameT) != null) { if (credentials.getSecretKey(nameT) != null) {
@ -77,7 +77,7 @@ public CredentialEntry createCredentialEntry(String name, char[] credential)
} }
@Override @Override
public void deleteCredentialEntry(String name) throws IOException { public synchronized void deleteCredentialEntry(String name) throws IOException {
byte[] cred = credentials.getSecretKey(new Text(name)); byte[] cred = credentials.getSecretKey(new Text(name));
if (cred != null) { if (cred != null) {
credentials.removeSecretKey(new Text(name)); credentials.removeSecretKey(new Text(name));
@ -95,7 +95,7 @@ public String toString() {
} }
@Override @Override
public void flush() { public synchronized void flush() {
user.addCredentials(credentials); user.addCredentials(credentials);
} }
@ -112,7 +112,7 @@ public CredentialProvider createProvider(URI providerName,
} }
@Override @Override
public List<String> getAliases() throws IOException { public synchronized List<String> getAliases() throws IOException {
List<String> list = new ArrayList<String>(); List<String> list = new ArrayList<String>();
List<Text> aliases = credentials.getAllSecretKeys(); List<Text> aliases = credentials.getAllSecretKeys();
for (Text key : aliases) { for (Text key : aliases) {

View File

@ -127,7 +127,7 @@ User Commands
Runs a HDFS filesystem checking utility. Runs a HDFS filesystem checking utility.
See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info. See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info.
Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]]>>> Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]] [-showprogress]>>>
*------------------+---------------------------------------------+ *------------------+---------------------------------------------+
|| COMMAND_OPTION || Description || COMMAND_OPTION || Description
@ -148,6 +148,8 @@ User Commands
*------------------+---------------------------------------------+ *------------------+---------------------------------------------+
| -racks | Print out network topology for data-node locations. | -racks | Print out network topology for data-node locations.
*------------------+---------------------------------------------+ *------------------+---------------------------------------------+
| -showprogress | Print out progress in output. Default is OFF (no progress).
*------------------+---------------------------------------------+
* <<<fetchdt>>> * <<<fetchdt>>>

View File

@ -161,7 +161,7 @@ public void testInvalidKeySize() throws Exception {
KeyShell ks = new KeyShell(); KeyShell ks = new KeyShell();
ks.setConf(new Configuration()); ks.setConf(new Configuration());
rc = ks.run(args1); rc = ks.run(args1);
assertEquals(-1, rc); assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created.")); assertTrue(outContent.toString().contains("key1 has not been created."));
} }
@ -174,7 +174,7 @@ public void testInvalidCipher() throws Exception {
KeyShell ks = new KeyShell(); KeyShell ks = new KeyShell();
ks.setConf(new Configuration()); ks.setConf(new Configuration());
rc = ks.run(args1); rc = ks.run(args1);
assertEquals(-1, rc); assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created.")); assertTrue(outContent.toString().contains("key1 has not been created."));
} }
@ -187,7 +187,7 @@ public void testInvalidProvider() throws Exception {
KeyShell ks = new KeyShell(); KeyShell ks = new KeyShell();
ks.setConf(new Configuration()); ks.setConf(new Configuration());
rc = ks.run(args1); rc = ks.run(args1);
assertEquals(-1, rc); assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " + assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured.")); "KeyProviders configured."));
} }
@ -216,7 +216,7 @@ public void testTransientProviderOnlyConfig() throws Exception {
config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///"); config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
ks.setConf(config); ks.setConf(config);
rc = ks.run(args1); rc = ks.run(args1);
assertEquals(-1, rc); assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " + assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured.")); "KeyProviders configured."));
} }
@ -262,19 +262,19 @@ public void testAttributes() throws Exception {
final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider, final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider,
"--attr", "=bar"}; "--attr", "=bar"};
rc = ks.run(args2); rc = ks.run(args2);
assertEquals(-1, rc); assertEquals(1, rc);
/* Not in attribute = value form */ /* Not in attribute = value form */
outContent.reset(); outContent.reset();
args2[5] = "foo"; args2[5] = "foo";
rc = ks.run(args2); rc = ks.run(args2);
assertEquals(-1, rc); assertEquals(1, rc);
/* No attribute or value */ /* No attribute or value */
outContent.reset(); outContent.reset();
args2[5] = "="; args2[5] = "=";
rc = ks.run(args2); rc = ks.run(args2);
assertEquals(-1, rc); assertEquals(1, rc);
/* Legal: attribute is a, value is b=c */ /* Legal: attribute is a, value is b=c */
outContent.reset(); outContent.reset();
@ -308,7 +308,7 @@ public void testAttributes() throws Exception {
"--attr", "foo=bar", "--attr", "foo=bar",
"--attr", "foo=glarch"}; "--attr", "foo=glarch"};
rc = ks.run(args4); rc = ks.run(args4);
assertEquals(-1, rc); assertEquals(1, rc);
/* Clean up to be a good citizen */ /* Clean up to be a good citizen */
deleteKey(ks, "keyattr1"); deleteKey(ks, "keyattr1");

View File

@ -20,6 +20,7 @@
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.ArrayList;
import java.util.List; import java.util.List;
@ -28,9 +29,16 @@
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FileSystemTestHelper;
import static org.apache.hadoop.fs.FileSystemTestHelper.*; import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import org.apache.hadoop.fs.permission.AclEntry;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.ConfigUtil; import org.apache.hadoop.fs.viewfs.ConfigUtil;
@ -38,6 +46,7 @@
import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint; import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.junit.After; import org.junit.After;
import org.junit.Assert; import org.junit.Assert;
@ -96,7 +105,6 @@ public void setUp() throws Exception {
// in the test root // in the test root
// Set up the defaultMT in the config with our mount point links // Set up the defaultMT in the config with our mount point links
//Configuration conf = new Configuration();
conf = ViewFileSystemTestSetup.createConfig(); conf = ViewFileSystemTestSetup.createConfig();
setupMountPoints(); setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf); fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
@ -720,4 +728,49 @@ public void testRootReadableExecutable() throws IOException {
Assert.assertTrue("Other-readable permission not set!", Assert.assertTrue("Other-readable permission not set!",
perms.getOtherAction().implies(FsAction.READ)); perms.getOtherAction().implies(FsAction.READ));
} }
/**
* Verify the behavior of ACL operations on paths above the root of
* any mount table entry.
*/
@Test(expected=AccessControlException.class)
public void testInternalModifyAclEntries() throws IOException {
fsView.modifyAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAclEntries() throws IOException {
fsView.removeAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveDefaultAcl() throws IOException {
fsView.removeDefaultAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAcl() throws IOException {
fsView.removeAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalSetAcl() throws IOException {
fsView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
}
@Test
public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
AclStatus aclStatus = fsView.getAclStatus(new Path("/internalDir"));
assertEquals(aclStatus.getOwner(), currentUser.getUserName());
assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
assertEquals(aclStatus.getEntries(),
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}
} }

View File

@ -22,10 +22,14 @@
import static org.apache.hadoop.fs.FileContextTestHelper.exists; import static org.apache.hadoop.fs.FileContextTestHelper.exists;
import static org.apache.hadoop.fs.FileContextTestHelper.isDir; import static org.apache.hadoop.fs.FileContextTestHelper.isDir;
import static org.apache.hadoop.fs.FileContextTestHelper.isFile; import static org.apache.hadoop.fs.FileContextTestHelper.isFile;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.net.URI; import java.net.URI;
import java.util.ArrayList;
import java.util.List; import java.util.List;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -39,8 +43,12 @@
import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint; import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.junit.After; import org.junit.After;
import org.junit.Assert; import org.junit.Assert;
@ -695,4 +703,48 @@ public void testInternalRenameFromSlash() throws IOException {
public void testInternalSetOwner() throws IOException { public void testInternalSetOwner() throws IOException {
fcView.setOwner(new Path("/internalDir"), "foo", "bar"); fcView.setOwner(new Path("/internalDir"), "foo", "bar");
} }
/**
* Verify the behavior of ACL operations on paths above the root of
* any mount table entry.
*/
@Test(expected=AccessControlException.class)
public void testInternalModifyAclEntries() throws IOException {
fcView.modifyAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAclEntries() throws IOException {
fcView.removeAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveDefaultAcl() throws IOException {
fcView.removeDefaultAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAcl() throws IOException {
fcView.removeAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalSetAcl() throws IOException {
fcView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
}
@Test
public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
assertEquals(aclStatus.getOwner(), currentUser.getUserName());
assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
assertEquals(aclStatus.getEntries(),
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}
} }

View File

@ -496,6 +496,8 @@ private void testCallsInternal(Configuration conf) throws IOException {
caught = true; caught = true;
} }
assertTrue(caught); assertTrue(caught);
rb = getMetrics(server.rpcDetailedMetrics.name());
assertCounter("IOExceptionNumOps", 1L, rb);
proxy.testServerGet(); proxy.testServerGet();

View File

@ -60,12 +60,12 @@ public class TestGangliaMetrics {
@Test @Test
public void testTagsForPrefix() throws Exception { public void testTagsForPrefix() throws Exception {
ConfigBuilder cb = new ConfigBuilder() ConfigBuilder cb = new ConfigBuilder()
.add("Test.sink.ganglia.tagsForPrefix.all", "*") .add("test.sink.ganglia.tagsForPrefix.all", "*")
.add("Test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " + .add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
"NumActiveSources") "NumActiveSources")
.add("Test.sink.ganglia.tagsForPrefix.none", ""); .add("test.sink.ganglia.tagsForPrefix.none", "");
GangliaSink30 sink = new GangliaSink30(); GangliaSink30 sink = new GangliaSink30();
sink.init(cb.subset("Test.sink.ganglia")); sink.init(cb.subset("test.sink.ganglia"));
List<MetricsTag> tags = new ArrayList<MetricsTag>(); List<MetricsTag> tags = new ArrayList<MetricsTag>();
tags.add(new MetricsTag(MsInfo.Context, "all")); tags.add(new MetricsTag(MsInfo.Context, "all"));
@ -98,8 +98,8 @@ public void testTagsForPrefix() throws Exception {
@Test public void testGangliaMetrics2() throws Exception { @Test public void testGangliaMetrics2() throws Exception {
ConfigBuilder cb = new ConfigBuilder().add("default.period", 10) ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
.add("Test.sink.gsink30.context", "test") // filter out only "test" .add("test.sink.gsink30.context", "test") // filter out only "test"
.add("Test.sink.gsink31.context", "test") // filter out only "test" .add("test.sink.gsink31.context", "test") // filter out only "test"
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test"); MetricsSystemImpl ms = new MetricsSystemImpl("Test");

View File

@ -88,11 +88,11 @@ public static class TestSink implements MetricsSink {
DefaultMetricsSystem.shutdown(); DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8) new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString()) //.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("Test.sink.test.class", TestSink.class.getName()) .add("test.sink.test.class", TestSink.class.getName())
.add("Test.*.source.filter.exclude", "s0") .add("test.*.source.filter.exclude", "s0")
.add("Test.source.s1.metric.filter.exclude", "X*") .add("test.source.s1.metric.filter.exclude", "X*")
.add("Test.sink.sink1.metric.filter.exclude", "Y*") .add("test.sink.sink1.metric.filter.exclude", "Y*")
.add("Test.sink.sink2.metric.filter.exclude", "Y*") .add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test"); MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start(); ms.start();
@ -130,11 +130,11 @@ public static class TestSink implements MetricsSink {
DefaultMetricsSystem.shutdown(); DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8) new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString()) //.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("Test.sink.test.class", TestSink.class.getName()) .add("test.sink.test.class", TestSink.class.getName())
.add("Test.*.source.filter.exclude", "s0") .add("test.*.source.filter.exclude", "s0")
.add("Test.source.s1.metric.filter.exclude", "X*") .add("test.source.s1.metric.filter.exclude", "X*")
.add("Test.sink.sink1.metric.filter.exclude", "Y*") .add("test.sink.sink1.metric.filter.exclude", "Y*")
.add("Test.sink.sink2.metric.filter.exclude", "Y*") .add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test"); MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start(); ms.start();
@ -169,13 +169,14 @@ public static class TestSink implements MetricsSink {
@Test public void testMultiThreadedPublish() throws Exception { @Test public void testMultiThreadedPublish() throws Exception {
final int numThreads = 10; final int numThreads = 10;
new ConfigBuilder().add("*.period", 80) new ConfigBuilder().add("*.period", 80)
.add("Test.sink.Collector." + MetricsConfig.QUEUE_CAPACITY_KEY, .add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
numThreads) numThreads)
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
final MetricsSystemImpl ms = new MetricsSystemImpl("Test"); final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start(); ms.start();
final CollectingSink sink = new CollectingSink(numThreads); final CollectingSink sink = new CollectingSink(numThreads);
ms.registerSink("Collector", ms.registerSink("collector",
"Collector of values from all threads.", sink); "Collector of values from all threads.", sink);
final TestSource[] sources = new TestSource[numThreads]; final TestSource[] sources = new TestSource[numThreads];
final Thread[] threads = new Thread[numThreads]; final Thread[] threads = new Thread[numThreads];
@ -280,10 +281,10 @@ public void flush() {
@Test public void testHangingSink() { @Test public void testHangingSink() {
new ConfigBuilder().add("*.period", 8) new ConfigBuilder().add("*.period", 8)
.add("Test.sink.test.class", TestSink.class.getName()) .add("test.sink.test.class", TestSink.class.getName())
.add("Test.sink.hanging.retry.delay", "1") .add("test.sink.hanging.retry.delay", "1")
.add("Test.sink.hanging.retry.backoff", "1.01") .add("test.sink.hanging.retry.backoff", "1.01")
.add("Test.sink.hanging.retry.count", "0") .add("test.sink.hanging.retry.count", "0")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test"); MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start(); ms.start();
@ -379,6 +380,23 @@ public void flush() {
ms.shutdown(); ms.shutdown();
} }
@Test public void testUnregisterSource() {
MetricsSystem ms = new MetricsSystemImpl();
TestSource ts1 = new TestSource("ts1");
TestSource ts2 = new TestSource("ts2");
ms.register("ts1", "", ts1);
ms.register("ts2", "", ts2);
MetricsSource s1 = ms.getSource("ts1");
assertNotNull(s1);
// should work when metrics system is not started
ms.unregisterSource("ts1");
s1 = ms.getSource("ts1");
assertNull(s1);
MetricsSource s2 = ms.getSource("ts2");
assertNotNull(s2);
ms.shutdown();
}
private void checkMetricsRecords(List<MetricsRecord> recs) { private void checkMetricsRecords(List<MetricsRecord> recs) {
LOG.debug(recs); LOG.debug(recs);
MetricsRecord r = recs.get(0); MetricsRecord r = recs.get(0);
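The new testUnregisterSource case exercises the unregisterSource() method this change set adds to the MetricsSystem API. A typical use, sketched below for a hypothetical per-instance component (class name, source naming scheme, and metric are all illustrative), is to pair register() at startup with unregisterSource() at close so that long-lived processes do not accumulate stale sources:

    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;

    // Illustrative component: registers a metrics source when created and
    // unregisters it on close, so repeated create/close cycles do not leak sources.
    // Assumes the default metrics system was initialized elsewhere in the daemon.
    @Metrics(about = "Example per-instance metrics", context = "example")
    class ExampleMetrics {
      private final String sourceName;
      @Metric("Requests handled") MutableCounterLong requests;

      ExampleMetrics(String instanceId) {
        this.sourceName = "ExampleMetrics-" + instanceId;  // hypothetical naming scheme
        MetricsSystem ms = DefaultMetricsSystem.instance();
        ms.register(sourceName, "Example metrics source", this);
      }

      void incrRequests() { requests.incr(); }

      void close() {
        // unregisterSource() is the call added by this change set.
        DefaultMetricsSystem.instance().unregisterSource(sourceName);
      }
    }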

View File

@ -127,6 +127,22 @@ public void testTransientProviderOnlyConfig() throws Exception {
"CredentialProviders configured.")); "CredentialProviders configured."));
} }
@Test
public void testPromptForCredentialWithEmptyPasswd() throws Exception {
String[] args1 = {"create", "credential1", "--provider",
"jceks://file" + tmpDir + "/credstore.jceks"};
ArrayList<String> passwords = new ArrayList<String>();
passwords.add(null);
passwords.add("p@ssw0rd");
int rc = 0;
CredentialShell shell = new CredentialShell();
shell.setConf(new Configuration());
shell.setPasswordReader(new MockPasswordReader(passwords));
rc = shell.run(args1);
assertEquals(outContent.toString(), -1, rc);
assertTrue(outContent.toString().contains("Passwords don't match"));
}
@Test @Test
public void testPromptForCredential() throws Exception { public void testPromptForCredential() throws Exception {
String[] args1 = {"create", "credential1", "--provider", String[] args1 = {"create", "credential1", "--provider",
@ -142,7 +158,7 @@ public void testPromptForCredential() throws Exception {
assertEquals(0, rc); assertEquals(0, rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " + assertTrue(outContent.toString().contains("credential1 has been successfully " +
"created.")); "created."));
String[] args2 = {"delete", "credential1", "--provider", String[] args2 = {"delete", "credential1", "--provider",
"jceks://file" + tmpDir + "/credstore.jceks"}; "jceks://file" + tmpDir + "/credstore.jceks"};
rc = shell.run(args2); rc = shell.run(args2);
@ -162,7 +178,7 @@ public MockPasswordReader(List<String> passwds) {
public char[] readPassword(String prompt) { public char[] readPassword(String prompt) {
if (passwords.size() == 0) return null; if (passwords.size() == 0) return null;
String pass = passwords.remove(0); String pass = passwords.remove(0);
return pass.toCharArray(); return pass == null ? null : pass.toCharArray();
} }
@Override @Override
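The new empty-password test drives CredentialShell programmatically through a mocked password reader. For orientation only, the equivalent interactive flow from the command line would look roughly like the line below, assuming the shell is wired up as the hadoop credential subcommand and using an illustrative provider path; if the two password entries do not match (including an empty first entry), the command prints "Passwords don't match" and exits with a non-zero status:

    hadoop credential create credential1 --provider jceks://file/tmp/credstore.jceks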

View File

@ -28,8 +28,6 @@
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/** /**
* Provides access to the <code>AccessControlList</code>s used by KMS, * Provides access to the <code>AccessControlList</code>s used by KMS,
@ -52,13 +50,11 @@ public String getConfigKey() {
public static final int RELOADER_SLEEP_MILLIS = 1000; public static final int RELOADER_SLEEP_MILLIS = 1000;
Map<Type, AccessControlList> acls; private volatile Map<Type, AccessControlList> acls;
private ReadWriteLock lock;
private ScheduledExecutorService executorService; private ScheduledExecutorService executorService;
private long lastReload; private long lastReload;
KMSACLs(Configuration conf) { KMSACLs(Configuration conf) {
lock = new ReentrantReadWriteLock();
if (conf == null) { if (conf == null) {
conf = loadACLs(); conf = loadACLs();
} }
@ -70,17 +66,13 @@ public KMSACLs() {
} }
private void setACLs(Configuration conf) { private void setACLs(Configuration conf) {
lock.writeLock().lock(); Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
try { for (Type aclType : Type.values()) {
acls = new HashMap<Type, AccessControlList>(); String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
for (Type aclType : Type.values()) { tempAcls.put(aclType, new AccessControlList(aclStr));
String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT); LOG.info("'{}' ACL '{}'", aclType, aclStr);
acls.put(aclType, new AccessControlList(aclStr));
LOG.info("'{}' ACL '{}'", aclType, aclStr);
}
} finally {
lock.writeLock().unlock();
} }
acls = tempAcls;
} }
@Override @Override
@ -120,14 +112,7 @@ private Configuration loadACLs() {
public boolean hasAccess(Type type, String user) { public boolean hasAccess(Type type, String user) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user); UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
AccessControlList acl = null; return acls.get(type).isUserAllowed(ugi);
lock.readLock().lock();
try {
acl = acls.get(type);
} finally {
lock.readLock().unlock();
}
return acl.isUserAllowed(ugi);
} }
} }
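The KMSACLs hunk replaces the ReadWriteLock with a copy-then-publish scheme: the writer builds a complete replacement map and publishes it with a single assignment to a volatile field, so readers never lock and never observe a partially populated map. A minimal generic sketch of the same pattern (names are illustrative, not KMS code):

    import java.util.HashMap;
    import java.util.Map;

    // Copy-then-publish: build the full replacement off to the side, then swap the
    // volatile reference. Readers take one volatile read and work on a stable map.
    class AclSnapshot {
      private volatile Map<String, Boolean> acls = new HashMap<String, Boolean>();

      void reload(Map<String, Boolean> freshlyParsed) {
        Map<String, Boolean> next = new HashMap<String, Boolean>(freshlyParsed); // fully built first
        acls = next;                                                             // single volatile publish
      }

      boolean isAllowed(String user) {
        Map<String, Boolean> snapshot = acls;  // stable view for this call
        Boolean allowed = snapshot.get(user);
        return allowed != null && allowed.booleanValue();
      }
    }

The published map must be treated as immutable after the swap; mutating it in place would reintroduce the race the lock used to prevent.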

View File

@ -19,12 +19,16 @@
import java.io.IOException; import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.SimpleTcpServer; import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.oncrpc.SimpleUdpServer; import org.apache.hadoop.oncrpc.SimpleUdpServer;
import org.apache.hadoop.portmap.PortmapMapping; import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.ShutdownHookManager;
import static org.apache.hadoop.util.ExitUtil.terminate;
/** /**
* Main class for starting mountd daemon. This daemon implements the NFS * Main class for starting mountd daemon. This daemon implements the NFS
* mount protocol. When receiving a MOUNT request from an NFS client, it checks * mount protocol. When receiving a MOUNT request from an NFS client, it checks
@ -33,6 +37,7 @@
* handle for requested directory and returns it to the client. * handle for requested directory and returns it to the client.
*/ */
abstract public class MountdBase { abstract public class MountdBase {
public static final Log LOG = LogFactory.getLog(MountdBase.class);
private final RpcProgram rpcProgram; private final RpcProgram rpcProgram;
private int udpBoundPort; // Will set after server starts private int udpBoundPort; // Will set after server starts
private int tcpBoundPort; // Will set after server starts private int tcpBoundPort; // Will set after server starts
@ -40,11 +45,11 @@ abstract public class MountdBase {
public RpcProgram getRpcProgram() { public RpcProgram getRpcProgram() {
return rpcProgram; return rpcProgram;
} }
/** /**
* Constructor * Constructor
* @param program * @param program
* @throws IOException * @throws IOException
*/ */
public MountdBase(RpcProgram program) throws IOException { public MountdBase(RpcProgram program) throws IOException {
rpcProgram = program; rpcProgram = program;
@ -74,11 +79,16 @@ public void start(boolean register) {
if (register) { if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(), ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY); SHUTDOWN_HOOK_PRIORITY);
rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort); try {
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort); rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
} catch (Throwable e) {
LOG.fatal("Failed to start the server. Cause:", e);
terminate(1, e);
}
} }
} }
/** /**
* Priority of the mountd shutdown hook. * Priority of the mountd shutdown hook.
*/ */
@ -91,5 +101,5 @@ public synchronized void run() {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort); rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
} }
} }
} }

View File

@ -71,7 +71,16 @@ public static synchronized NfsExports getInstance(Configuration conf) {
private static final Pattern CIDR_FORMAT_LONG = private static final Pattern CIDR_FORMAT_LONG =
Pattern.compile(SLASH_FORMAT_LONG); Pattern.compile(SLASH_FORMAT_LONG);
// Hostnames are composed of a series of 'labels' concatenated with dots.
// Labels can be between 1-63 characters long, and can only take
// letters, digits & hyphens. They cannot start or end with hyphens. For
// more details, refer to RFC-1123 & http://en.wikipedia.org/wiki/Hostname
private static final String LABEL_FORMAT =
"[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?";
private static final Pattern HOSTNAME_FORMAT =
Pattern.compile("^(" + LABEL_FORMAT + "\\.)*" + LABEL_FORMAT + "$");
static class AccessCacheEntry implements LightWeightCache.Entry{ static class AccessCacheEntry implements LightWeightCache.Entry{
private final String hostAddr; private final String hostAddr;
private AccessPrivilege access; private AccessPrivilege access;
@ -381,10 +390,14 @@ private static Match getMatch(String line) {
LOG.debug("Using Regex match for '" + host + "' and " + privilege); LOG.debug("Using Regex match for '" + host + "' and " + privilege);
} }
return new RegexMatch(privilege, host); return new RegexMatch(privilege, host);
} else if (HOSTNAME_FORMAT.matcher(host).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using exact match for '" + host + "' and " + privilege);
}
return new ExactMatch(privilege, host);
} else {
throw new IllegalArgumentException("Invalid hostname provided '" + host
+ "'");
} }
if (LOG.isDebugEnabled()) {
LOG.debug("Using exact match for '" + host + "' and " + privilege);
}
return new ExactMatch(privilege, host);
} }
} }
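With this change, an export entry that is not a wildcard, CIDR range, or regex must be a syntactically valid hostname, otherwise getMatch() throws IllegalArgumentException. The check can be exercised standalone; the sketch below simply copies the two patterns from the hunk above:

    import java.util.regex.Pattern;

    // Standalone check using the same label/hostname patterns added above.
    class HostnameCheck {
      private static final String LABEL_FORMAT =
          "[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?";
      private static final Pattern HOSTNAME_FORMAT =
          Pattern.compile("^(" + LABEL_FORMAT + "\\.)*" + LABEL_FORMAT + "$");

      public static void main(String[] args) {
        System.out.println(HOSTNAME_FORMAT.matcher("host1.example.com").matches()); // true
        System.out.println(HOSTNAME_FORMAT.matcher("foo#bar").matches());           // false: rejected, as in TestNfsExports
      }
    }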

View File

@ -25,6 +25,8 @@
import org.apache.hadoop.portmap.PortmapMapping; import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.ShutdownHookManager;
import static org.apache.hadoop.util.ExitUtil.terminate;
/** /**
* Nfs server. Supports NFS v3 using {@link RpcProgram}. * Nfs server. Supports NFS v3 using {@link RpcProgram}.
* Currently Mountd program is also started inside this class. * Currently Mountd program is also started inside this class.
@ -34,7 +36,7 @@ public abstract class Nfs3Base {
public static final Log LOG = LogFactory.getLog(Nfs3Base.class); public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
private final RpcProgram rpcProgram; private final RpcProgram rpcProgram;
private int nfsBoundPort; // Will set after server starts private int nfsBoundPort; // Will set after server starts
public RpcProgram getRpcProgram() { public RpcProgram getRpcProgram() {
return rpcProgram; return rpcProgram;
} }
@ -46,11 +48,16 @@ protected Nfs3Base(RpcProgram rpcProgram, Configuration conf) {
public void start(boolean register) { public void start(boolean register) {
startTCPServer(); // Start TCP server startTCPServer(); // Start TCP server
if (register) { if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(), ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY); SHUTDOWN_HOOK_PRIORITY);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort); try {
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
} catch (Throwable e) {
LOG.fatal("Failed to start the server. Cause:", e);
terminate(1, e);
}
} }
} }
@ -61,7 +68,7 @@ private void startTCPServer() {
tcpServer.run(); tcpServer.run();
nfsBoundPort = tcpServer.getBoundPort(); nfsBoundPort = tcpServer.getBoundPort();
} }
/** /**
* Priority of the nfsd shutdown hook. * Priority of the nfsd shutdown hook.
*/ */

View File

@ -131,7 +131,7 @@ protected void register(PortmapMapping mapEntry, boolean set) {
} catch (IOException e) { } catch (IOException e) {
String request = set ? "Registration" : "Unregistration"; String request = set ? "Registration" : "Unregistration";
LOG.error(request + " failure with " + host + ":" + port LOG.error(request + " failure with " + host + ":" + port
+ ", portmap entry: " + mapEntry, e); + ", portmap entry: " + mapEntry);
throw new RuntimeException(request + " failure", e); throw new RuntimeException(request + " failure", e);
} }
} }

View File

@ -60,6 +60,7 @@ public void run() throws IOException {
DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length, DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length,
IPAddress, port); IPAddress, port);
socket.send(sendPacket); socket.send(sendPacket);
socket.setSoTimeout(500);
DatagramPacket receivePacket = new DatagramPacket(receiveData, DatagramPacket receivePacket = new DatagramPacket(receiveData,
receiveData.length); receiveData.length);
socket.receive(receivePacket); socket.receive(receivePacket);
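The single added line puts a 500 ms receive timeout on the registration socket, so a missing portmap/rpcbind daemon surfaces as a SocketTimeoutException instead of a hang (the HDFS-6378 entry in this batch); the callers in MountdBase and Nfs3Base then turn that failure into process termination. A generic sketch of the idiom, not the actual client class:

    import java.io.IOException;
    import java.net.DatagramPacket;
    import java.net.DatagramSocket;
    import java.net.InetAddress;
    import java.net.SocketTimeoutException;

    // Without setSoTimeout, receive() blocks forever when nothing answers.
    class UdpWithTimeout {
      static byte[] sendAndReceive(InetAddress addr, int port, byte[] request) throws IOException {
        DatagramSocket socket = new DatagramSocket();
        try {
          socket.send(new DatagramPacket(request, request.length, addr, port));
          socket.setSoTimeout(500);                       // fail fast if nobody answers
          byte[] buf = new byte[65536];
          DatagramPacket reply = new DatagramPacket(buf, buf.length);
          try {
            socket.receive(reply);                        // SocketTimeoutException after 500 ms
          } catch (SocketTimeoutException e) {
            throw new IOException("No response from " + addr + ":" + port, e);
          }
          return reply.getData();
        } finally {
          socket.close();
        }
      }
    }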

View File

@ -194,4 +194,16 @@ public void testMultiMatchers() throws Exception {
} while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000); } while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
Assert.assertEquals(AccessPrivilege.NONE, ap); Assert.assertEquals(AccessPrivilege.NONE, ap);
} }
@Test(expected=IllegalArgumentException.class)
public void testInvalidHost() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"foo#bar");
}
@Test(expected=IllegalArgumentException.class)
public void testInvalidSeparator() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"foo ro : bar rw");
}
} }

View File

@ -154,6 +154,8 @@ public static int getAccessRights(int mode, int type) {
if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) { if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
if (type == NfsFileType.NFSREG.toValue()) { if (type == NfsFileType.NFSREG.toValue()) {
rtn |= Nfs3Constant.ACCESS3_EXECUTE; rtn |= Nfs3Constant.ACCESS3_EXECUTE;
} else {
rtn |= Nfs3Constant.ACCESS3_LOOKUP;
} }
} }
return rtn; return rtn;

View File

@ -68,5 +68,12 @@ public void testGetAccessRightsForUserGroup() throws IOException {
0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr)); 0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr));
assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match", assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",
0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr)); 0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr));
Mockito.when(attr.getUid()).thenReturn(2);
Mockito.when(attr.getGid()).thenReturn(10);
Mockito.when(attr.getMode()).thenReturn(457); // 711
Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
assertEquals("Access should be allowed for dir as mode is 711 and GID matches",
2 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 11}, attr));
} }
} }
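The expected value 2 in the new assertion follows from the else-branch added to Nfs3Utils: for a non-regular file the execute bit grants ACCESS3_LOOKUP (the 0x0002 bit in NFSv3, RFC 1813) rather than ACCESS3_EXECUTE. A small worked check of that arithmetic, simplified to the group-owner case and with the constant mirrored locally for illustration:

    // Mode 0711 gives group members only the execute/search bit; for a directory
    // that maps to ACCESS3_LOOKUP (0x0002), hence the expected value 2 above.
    public class LookupBitExample {
      static final int ACCESS3_LOOKUP = 0x0002;  // RFC 1813 value, mirrored here for the example
      public static void main(String[] args) {
        int mode = 0711;                         // rwx--x--x
        int groupBits = (mode >> 3) & 7;         // --x -> 1
        int rtn = 0;
        if ((groupBits & 1) != 0) {              // execute bit set and the file is a directory
          rtn |= ACCESS3_LOOKUP;
        }
        System.out.println(rtn);                 // prints 2
      }
    }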

View File

@ -23,6 +23,8 @@ Trunk (Unreleased)
HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs. HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs.
(wheat9) (wheat9)
HDFS-2538. option to disable fsck dots (Mohammad Kamrul Islam via aw)
NEW FEATURES NEW FEATURES
HDFS-3125. Add JournalService to enable Journal Daemon. (suresh) HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@ -298,8 +300,13 @@ Release 2.6.0 - UNRELEASED
HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc. HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc.
(cnauroth) (cnauroth)
HDFS-5624. Add HDFS tests for ACLs in combination with viewfs.
(Stephen Chu via cnauroth)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
BUG FIXES BUG FIXES
HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
@ -314,6 +321,25 @@ Release 2.6.0 - UNRELEASED
HADOOP-8158. Interrupting hadoop fs -put from the command line HADOOP-8158. Interrupting hadoop fs -put from the command line
causes a LeaseExpiredException. (daryn via harsh) causes a LeaseExpiredException. (daryn via harsh)
HDFS-6678. MiniDFSCluster may still be partially running after initialization
fails. (cnauroth)
HDFS-5809. BlockPoolSliceScanner and high speed hdfs appending make
datanode to drop into infinite loop (cmccabe)
HDFS-6456. NFS should throw error for invalid entry in
dfs.nfs.exports.allowed.hosts (Abhiraj Butala via brandonli)
HDFS-6689. NFS doesn't return correct lookup access for directories (brandonli)
HDFS-6478. RemoteException can't be retried properly for non-HA scenario.
(Ming Ma via jing9)
HDFS-6693. TestDFSAdminWithHA fails on windows ( vinayakumarb )
HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails
with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9)
Release 2.5.0 - UNRELEASED Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -836,6 +862,9 @@ Release 2.5.0 - UNRELEASED
HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted
file present in snapshot (kihwal) file present in snapshot (kihwal)
HDFS-6378. NFS registration should timeout instead of hanging when
portmap/rpcbind is not available (Abhiraj Butala via brandonli)
BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh) HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

View File

@ -26,7 +26,6 @@
import java.io.IOException; import java.io.IOException;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.URI; import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.List; import java.util.List;
@ -38,14 +37,13 @@
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo; import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
@ -259,12 +257,11 @@ public static boolean useLogicalUri(Configuration conf, URI nameNodeUri)
/** /**
* Parse the file system URI out of the provided token. * Parse the file system URI out of the provided token.
*/ */
public static URI getServiceUriFromToken(final String scheme, public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
Token<?> token) {
String tokStr = token.getService().toString(); String tokStr = token.getService().toString();
final String prefix = buildTokenServicePrefixForLogicalUri(scheme);
if (tokStr.startsWith(HA_DT_SERVICE_PREFIX)) { if (tokStr.startsWith(prefix)) {
tokStr = tokStr.replaceFirst(HA_DT_SERVICE_PREFIX, ""); tokStr = tokStr.replaceFirst(prefix, "");
} }
return URI.create(scheme + "://" + tokStr); return URI.create(scheme + "://" + tokStr);
} }
@ -273,10 +270,13 @@ public static URI getServiceUriFromToken(final String scheme,
* Get the service name used in the delegation token for the given logical * Get the service name used in the delegation token for the given logical
* HA service. * HA service.
* @param uri the logical URI of the cluster * @param uri the logical URI of the cluster
* @param scheme the scheme of the corresponding FileSystem
* @return the service name * @return the service name
*/ */
public static Text buildTokenServiceForLogicalUri(URI uri) { public static Text buildTokenServiceForLogicalUri(final URI uri,
return new Text(HA_DT_SERVICE_PREFIX + uri.getHost()); final String scheme) {
return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+ uri.getHost());
} }
/** /**
@ -286,7 +286,11 @@ public static Text buildTokenServiceForLogicalUri(URI uri) {
public static boolean isTokenForLogicalUri(Token<?> token) { public static boolean isTokenForLogicalUri(Token<?> token) {
return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX); return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
} }
public static String buildTokenServicePrefixForLogicalUri(String scheme) {
return HA_DT_SERVICE_PREFIX + scheme + ":";
}
/** /**
* Locate a delegation token associated with the given HA cluster URI, and if * Locate a delegation token associated with the given HA cluster URI, and if
* one is found, clone it to also represent the underlying namenode address. * one is found, clone it to also represent the underlying namenode address.
@ -298,7 +302,9 @@ public static boolean isTokenForLogicalUri(Token<?> token) {
public static void cloneDelegationTokenForLogicalUri( public static void cloneDelegationTokenForLogicalUri(
UserGroupInformation ugi, URI haUri, UserGroupInformation ugi, URI haUri,
Collection<InetSocketAddress> nnAddrs) { Collection<InetSocketAddress> nnAddrs) {
Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri); // this cloning logic is only used by hdfs
Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME);
Token<DelegationTokenIdentifier> haToken = Token<DelegationTokenIdentifier> haToken =
tokenSelector.selectToken(haService, ugi.getTokens()); tokenSelector.selectToken(haService, ugi.getTokens());
if (haToken != null) { if (haToken != null) {
@ -309,8 +315,9 @@ public static void cloneDelegationTokenForLogicalUri(
Token<DelegationTokenIdentifier> specificToken = Token<DelegationTokenIdentifier> specificToken =
new Token.PrivateToken<DelegationTokenIdentifier>(haToken); new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
SecurityUtil.setTokenService(specificToken, singleNNAddr); SecurityUtil.setTokenService(specificToken, singleNNAddr);
Text alias = Text alias = new Text(
new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService()); buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
+ "//" + specificToken.getService());
ugi.addToken(alias, specificToken); ugi.addToken(alias, specificToken);
LOG.debug("Mapped HA service delegation token for logical URI " + LOG.debug("Mapped HA service delegation token for logical URI " +
haUri + " to namenode " + singleNNAddr); haUri + " to namenode " + singleNNAddr);
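Taken together with the HdfsConstants change below, these HAUtil edits make the delegation token service name scheme-qualified: the fixed "ha-hdfs:" prefix becomes "ha-" + scheme + ":", so hdfs and webhdfs tokens for the same logical nameservice no longer collide (the HDFS-6667 fix in this batch). A short sketch of the resulting service strings; "mycluster" is a hypothetical logical nameservice:

    import java.net.URI;
    import org.apache.hadoop.hdfs.HAUtil;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    // Prints the scheme-qualified service names produced by the new API.
    public class TokenServiceExample {
      public static void main(String[] args) {
        System.out.println(HAUtil.buildTokenServiceForLogicalUri(
            URI.create("hdfs://mycluster"), HdfsConstants.HDFS_URI_SCHEME));  // ha-hdfs:mycluster
        System.out.println(HAUtil.buildTokenServiceForLogicalUri(
            URI.create("webhdfs://mycluster"), "webhdfs"));                   // ha-webhdfs:mycluster
      }
    }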

View File

@ -163,7 +163,8 @@ public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
Text dtService; Text dtService;
if (failoverProxyProvider.useLogicalURI()) { if (failoverProxyProvider.useLogicalURI()) {
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri); dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
HdfsConstants.HDFS_URI_SCHEME);
} else { } else {
dtService = SecurityUtil.buildTokenService( dtService = SecurityUtil.buildTokenService(
NameNode.getAddress(nameNodeUri)); NameNode.getAddress(nameNodeUri));
@ -224,7 +225,8 @@ public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
new Class[] { xface }, dummyHandler); new Class[] { xface }, dummyHandler);
Text dtService; Text dtService;
if (failoverProxyProvider.useLogicalURI()) { if (failoverProxyProvider.useLogicalURI()) {
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri); dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
HdfsConstants.HDFS_URI_SCHEME);
} else { } else {
dtService = SecurityUtil.buildTokenService( dtService = SecurityUtil.buildTokenService(
NameNode.getAddress(nameNodeUri)); NameNode.getAddress(nameNodeUri));
@ -333,19 +335,18 @@ private static NamenodeProtocol createNNProxyWithNamenodeProtocol(
address, conf, ugi, NamenodeProtocolPB.class, 0); address, conf, ugi, NamenodeProtocolPB.class, 0);
if (withRetries) { // create the proxy with retries if (withRetries) { // create the proxy with retries
RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200, RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
TimeUnit.MILLISECONDS); TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>(); = new HashMap<String, RetryPolicy>();
RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy, methodNameToPolicyMap.put("getBlocks", timeoutPolicy);
exceptionToPolicyMap); methodNameToPolicyMap.put("getAccessKeys", timeoutPolicy);
Map<String, RetryPolicy> methodNameToPolicyMap NamenodeProtocol translatorProxy =
= new HashMap<String, RetryPolicy>(); new NamenodeProtocolTranslatorPB(proxy);
methodNameToPolicyMap.put("getBlocks", methodPolicy); return (NamenodeProtocol) RetryProxy.create(
methodNameToPolicyMap.put("getAccessKeys", methodPolicy); NamenodeProtocol.class, translatorProxy, methodNameToPolicyMap);
proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class, } else {
proxy, methodNameToPolicyMap); return new NamenodeProtocolTranslatorPB(proxy);
} }
return new NamenodeProtocolTranslatorPB(proxy);
} }
private static ClientProtocol createNNProxyWithClientProtocol( private static ClientProtocol createNNProxyWithClientProtocol(
@ -379,29 +380,27 @@ private static ClientProtocol createNNProxyWithClientProtocol(
= new HashMap<Class<? extends Exception>, RetryPolicy>(); = new HashMap<Class<? extends Exception>, RetryPolicy>();
remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class, remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
createPolicy); createPolicy);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
= new HashMap<Class<? extends Exception>, RetryPolicy>(); defaultPolicy, remoteExceptionToPolicyMap);
exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
.retryByRemoteException(defaultPolicy,
remoteExceptionToPolicyMap));
RetryPolicy methodPolicy = RetryPolicies.retryByException(
defaultPolicy, exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>(); = new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("create", methodPolicy); methodNameToPolicyMap.put("create", methodPolicy);
proxy = (ClientNamenodeProtocolPB) RetryProxy.create( ClientProtocol translatorProxy =
ClientNamenodeProtocolPB.class, new ClientNamenodeProtocolTranslatorPB(proxy);
new DefaultFailoverProxyProvider<ClientNamenodeProtocolPB>( return (ClientProtocol) RetryProxy.create(
ClientNamenodeProtocolPB.class, proxy), ClientProtocol.class,
new DefaultFailoverProxyProvider<ClientProtocol>(
ClientProtocol.class, translatorProxy),
methodNameToPolicyMap, methodNameToPolicyMap,
defaultPolicy); defaultPolicy);
} else {
return new ClientNamenodeProtocolTranslatorPB(proxy);
} }
return new ClientNamenodeProtocolTranslatorPB(proxy);
} }
private static Object createNameNodeProxy(InetSocketAddress address, private static Object createNameNodeProxy(InetSocketAddress address,
Configuration conf, UserGroupInformation ugi, Class<?> xface, Configuration conf, UserGroupInformation ugi, Class<?> xface,
int rpcTimeout) throws IOException { int rpcTimeout) throws IOException {
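Both proxy factories above now install the retry layer around the protocol translator (ClientProtocol, NamenodeProtocol) instead of around the raw protobuf proxy, so a RemoteException is unwrapped before the retry policy inspects it (the HDFS-6478 entry), and per-method policies are supplied through a method-name map. A generic sketch of that wiring under a made-up interface, not the actual NameNode code:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    // GreetingService stands in for a translator interface such as NamenodeProtocol.
    interface GreetingService {
      String greet(String name) throws IOException;
    }

    class RetryWiring {
      static GreetingService withRetries(GreetingService translator) {
        RetryPolicy timeoutPolicy =
            RetryPolicies.exponentialBackoffRetry(5, 200, TimeUnit.MILLISECONDS);
        Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
        methodNameToPolicyMap.put("greet", timeoutPolicy);  // only this method is retried
        return (GreetingService) RetryProxy.create(
            GreetingService.class, translator, methodNameToPolicyMap);
      }
    }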

View File

@ -124,7 +124,7 @@ public static enum DatanodeReportType {
* of a delegation token, indicating that the URI is a logical (HA) * of a delegation token, indicating that the URI is a logical (HA)
* URI. * URI.
*/ */
public static final String HA_DT_SERVICE_PREFIX = "ha-hdfs:"; public static final String HA_DT_SERVICE_PREFIX = "ha-";
/** /**

View File

@ -97,7 +97,7 @@ public DatanodeProtocolClientSideTranslatorPB(InetSocketAddress nameNodeAddr,
RPC.setProtocolEngine(conf, DatanodeProtocolPB.class, RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
ProtobufRpcEngine.class); ProtobufRpcEngine.class);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi)); rpcProxy = createNamenode(nameNodeAddr, conf, ugi);
} }
private static DatanodeProtocolPB createNamenode( private static DatanodeProtocolPB createNamenode(
@ -109,33 +109,6 @@ private static DatanodeProtocolPB createNamenode(
org.apache.hadoop.ipc.Client.getPingInterval(conf), null).getProxy(); org.apache.hadoop.ipc.Client.getPingInterval(conf), null).getProxy();
} }
/** Create a {@link NameNode} proxy */
static DatanodeProtocolPB createNamenodeWithRetry(
DatanodeProtocolPB rpcNamenode) {
RetryPolicy createPolicy = RetryPolicies
.retryUpToMaximumCountWithFixedSleep(5,
HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
createPolicy);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
.retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
remoteExceptionToPolicyMap));
RetryPolicy methodPolicy = RetryPolicies.retryByException(
RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("create", methodPolicy);
return (DatanodeProtocolPB) RetryProxy.create(DatanodeProtocolPB.class,
rpcNamenode, methodNameToPolicyMap);
}
@Override @Override
public void close() throws IOException { public void close() throws IOException {
RPC.stopProxy(rpcProxy); RPC.stopProxy(rpcProxy);

View File

@ -47,6 +47,7 @@
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil; import org.apache.hadoop.ipc.RpcClientUtil;
@ -61,7 +62,7 @@
@InterfaceAudience.Private @InterfaceAudience.Private
@InterfaceStability.Stable @InterfaceStability.Stable
public class NamenodeProtocolTranslatorPB implements NamenodeProtocol, public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
ProtocolMetaInterface, Closeable { ProtocolMetaInterface, Closeable, ProtocolTranslator {
/** RpcController is not used and hence is set to null */ /** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null; private final static RpcController NULL_CONTROLLER = null;
@ -88,6 +89,11 @@ public void close() {
RPC.stopProxy(rpcProxy); RPC.stopProxy(rpcProxy);
} }
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
@Override @Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size) public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException { throws IOException {
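Implementing ProtocolTranslator and exposing getUnderlyingProxyObject() lets the RPC layer reach the real protobuf proxy behind the translator (for example when stopping a proxy), which matters now that the translator itself can be wrapped by the retry proxy above. A small sketch of the unwrapping idiom; the helper name is illustrative:

    import org.apache.hadoop.ipc.ProtocolTranslator;

    // If a proxy is really a translator, ask it for the underlying RPC proxy
    // before doing proxy-level work on it.
    class ProxyUnwrap {
      static Object unwrap(Object proxy) {
        return (proxy instanceof ProtocolTranslator)
            ? ((ProtocolTranslator) proxy).getUnderlyingProxyObject()
            : proxy;
      }
    }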

View File

@ -310,18 +310,11 @@ void deleteBlocks(Block[] blocks) {
} }
} }
private synchronized void updateScanStatus(Block block, private synchronized void updateScanStatus(BlockScanInfo info,
ScanType type, ScanType type,
boolean scanOk) { boolean scanOk) {
BlockScanInfo info = blockMap.get(block); delBlockInfo(info);
if ( info != null ) {
delBlockInfo(info);
} else {
// It might already be removed. Thats ok, it will be caught next time.
info = new BlockScanInfo(block);
}
long now = Time.monotonicNow(); long now = Time.monotonicNow();
info.lastScanType = type; info.lastScanType = type;
info.lastScanTime = now; info.lastScanTime = now;
@ -334,8 +327,8 @@ private synchronized void updateScanStatus(Block block,
} }
if (verificationLog != null) { if (verificationLog != null) {
verificationLog.append(now, block.getGenerationStamp(), verificationLog.append(now, info.getGenerationStamp(),
block.getBlockId()); info.getBlockId());
} }
} }
@ -434,11 +427,13 @@ void verifyBlock(ExtendedBlock block) {
totalTransientErrors++; totalTransientErrors++;
} }
updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, true); updateScanStatus((BlockScanInfo)block.getLocalBlock(),
ScanType.VERIFICATION_SCAN, true);
return; return;
} catch (IOException e) { } catch (IOException e) {
updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false); updateScanStatus((BlockScanInfo)block.getLocalBlock(),
ScanType.VERIFICATION_SCAN, false);
// If the block does not exists anymore, then its not an error // If the block does not exists anymore, then its not an error
if (!dataset.contains(block)) { if (!dataset.contains(block)) {
@ -497,7 +492,7 @@ private synchronized boolean isFirstBlockProcessed() {
// Picks one block and verifies it // Picks one block and verifies it
private void verifyFirstBlock() { private void verifyFirstBlock() {
Block block = null; BlockScanInfo block = null;
synchronized (this) { synchronized (this) {
if (!blockInfoSet.isEmpty()) { if (!blockInfoSet.isEmpty()) {
block = blockInfoSet.first(); block = blockInfoSet.first();

View File

@ -128,7 +128,8 @@ private void init(final UserGroupInformation ugi,
"://" + nnId); "://" + nnId);
boolean isLogical = HAUtil.isLogicalUri(conf, nnUri); boolean isLogical = HAUtil.isLogicalUri(conf, nnUri);
if (isLogical) { if (isLogical) {
token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri)); token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri,
HdfsConstants.HDFS_URI_SCHEME));
} else { } else {
token.setService(SecurityUtil.buildTokenService(nnUri)); token.setService(SecurityUtil.buildTokenService(nnUri));
} }

View File

@ -126,6 +126,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
private boolean showBlocks = false; private boolean showBlocks = false;
private boolean showLocations = false; private boolean showLocations = false;
private boolean showRacks = false; private boolean showRacks = false;
private boolean showprogress = false;
private boolean showCorruptFileBlocks = false; private boolean showCorruptFileBlocks = false;
/** /**
@ -203,6 +204,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
else if (key.equals("blocks")) { this.showBlocks = true; } else if (key.equals("blocks")) { this.showBlocks = true; }
else if (key.equals("locations")) { this.showLocations = true; } else if (key.equals("locations")) { this.showLocations = true; }
else if (key.equals("racks")) { this.showRacks = true; } else if (key.equals("racks")) { this.showRacks = true; }
else if (key.equals("showprogress")) { this.showprogress = true; }
else if (key.equals("openforwrite")) {this.showOpenFiles = true; } else if (key.equals("openforwrite")) {this.showOpenFiles = true; }
else if (key.equals("listcorruptfileblocks")) { else if (key.equals("listcorruptfileblocks")) {
this.showCorruptFileBlocks = true; this.showCorruptFileBlocks = true;
@ -381,10 +383,13 @@ void check(String parent, HdfsFileStatus file, Result res) throws IOException {
} else if (showFiles) { } else if (showFiles) {
out.print(path + " " + fileLen + " bytes, " + out.print(path + " " + fileLen + " bytes, " +
blocks.locatedBlockCount() + " block(s): "); blocks.locatedBlockCount() + " block(s): ");
} else { } else if (showprogress) {
out.print('.'); out.print('.');
} }
if (res.totalFiles % 100 == 0) { out.println(); out.flush(); } if ((showprogress) && res.totalFiles % 100 == 0) {
out.println();
out.flush();
}
int missing = 0; int missing = 0;
int corrupt = 0; int corrupt = 0;
long missize = 0; long missize = 0;

View File

@ -19,24 +19,30 @@
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import java.util.List; import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode;
import com.google.common.collect.ImmutableList;
/** /**
* XAttrStorage is used to read and set xattrs for an inode. * XAttrStorage is used to read and set xattrs for an inode.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class XAttrStorage { public class XAttrStorage {
private static final Map<String, String> internedNames = Maps.newHashMap();
/** /**
* Reads the existing extended attributes of an inode. If the * Reads the existing extended attributes of an inode. If the
* inode does not have an <code>XAttr</code>, then this method * inode does not have an <code>XAttr</code>, then this method
* returns an empty list. * returns an empty list.
* <p/>
* Must be called while holding the FSDirectory read lock.
*
* @param inode INode to read * @param inode INode to read
* @param snapshotId * @param snapshotId
* @return List<XAttr> <code>XAttr</code> list. * @return List<XAttr> <code>XAttr</code> list.
@ -48,6 +54,9 @@ public static List<XAttr> readINodeXAttrs(INode inode, int snapshotId) {
/** /**
* Reads the existing extended attributes of an inode. * Reads the existing extended attributes of an inode.
* <p/>
* Must be called while holding the FSDirectory read lock.
*
* @param inode INode to read. * @param inode INode to read.
* @return List<XAttr> <code>XAttr</code> list. * @return List<XAttr> <code>XAttr</code> list.
*/ */
@ -58,6 +67,9 @@ public static List<XAttr> readINodeXAttrs(INode inode) {
/** /**
* Update xattrs of inode. * Update xattrs of inode.
* <p/>
* Must be called while holding the FSDirectory write lock.
*
* @param inode INode to update * @param inode INode to update
* @param xAttrs to update xAttrs. * @param xAttrs to update xAttrs.
* @param snapshotId id of the latest snapshot of the inode * @param snapshotId id of the latest snapshot of the inode
@ -70,8 +82,24 @@ public static void updateINodeXAttrs(INode inode,
} }
return; return;
} }
// Dedupe the xAttr name and save them into a new interned list
ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(xAttrs); List<XAttr> internedXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
for (XAttr xAttr : xAttrs) {
final String name = xAttr.getName();
String internedName = internedNames.get(name);
if (internedName == null) {
internedName = name;
internedNames.put(internedName, internedName);
}
XAttr internedXAttr = new XAttr.Builder()
.setName(internedName)
.setNameSpace(xAttr.getNameSpace())
.setValue(xAttr.getValue())
.build();
internedXAttrs.add(internedXAttr);
}
// Save the list of interned xattrs
ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(internedXAttrs);
if (inode.getXAttrFeature() != null) { if (inode.getXAttrFeature() != null) {
inode.removeXAttrFeature(snapshotId); inode.removeXAttrFeature(snapshotId);
} }
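The XAttrStorage hunk dedupes attribute names (the HDFS-6690 memory optimization): every incoming name is canonicalized through a shared map so identical names across inodes reference a single String, and the new javadoc notes that callers hold the FSDirectory lock, which is what makes the plain HashMap safe. A generic sketch of the interning pattern; as in the code above, it assumes callers serialize access externally:

    import java.util.HashMap;
    import java.util.Map;

    // Hands back one canonical instance per distinct name. NOT internally
    // synchronized: assumes an external lock (here, the FSDirectory lock)
    // serializes all callers, as the javadoc above requires.
    class NameInterner {
      private static final Map<String, String> interned = new HashMap<String, String>();

      static String intern(String name) {
        String canonical = interned.get(name);
        if (canonical == null) {
          canonical = name;
          interned.put(name, name);
        }
        return canonical;
      }
    }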

View File

@ -77,7 +77,7 @@ public class DFSck extends Configured implements Tool {
private static final String USAGE = "Usage: DFSck <path> " private static final String USAGE = "Usage: DFSck <path> "
+ "[-list-corruptfileblocks | " + "[-list-corruptfileblocks | "
+ "[-move | -delete | -openforwrite] " + "[-move | -delete | -openforwrite] "
+ "[-files [-blocks [-locations | -racks]]]]\n" + "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
+ "\t<path>\tstart checking from this path\n" + "\t<path>\tstart checking from this path\n"
+ "\t-move\tmove corrupted files to /lost+found\n" + "\t-move\tmove corrupted files to /lost+found\n"
+ "\t-delete\tdelete corrupted files\n" + "\t-delete\tdelete corrupted files\n"
@ -90,7 +90,8 @@ public class DFSck extends Configured implements Tool {
+ "blocks and files they belong to\n" + "blocks and files they belong to\n"
+ "\t-blocks\tprint out block report\n" + "\t-blocks\tprint out block report\n"
+ "\t-locations\tprint out locations for every block\n" + "\t-locations\tprint out locations for every block\n"
+ "\t-racks\tprint out network topology for data-node locations\n\n" + "\t-racks\tprint out network topology for data-node locations\n"
+ "\t-showprogress\tshow progress in output. Default is OFF (no progress)\n\n"
+ "Please Note:\n" + "Please Note:\n"
+ "\t1. By default fsck ignores files opened for write, " + "\t1. By default fsck ignores files opened for write, "
+ "use -openforwrite to report such files. They are usually " + "use -openforwrite to report such files. They are usually "
@ -270,6 +271,7 @@ private int doWork(final String[] args) throws IOException {
else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); } else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); }
else if (args[idx].equals("-locations")) { url.append("&locations=1"); } else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
else if (args[idx].equals("-racks")) { url.append("&racks=1"); } else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
else if (args[idx].equals("-showprogress")) { url.append("&showprogress=1"); }
else if (args[idx].equals("-list-corruptfileblocks")) { else if (args[idx].equals("-list-corruptfileblocks")) {
url.append("&listcorruptfileblocks=1"); url.append("&listcorruptfileblocks=1");
doListCorruptFileBlocks = true; doListCorruptFileBlocks = true;
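With this change the per-file progress dots become opt-in: without -showprogress, fsck no longer prints a dot for every file it checks. Usage follows the updated usage string above; the path is illustrative:

    hdfs fsck /user/data -files -blocks -showprogress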

View File

@ -158,7 +158,7 @@ public synchronized void initialize(URI uri, Configuration conf
// getCanonicalUri() in order to handle the case where no port is // getCanonicalUri() in order to handle the case where no port is
// specified in the URI // specified in the URI
this.tokenServiceName = isLogicalUri ? this.tokenServiceName = isLogicalUri ?
HAUtil.buildTokenServiceForLogicalUri(uri) HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
: SecurityUtil.buildTokenService(getCanonicalUri()); : SecurityUtil.buildTokenService(getCanonicalUri());
if (!isHA) { if (!isHA) {

View File

@ -0,0 +1,190 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
/**
* Verify ACL through ViewFileSystem functionality.
*/
public class TestViewFileSystemWithAcls {
private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileSystem fHdfs;
private static FileSystem fHdfs2;
private FileSystem fsView;
private Configuration fsViewConf;
private FileSystem fsTarget, fsTarget2;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileSystemTestHelper fileSystemTestHelper =
new FileSystemTestHelper("/tmp/TestViewFileSystemWithAcls");
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();
fHdfs = cluster.getFileSystem(0);
fHdfs2 = cluster.getFileSystem(1);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Before
public void setUp() throws Exception {
fsTarget = fHdfs;
fsTarget2 = fHdfs2;
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
fsTarget.delete(targetTestRoot, true);
fsTarget2.delete(targetTestRoot2, true);
fsTarget.mkdirs(targetTestRoot);
fsTarget2.mkdirs(targetTestRoot2);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
}
private void setupMountPoints() {
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
@After
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
}
/**
* Verify a ViewFs wrapped over multiple federated NameNodes will
* dispatch the ACL operations to the correct NameNode.
*/
@Test
public void testAclOnMountEntry() throws Exception {
// Set ACLs on the first namespace and verify they are correct
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fsView.setAcl(mountOnNn1, aclSpec);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
// Double-check by getting ACL status using FileSystem
// instead of ViewFs
assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
// Modify the ACL entries on the first namespace
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ));
fsView.modifyAclEntries(mountOnNn1, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "foo", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, MASK, READ),
aclEntry(DEFAULT, OTHER, NONE) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
fsView.removeDefaultAcl(mountOnNn1);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
// Paranoid check: verify the other namespace does not
// have ACLs set on the same path.
assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
// Remove the ACL entries on the first namespace
fsView.removeAcl(mountOnNn1);
assertEquals(0, fsView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0, fHdfs.getAclStatus(targetTestRoot).getEntries().size());
// Now set ACLs on the second namespace
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ));
fsView.modifyAclEntries(mountOnNn2, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bar", READ),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
// Remove the ACL entries on the second namespace
fsView.removeAclEntries(mountOnNn2, Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ)
));
expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
fsView.removeAcl(mountOnNn2);
assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
}
private AclEntry[] aclEntryArray(AclStatus aclStatus) {
return aclStatus.getEntries().toArray(new AclEntry[0]);
}
}

View File

@ -0,0 +1,190 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.List;
import java.io.IOException;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.fs.permission.FsAction.NONE;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
/**
* Verify ACL through ViewFs functionality.
*/
public class TestViewFsWithAcls {
private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileContext fc, fc2;
private FileContext fcView, fcTarget, fcTarget2;
private Configuration fsViewConf;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestViewFsWithAcls");
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();
fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Before
public void setUp() throws Exception {
fcTarget = fc;
fcTarget2 = fc2;
targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);
fcTarget.delete(targetTestRoot, true);
fcTarget2.delete(targetTestRoot2, true);
fcTarget.mkdir(targetTestRoot, new FsPermission((short)0750), true);
fcTarget2.mkdir(targetTestRoot2, new FsPermission((short)0750), true);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
}
private void setupMountPoints() {
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
@After
public void tearDown() throws Exception {
fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
}
/**
* Verify a ViewFs wrapped over multiple federated NameNodes will
* dispatch the ACL operations to the correct NameNode.
*/
@Test
public void testAclOnMountEntry() throws Exception {
// Set ACLs on the first namespace and verify they are correct
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fcView.setAcl(mountOnNn1, aclSpec);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
// Double-check by reading the ACL status from the target FileContext
// directly, instead of going through ViewFs
assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));
// Modify the ACL entries on the first namespace
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ));
fcView.modifyAclEntries(mountOnNn1, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "foo", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, MASK, READ),
aclEntry(DEFAULT, OTHER, NONE) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
fcView.removeDefaultAcl(mountOnNn1);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));
// Paranoid check: verify the other namespace does not
// have ACLs set on the same path.
assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());
// Remove the ACL entries on the first namespace
fcView.removeAcl(mountOnNn1);
assertEquals(0, fcView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0, fc.getAclStatus(targetTestRoot).getEntries().size());
// Now set ACLs on the second namespace
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ));
fcView.modifyAclEntries(mountOnNn2, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bar", READ),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
// Remove the ACL entries on the second namespace
fcView.removeAclEntries(mountOnNn2, Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ)
));
expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
fcView.removeAcl(mountOnNn2);
assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());
}
private AclEntry[] aclEntryArray(AclStatus aclStatus) {
return aclStatus.getEntries().toArray(new AclEntry[0]);
}
}
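The two ConfigUtil.addLink() calls in setupMountPoints() are what tie the mount points to the two federated namespaces: each link becomes an ordinary fs.viewfs.mounttable.<table>.link.<path> property in the client configuration. A minimal sketch of the equivalent wiring, assuming the default mount table name and using placeholder NameNode URIs (neither is taken from this patch):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsConstants;

public class ViewFsMountSketch {
  public static FileContext buildViewFs() throws Exception {
    Configuration conf = new Configuration();
    // Equivalent of ConfigUtil.addLink(conf, "/mountOnNn1", targetUri):
    // one mount-table link property per mount point.
    conf.set("fs.viewfs.mounttable.default.link./mountOnNn1",
        new URI("hdfs://nn1/tmp/TestViewFsWithAcls").toString());
    conf.set("fs.viewfs.mounttable.default.link./mountOnNn2",
        new URI("hdfs://nn2/tmp/TestViewFsWithAcls").toString());
    // ACL calls against viewfs:///mountOnNn1 are dispatched to nn1 and
    // /mountOnNn2 to nn2, which is exactly what the test above verifies.
    return FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
  }
}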

View File

@ -663,73 +663,81 @@ private void initMiniDFSCluster(
boolean checkDataNodeHostConfig, boolean checkDataNodeHostConfig,
Configuration[] dnConfOverlays) Configuration[] dnConfOverlays)
throws IOException { throws IOException {
ExitUtil.disableSystemExit(); boolean success = false;
synchronized (MiniDFSCluster.class) {
instanceId = instanceCount++;
}
this.conf = conf;
base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
this.waitSafeMode = waitSafeMode;
this.checkExitOnShutdown = checkExitOnShutdown;
int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
int safemodeExtension = conf.getInt(
DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);
// In an HA cluster, in order for the StandbyNode to perform checkpoints,
// it needs to know the HTTP port of the Active. So, if ephemeral ports
// are chosen, disable checkpoints for the test.
if (!nnTopology.allHttpPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
"since no HTTP ports have been specified.");
conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
}
if (!nnTopology.allIpcPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling log-roll triggering in the "
+ "Standby node since no IPC ports have been specified.");
conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
}
federation = nnTopology.isFederated();
try { try {
createNameNodesAndSetConf( ExitUtil.disableSystemExit();
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
enableManagedDfsDirsRedundancy, synchronized (MiniDFSCluster.class) {
format, startOpt, clusterId, conf); instanceId = instanceCount++;
} catch (IOException ioe) { }
LOG.error("IOE creating namenodes. Permissions dump:\n" +
createPermissionsDiagnosisString(data_dir)); this.conf = conf;
throw ioe; base_dir = new File(determineDfsBaseDir());
} data_dir = new File(base_dir, "data");
if (format) { this.waitSafeMode = waitSafeMode;
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) { this.checkExitOnShutdown = checkExitOnShutdown;
throw new IOException("Cannot remove data directory: " + data_dir +
int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
int safemodeExtension = conf.getInt(
DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);
// In an HA cluster, in order for the StandbyNode to perform checkpoints,
// it needs to know the HTTP port of the Active. So, if ephemeral ports
// are chosen, disable checkpoints for the test.
if (!nnTopology.allHttpPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
"since no HTTP ports have been specified.");
conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
}
if (!nnTopology.allIpcPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling log-roll triggering in the "
+ "Standby node since no IPC ports have been specified.");
conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
}
federation = nnTopology.isFederated();
try {
createNameNodesAndSetConf(
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
enableManagedDfsDirsRedundancy,
format, startOpt, clusterId, conf);
} catch (IOException ioe) {
LOG.error("IOE creating namenodes. Permissions dump:\n" +
createPermissionsDiagnosisString(data_dir)); createPermissionsDiagnosisString(data_dir));
throw ioe;
}
if (format) {
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
throw new IOException("Cannot remove data directory: " + data_dir +
createPermissionsDiagnosisString(data_dir));
}
}
if (startOpt == StartupOption.RECOVER) {
return;
}
// Start the DataNodes
startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
dnStartOpt != null ? dnStartOpt : startOpt,
racks, hosts, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
waitClusterUp();
//make sure ProxyUsers uses the latest conf
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
success = true;
} finally {
if (!success) {
shutdown();
} }
} }
if (startOpt == StartupOption.RECOVER) {
return;
}
// Start the DataNodes
startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
dnStartOpt != null ? dnStartOpt : startOpt,
racks, hosts, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
waitClusterUp();
//make sure ProxyUsers uses the latest conf
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
} }
/** /**
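The rewritten hunk above wraps the body of initMiniDFSCluster in a try/finally guarded by a success flag, so a cluster whose startup fails part-way is shut down instead of leaking half-started NameNode and DataNode daemons into later tests. A hedged sketch of the same idiom in isolation; the class and helper names below are hypothetical and only illustrate the pattern:

import java.io.IOException;

public class GuardedStartup {
  public void start() throws IOException {
    boolean success = false;
    try {
      startNameNodes();   // may throw
      startDataNodes();   // may throw
      waitUntilReady();
      success = true;
    } finally {
      if (!success) {
        // Tear down whatever did manage to start; the original
        // exception still propagates to the caller.
        shutdown();
      }
    }
  }

  private void startNameNodes() throws IOException { /* ... */ }
  private void startDataNodes() throws IOException { /* ... */ }
  private void waitUntilReady() { /* ... */ }
  public void shutdown() { /* release any started daemons */ }
}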

View File

@ -30,6 +30,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
@ -79,6 +81,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
@ -97,6 +100,8 @@ public class TestFileCreation {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL); ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
} }
private static final String RPC_DETAILED_METRICS =
"RpcDetailedActivityForPort";
static final long seed = 0xDEADBEEFL; static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192; static final int blockSize = 8192;
@ -371,7 +376,7 @@ public void testOverwriteOpenForWrite() throws Exception {
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem(); FileSystem fs = cluster.getFileSystem();
UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting( UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
"testuser", new String[]{"testgroup"}); "testuser", new String[]{"testgroup"});
FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() { FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@ -380,12 +385,16 @@ public FileSystem run() throws Exception {
return FileSystem.get(cluster.getConfiguration(0)); return FileSystem.get(cluster.getConfiguration(0));
} }
}); });
String metricsName = RPC_DETAILED_METRICS + cluster.getNameNodePort();
try { try {
Path p = new Path("/testfile"); Path p = new Path("/testfile");
FSDataOutputStream stm1 = fs.create(p); FSDataOutputStream stm1 = fs.create(p);
stm1.write(1); stm1.write(1);
assertCounter("CreateNumOps", 1L, getMetrics(metricsName));
// Create file again without overwrite // Create file again without overwrite
try { try {
fs2.create(p, false); fs2.create(p, false);
@ -394,7 +403,9 @@ public FileSystem run() throws Exception {
GenericTestUtils.assertExceptionContains("already being created by", GenericTestUtils.assertExceptionContains("already being created by",
abce); abce);
} }
// NameNodeProxies' createNNProxyWithClientProtocol has 5 retries.
assertCounter("AlreadyBeingCreatedExceptionNumOps",
6L, getMetrics(metricsName));
FSDataOutputStream stm2 = fs2.create(p, true); FSDataOutputStream stm2 = fs2.create(p, true);
stm2.write(2); stm2.write(2);
stm2.close(); stm2.close();
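The assertions added above read the NameNode's per-port RPC detailed metrics record, whose name is the "RpcDetailedActivityForPort" prefix from the patch plus the RPC port. A hedged sketch of the same check pulled out on its own; it assumes a running MiniDFSCluster and is not part of the patch:

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;

class RpcMetricsCheck {
  static void verifyCreateOps(MiniDFSCluster cluster) {
    String metricsName = "RpcDetailedActivityForPort" + cluster.getNameNodePort();
    MetricsRecordBuilder rb = getMetrics(metricsName);
    // One successful create() so far; the client-side proxy retries a failed
    // create 5 times, so a single conflicting create shows up as 6 server ops.
    assertCounter("CreateNumOps", 1L, rb);
    assertCounter("AlreadyBeingCreatedExceptionNumOps", 6L, rb);
  }
}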

View File

@ -25,14 +25,16 @@
import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
@ -76,16 +78,22 @@ public static void tearDown() throws Exception {
@Test @Test
public void testNamenodeProtocol() throws IOException { public void testNamenodeProtocol() throws IOException {
NamenodeProtocolTranslatorPB translator = NamenodeProtocol np =
(NamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(conf, NameNodeProxies.createNonHAProxy(conf,
nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(), nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy(); true).getProxy();
boolean exists = translator.isMethodSupported("rollEditLog");
boolean exists = RpcClientUtil.isMethodSupported(np,
NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog");
assertTrue(exists); assertTrue(exists);
exists = translator.isMethodSupported("bogusMethod"); exists = RpcClientUtil.isMethodSupported(np,
NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(NamenodeProtocolPB.class), "bogusMethod");
assertFalse(exists); assertFalse(exists);
} }
@Test @Test
public void testDatanodeProtocol() throws IOException { public void testDatanodeProtocol() throws IOException {
DatanodeProtocolClientSideTranslatorPB translator = DatanodeProtocolClientSideTranslatorPB translator =
@ -107,16 +115,18 @@ public void testClientDatanodeProtocol() throws IOException {
NetUtils.getDefaultSocketFactory(conf)); NetUtils.getDefaultSocketFactory(conf));
assertTrue(translator.isMethodSupported("refreshNamenodes")); assertTrue(translator.isMethodSupported("refreshNamenodes"));
} }
@Test @Test
public void testClientNamenodeProtocol() throws IOException { public void testClientNamenodeProtocol() throws IOException {
ClientNamenodeProtocolTranslatorPB translator = ClientProtocol cp =
(ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy( NameNodeProxies.createNonHAProxy(
conf, nnAddress, ClientProtocol.class, conf, nnAddress, ClientProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy(); UserGroupInformation.getCurrentUser(), true).getProxy();
assertTrue(translator.isMethodSupported("mkdirs")); assertTrue(RpcClientUtil.isMethodSupported(cp,
ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs"));
} }
@Test @Test
public void tesJournalProtocol() throws IOException { public void tesJournalProtocol() throws IOException {
JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB) JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB)

View File

@ -116,7 +116,8 @@ public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) { public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b); BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b);
bpScanner.verifyBlock(b); bpScanner.verifyBlock(new ExtendedBlock(b.getBlockPoolId(),
new BlockPoolSliceScanner.BlockScanInfo(b.getLocalBlock())));
} }
private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn, private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn,

View File

@ -50,6 +50,7 @@
import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@ -299,7 +300,8 @@ public void testHAUtilClonesDelegationTokens() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test"); UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
URI haUri = new URI("hdfs://my-ha-uri/"); URI haUri = new URI("hdfs://my-ha-uri/");
token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri)); token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME));
ugi.addToken(token); ugi.addToken(token);
Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>(); Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
@ -355,7 +357,8 @@ public void testHAUtilClonesDelegationTokens() throws Exception {
@Test @Test
public void testDFSGetCanonicalServiceName() throws Exception { public void testDFSGetCanonicalServiceName() throws Exception {
URI hAUri = HATestUtil.getLogicalUri(cluster); URI hAUri = HATestUtil.getLogicalUri(cluster);
String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString(); String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, dfs.getCanonicalServiceName()); assertEquals(haService, dfs.getCanonicalServiceName());
final String renewer = UserGroupInformation.getCurrentUser().getShortUserName(); final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
final Token<DelegationTokenIdentifier> token = final Token<DelegationTokenIdentifier> token =
@ -371,7 +374,8 @@ public void testHdfsGetCanonicalServiceName() throws Exception {
Configuration conf = dfs.getConf(); Configuration conf = dfs.getConf();
URI haUri = HATestUtil.getLogicalUri(cluster); URI haUri = HATestUtil.getLogicalUri(cluster);
AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf); AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString(); String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, afs.getCanonicalServiceName()); assertEquals(haService, afs.getCanonicalServiceName());
Token<?> token = afs.getDelegationTokens( Token<?> token = afs.getDelegationTokens(
UserGroupInformation.getCurrentUser().getShortUserName()).get(0); UserGroupInformation.getCurrentUser().getShortUserName()).get(0);

View File

@ -21,6 +21,7 @@
import java.io.PrintStream; import java.io.PrintStream;
import com.google.common.base.Charsets; import com.google.common.base.Charsets;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@ -46,6 +47,7 @@ public class TestDFSAdminWithHA {
private PrintStream originErr; private PrintStream originErr;
private static final String NSID = "ns1"; private static final String NSID = "ns1";
private static String newLine = System.getProperty("line.separator");
private void assertOutputMatches(String string) { private void assertOutputMatches(String string) {
String errOutput = new String(out.toByteArray(), Charsets.UTF_8); String errOutput = new String(out.toByteArray(), Charsets.UTF_8);
@ -99,6 +101,14 @@ public void tearDown() throws Exception {
System.err.flush(); System.err.flush();
System.setOut(originOut); System.setOut(originOut);
System.setErr(originErr); System.setErr(originErr);
if (admin != null) {
admin.close();
}
if (cluster != null) {
cluster.shutdown();
}
out.reset();
err.reset();
} }
@Test(timeout = 30000) @Test(timeout = 30000)
@ -108,25 +118,25 @@ public void testSetSafeMode() throws Exception {
int exitCode = admin.run(new String[] {"-safemode", "enter"}); int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*"; String message = "Safe mode is ON in.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
// Get safemode // Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"}); exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is ON in.*"; message = "Safe mode is ON in.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
// Leave safemode // Leave safemode
exitCode = admin.run(new String[] {"-safemode", "leave"}); exitCode = admin.run(new String[] {"-safemode", "leave"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*"; message = "Safe mode is OFF in.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
// Get safemode // Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"}); exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*"; message = "Safe mode is OFF in.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -136,12 +146,12 @@ public void testSaveNamespace() throws Exception {
int exitCode = admin.run(new String[] {"-safemode", "enter"}); int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*"; String message = "Safe mode is ON in.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-saveNamespace"}); exitCode = admin.run(new String[] {"-saveNamespace"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
message = "Save namespace successful for.*"; message = "Save namespace successful for.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -151,17 +161,17 @@ public void testRestoreFailedStorage() throws Exception {
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "restoreFailedStorage is set to false for.*"; String message = "restoreFailedStorage is set to false for.*";
// Default is false // Default is false
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"}); exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to true for.*"; message = "restoreFailedStorage is set to true for.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"}); exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to false for.*"; message = "restoreFailedStorage is set to false for.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -170,7 +180,7 @@ public void testRefreshNodes() throws Exception {
int exitCode = admin.run(new String[] {"-refreshNodes"}); int exitCode = admin.run(new String[] {"-refreshNodes"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh nodes successful for.*"; String message = "Refresh nodes successful for.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -179,7 +189,7 @@ public void testSetBalancerBandwidth() throws Exception {
int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"}); int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "Balancer bandwidth is set to 10 for.*"; String message = "Balancer bandwidth is set to 10 for.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -189,7 +199,7 @@ public void testMetaSave() throws Exception {
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "Created metasave file dfs.meta in the log directory" String message = "Created metasave file dfs.meta in the log directory"
+ " of namenode.*"; + " of namenode.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -198,7 +208,7 @@ public void testRefreshServiceAcl() throws Exception {
int exitCode = admin.run(new String[] {"-refreshServiceAcl"}); int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh service acl successful for.*"; String message = "Refresh service acl successful for.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -207,7 +217,7 @@ public void testRefreshUserToGroupsMappings() throws Exception {
int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"}); int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh user to groups mapping successful for.*"; String message = "Refresh user to groups mapping successful for.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -217,7 +227,7 @@ public void testRefreshSuperUserGroupsConfiguration() throws Exception {
new String[] {"-refreshSuperUserGroupsConfiguration"}); new String[] {"-refreshSuperUserGroupsConfiguration"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh super user groups configuration successful for.*"; String message = "Refresh super user groups configuration successful for.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
@Test (timeout = 30000) @Test (timeout = 30000)
@ -226,6 +236,6 @@ public void testRefreshCallQueue() throws Exception {
int exitCode = admin.run(new String[] {"-refreshCallQueue"}); int exitCode = admin.run(new String[] {"-refreshCallQueue"});
assertEquals(err.toString().trim(), 0, exitCode); assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh call queue successful for.*"; String message = "Refresh call queue successful for.*";
assertOutputMatches(message + "\n" + message + "\n"); assertOutputMatches(message + newLine + message + newLine);
} }
} }
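Replacing the hard-coded "\n" with the platform line separator matters because the admin tool prints its messages with println, which emits CRLF on Windows; a regex stitched together with "\n" then never matches the captured output. A standalone illustration of the difference (not taken from the patch):

public class LineSeparatorDemo {
  public static void main(String[] args) {
    String newLine = System.getProperty("line.separator");
    String output = "Safe mode is ON in nn1" + newLine
        + "Safe mode is ON in nn2" + newLine;
    String message = "Safe mode is ON in.*";
    // "." does not match line terminators, so the separator between the two
    // lines has to be matched literally -- and it is "\r\n" on Windows.
    System.out.println(output.matches(message + newLine + message + newLine)); // true everywhere
    System.out.println(output.matches(message + "\n" + message + "\n"));       // false on Windows
  }
}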

View File

@ -17,6 +17,9 @@ Trunk (Unreleased)
MAPREDUCE-5232. Add a configuration to be able to log classpath and other MAPREDUCE-5232. Add a configuration to be able to log classpath and other
system properties on mapreduce JVMs startup. (Sangjin Lee via vinodkv) system properties on mapreduce JVMs startup. (Sangjin Lee via vinodkv)
MAPREDUCE-5910. Make MR AM resync with RM in case of work-preserving
RM-restart. (Rohith via jianhe)
IMPROVEMENTS IMPROVEMENTS
MAPREDUCE-3481. [Gridmix] Improve Gridmix STRESS mode. (amarrk) MAPREDUCE-3481. [Gridmix] Improve Gridmix STRESS mode. (amarrk)
@ -153,6 +156,9 @@ Release 2.6.0 - UNRELEASED
IMPROVEMENTS IMPROVEMENTS
MAPREDUCE-5971. Move the default options for distcp -p to
DistCpOptionSwitch. (clamb via wang)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -237,6 +243,9 @@ Release 2.5.0 - UNRELEASED
MAPREDUCE-5844. Add a configurable delay to reducer-preemption. MAPREDUCE-5844. Add a configurable delay to reducer-preemption.
(Maysam Yabandeh via kasha) (Maysam Yabandeh via kasha)
MAPREDUCE-5790. Made it easier to enable hprof profile options by default.
(Gera Shegalov via vinodkv)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -304,6 +313,9 @@ Release 2.5.0 - UNRELEASED
resource configuration for deciding uber-mode on map-only jobs. (Siqi Li via resource configuration for deciding uber-mode on map-only jobs. (Siqi Li via
vinodkv) vinodkv)
MAPREDUCE-5952. LocalContainerLauncher#renameMapOutputForReduce incorrectly
assumes a single dir for mapOutIndex. (Gera Shegalov via kasha)
Release 2.4.1 - 2014-06-23 Release 2.4.1 - 2014-06-23
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -133,6 +133,7 @@ case $startStop in
else else
echo no $command to stop echo no $command to stop
fi fi
rm -f $pid
else else
echo no $command to stop echo no $command to stop
fi fi

View File

@ -30,6 +30,7 @@
import java.util.concurrent.Future; import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.LinkedBlockingQueue;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSError; import org.apache.hadoop.fs.FSError;
@ -437,43 +438,6 @@ private void runSubtask(org.apache.hadoop.mapred.Task task,
} }
} }
/**
* Within the _local_ filesystem (not HDFS), all activity takes place within
* a single subdir (${local.dir}/usercache/$user/appcache/$appId/$contId/),
* and all sub-MapTasks create the same filename ("file.out"). Rename that
* to something unique (e.g., "map_0.out") to avoid collisions.
*
* Longer-term, we'll modify [something] to use TaskAttemptID-based
* filenames instead of "file.out". (All of this is entirely internal,
* so there are no particular compatibility issues.)
*/
private MapOutputFile renameMapOutputForReduce(JobConf conf,
TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
// move map output to reduce input
Path mapOut = subMapOutputFile.getOutputFile();
FileStatus mStatus = localFs.getFileStatus(mapOut);
Path reduceIn = subMapOutputFile.getInputFileForWrite(
TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen());
Path mapOutIndex = new Path(mapOut.toString() + ".index");
Path reduceInIndex = new Path(reduceIn.toString() + ".index");
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming map output file for task attempt "
+ mapId.toString() + " from original location " + mapOut.toString()
+ " to destination " + reduceIn.toString());
}
if (!localFs.mkdirs(reduceIn.getParent())) {
throw new IOException("Mkdirs failed to create "
+ reduceIn.getParent().toString());
}
if (!localFs.rename(mapOut, reduceIn))
throw new IOException("Couldn't rename " + mapOut);
if (!localFs.rename(mapOutIndex, reduceInIndex))
throw new IOException("Couldn't rename " + mapOutIndex);
return new RenamedMapOutputFile(reduceIn);
}
/** /**
* Also within the local filesystem, we need to restore the initial state * Also within the local filesystem, we need to restore the initial state
* of the directory as much as possible. Compare current contents against * of the directory as much as possible. Compare current contents against
@ -506,7 +470,46 @@ private void relocalize() {
} }
} // end EventHandler } // end EventHandler
/**
* Within the _local_ filesystem (not HDFS), all activity takes place within
* a subdir inside one of the LOCAL_DIRS
* (${local.dir}/usercache/$user/appcache/$appId/$contId/),
* and all sub-MapTasks create the same filename ("file.out"). Rename that
* to something unique (e.g., "map_0.out") to avoid possible collisions.
*
* Longer-term, we'll modify [something] to use TaskAttemptID-based
* filenames instead of "file.out". (All of this is entirely internal,
* so there are no particular compatibility issues.)
*/
@VisibleForTesting
protected static MapOutputFile renameMapOutputForReduce(JobConf conf,
TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
// move map output to reduce input
Path mapOut = subMapOutputFile.getOutputFile();
FileStatus mStatus = localFs.getFileStatus(mapOut);
Path reduceIn = subMapOutputFile.getInputFileForWrite(
TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen());
Path mapOutIndex = subMapOutputFile.getOutputIndexFile();
Path reduceInIndex = new Path(reduceIn.toString() + ".index");
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming map output file for task attempt "
+ mapId.toString() + " from original location " + mapOut.toString()
+ " to destination " + reduceIn.toString());
}
if (!localFs.mkdirs(reduceIn.getParent())) {
throw new IOException("Mkdirs failed to create "
+ reduceIn.getParent().toString());
}
if (!localFs.rename(mapOut, reduceIn))
throw new IOException("Couldn't rename " + mapOut);
if (!localFs.rename(mapOutIndex, reduceInIndex))
throw new IOException("Couldn't rename " + mapOutIndex);
return new RenamedMapOutputFile(reduceIn);
}
private static class RenamedMapOutputFile extends MapOutputFile { private static class RenamedMapOutputFile extends MapOutputFile {
private Path path; private Path path;
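The substantive fix in the hunk above is taking the index path from MapOutputFile#getOutputIndexFile() instead of appending ".index" to the data path: with several entries in mapreduce.cluster.local.dir, "file.out" and "file.out.index" can be allocated on different disks, so the concatenated path may not exist at all. A hedged sketch of how the mismatch arises; it mirrors the new TestLocalContainerLauncher test added later in this patch, and the directory names are placeholders:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MROutputFiles;
import org.apache.hadoop.mapreduce.MRConfig;

public class IndexPathMismatchSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();
    MROutputFiles outputFiles = new MROutputFiles();
    outputFiles.setConf(conf);
    // Steer the data file and the index file into different local dirs.
    conf.set(MRConfig.LOCAL_DIR, "/tmp/local-0");
    Path mapOut = outputFiles.getOutputFileForWrite(1);
    conf.set(MRConfig.LOCAL_DIR, "/tmp/local-1");
    Path mapOutIndex = outputFiles.getOutputIndexFileForWrite(1);
    // mapOut + ".index" now names a file that was never created;
    // getOutputIndexFile()/getOutputIndexFileForWrite() is the reliable lookup.
    System.out.println(mapOut + " vs " + mapOutIndex);
  }
}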

View File

@ -64,6 +64,7 @@ public class LocalContainerAllocator extends RMCommunicator
private int nmPort; private int nmPort;
private int nmHttpPort; private int nmHttpPort;
private ContainerId containerId; private ContainerId containerId;
protected int lastResponseID;
private final RecordFactory recordFactory = private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null); RecordFactoryProvider.getRecordFactory(null);
@ -119,6 +120,11 @@ protected synchronized void heartbeat() throws Exception {
if (allocateResponse.getAMCommand() != null) { if (allocateResponse.getAMCommand() != null) {
switch(allocateResponse.getAMCommand()) { switch(allocateResponse.getAMCommand()) {
case AM_RESYNC: case AM_RESYNC:
LOG.info("ApplicationMaster is out of sync with ResourceManager,"
+ " hence resyncing.");
this.lastResponseID = 0;
register();
break;
case AM_SHUTDOWN: case AM_SHUTDOWN:
LOG.info("Event from RM: shutting down Application Master"); LOG.info("Event from RM: shutting down Application Master");
// This can happen if the RM has been restarted. If it is in that state, // This can happen if the RM has been restarted. If it is in that state,

View File

@ -52,6 +52,7 @@
import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factories.RecordFactory;
@ -216,20 +217,27 @@ protected void doUnregistration()
FinishApplicationMasterRequest request = FinishApplicationMasterRequest request =
FinishApplicationMasterRequest.newInstance(finishState, FinishApplicationMasterRequest.newInstance(finishState,
sb.toString(), historyUrl); sb.toString(), historyUrl);
while (true) { try {
FinishApplicationMasterResponse response = while (true) {
scheduler.finishApplicationMaster(request); FinishApplicationMasterResponse response =
if (response.getIsUnregistered()) { scheduler.finishApplicationMaster(request);
// When excepting ClientService, other services are already stopped, if (response.getIsUnregistered()) {
// it is safe to let clients know the final states. ClientService // When excepting ClientService, other services are already stopped,
// should wait for some time so clients have enough time to know the // it is safe to let clients know the final states. ClientService
// final states. // should wait for some time so clients have enough time to know the
RunningAppContext raContext = (RunningAppContext) context; // final states.
raContext.markSuccessfulUnregistration(); RunningAppContext raContext = (RunningAppContext) context;
break; raContext.markSuccessfulUnregistration();
break;
}
LOG.info("Waiting for application to be successfully unregistered.");
Thread.sleep(rmPollInterval);
} }
LOG.info("Waiting for application to be successfully unregistered."); } catch (ApplicationMasterNotRegisteredException e) {
Thread.sleep(rmPollInterval); // RM might have restarted or failed over and so lost the fact that AM had
// registered before.
register();
doUnregistration();
} }
} }

View File

@ -389,6 +389,7 @@ protected synchronized void handleEvent(ContainerAllocatorEvent event) {
removed = true; removed = true;
assignedRequests.remove(aId); assignedRequests.remove(aId);
containersReleased++; containersReleased++;
pendingRelease.add(containerId);
release(containerId); release(containerId);
} }
} }
@ -641,6 +642,15 @@ private List<Container> getResources() throws Exception {
if (response.getAMCommand() != null) { if (response.getAMCommand() != null) {
switch(response.getAMCommand()) { switch(response.getAMCommand()) {
case AM_RESYNC: case AM_RESYNC:
LOG.info("ApplicationMaster is out of sync with ResourceManager,"
+ " hence resyncing.");
lastResponseID = 0;
// Registering to allow RM to discover an active AM for this
// application
register();
addOutstandingRequestOnResync();
break;
case AM_SHUTDOWN: case AM_SHUTDOWN:
// This can happen if the RM has been restarted. If it is in that state, // This can happen if the RM has been restarted. If it is in that state,
// this application must clean itself up. // this application must clean itself up.
@ -700,6 +710,7 @@ private List<Container> getResources() throws Exception {
LOG.error("Container complete event for unknown container id " LOG.error("Container complete event for unknown container id "
+ cont.getContainerId()); + cont.getContainerId());
} else { } else {
pendingRelease.remove(cont.getContainerId());
assignedRequests.remove(attemptID); assignedRequests.remove(attemptID);
// send the container completed event to Task attempt // send the container completed event to Task attempt
@ -991,6 +1002,7 @@ private void containerAssigned(Container allocated,
private void containerNotAssigned(Container allocated) { private void containerNotAssigned(Container allocated) {
containersReleased++; containersReleased++;
pendingRelease.add(allocated.getId());
release(allocated.getId()); release(allocated.getId());
} }

View File

@ -40,6 +40,7 @@
import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.AMCommand;
import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.Resource;
@ -58,7 +59,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
private static final Log LOG = LogFactory.getLog(RMContainerRequestor.class); private static final Log LOG = LogFactory.getLog(RMContainerRequestor.class);
private int lastResponseID; protected int lastResponseID;
private Resource availableResources; private Resource availableResources;
private final RecordFactory recordFactory = private final RecordFactory recordFactory =
@ -77,8 +78,11 @@ public abstract class RMContainerRequestor extends RMCommunicator {
// numContainers dont end up as duplicates // numContainers dont end up as duplicates
private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>( private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(
new org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator()); new org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator());
private final Set<ContainerId> release = new TreeSet<ContainerId>(); private final Set<ContainerId> release = new TreeSet<ContainerId>();
// pendingRelease holds the history of release requests; a request is removed
// only when the RM reports the corresponding container as completed.
// How is it different from release? release only covers the current allocate() request.
protected Set<ContainerId> pendingRelease = new TreeSet<ContainerId>();
private boolean nodeBlacklistingEnabled; private boolean nodeBlacklistingEnabled;
private int blacklistDisablePercent; private int blacklistDisablePercent;
private AtomicBoolean ignoreBlacklisting = new AtomicBoolean(false); private AtomicBoolean ignoreBlacklisting = new AtomicBoolean(false);
@ -186,6 +190,10 @@ protected AllocateResponse makeRemoteRequest() throws IOException {
} catch (YarnException e) { } catch (YarnException e) {
throw new IOException(e); throw new IOException(e);
} }
if (isResyncCommand(allocateResponse)) {
return allocateResponse;
}
lastResponseID = allocateResponse.getResponseId(); lastResponseID = allocateResponse.getResponseId();
availableResources = allocateResponse.getAvailableResources(); availableResources = allocateResponse.getAvailableResources();
lastClusterNmCount = clusterNmCount; lastClusterNmCount = clusterNmCount;
@ -214,6 +222,28 @@ protected AllocateResponse makeRemoteRequest() throws IOException {
return allocateResponse; return allocateResponse;
} }
protected boolean isResyncCommand(AllocateResponse allocateResponse) {
return allocateResponse.getAMCommand() != null
&& allocateResponse.getAMCommand() == AMCommand.AM_RESYNC;
}
protected void addOutstandingRequestOnResync() {
for (Map<String, Map<Resource, ResourceRequest>> rr : remoteRequestsTable
.values()) {
for (Map<Resource, ResourceRequest> capabilities : rr.values()) {
for (ResourceRequest request : capabilities.values()) {
addResourceRequestToAsk(request);
}
}
}
if (!ignoreBlacklisting.get()) {
blacklistAdditions.addAll(blacklistedNodes);
}
if (!pendingRelease.isEmpty()) {
release.addAll(pendingRelease);
}
}
// May be incorrect if there's multiple NodeManagers running on a single host. // May be incorrect if there's multiple NodeManagers running on a single host.
// knownNodeCount is based on node managers, not hosts. blacklisting is // knownNodeCount is based on node managers, not hosts. blacklisting is
// currently based on hosts. // currently based on hosts.

View File

@ -18,17 +18,26 @@
package org.apache.hadoop.mapred; package org.apache.hadoop.mapred;
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.mockito.Matchers.isA; import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when; import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import java.util.EnumSet;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@ -46,6 +55,9 @@
import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
import org.mockito.invocation.InvocationOnMock; import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer; import org.mockito.stubbing.Answer;
@ -53,6 +65,36 @@
public class TestLocalContainerLauncher { public class TestLocalContainerLauncher {
private static final Log LOG = private static final Log LOG =
LogFactory.getLog(TestLocalContainerLauncher.class); LogFactory.getLog(TestLocalContainerLauncher.class);
private static File testWorkDir;
private static final String[] localDirs = new String[2];
private static void delete(File dir) throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path p = fs.makeQualified(new Path(dir.getAbsolutePath()));
fs.delete(p, true);
}
@BeforeClass
public static void setupTestDirs() throws IOException {
testWorkDir = new File("target",
TestLocalContainerLauncher.class.getCanonicalName());
testWorkDir.delete();
testWorkDir.mkdirs();
testWorkDir = testWorkDir.getAbsoluteFile();
for (int i = 0; i < localDirs.length; i++) {
final File dir = new File(testWorkDir, "local-" + i);
dir.mkdirs();
localDirs[i] = dir.toString();
}
}
@AfterClass
public static void cleanupTestDirs() throws IOException {
if (testWorkDir != null) {
delete(testWorkDir);
}
}
@SuppressWarnings("rawtypes") @SuppressWarnings("rawtypes")
@Test(timeout=10000) @Test(timeout=10000)
@ -141,4 +183,35 @@ private static Container createMockContainer() {
when(container.getNodeId()).thenReturn(nodeId); when(container.getNodeId()).thenReturn(nodeId);
return container; return container;
} }
@Test
public void testRenameMapOutputForReduce() throws Exception {
final JobConf conf = new JobConf();
final MROutputFiles mrOutputFiles = new MROutputFiles();
mrOutputFiles.setConf(conf);
// make sure both dirs are distinct
//
conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString());
final Path mapOut = mrOutputFiles.getOutputFileForWrite(1);
conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString());
final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1);
Assert.assertNotEquals("Paths must be different!",
mapOut.getParent(), mapOutIdx.getParent());
// make both dirs part of LOCAL_DIR
conf.setStrings(MRConfig.LOCAL_DIR, localDirs);
final FileContext lfc = FileContext.getLocalFSFileContext(conf);
lfc.create(mapOut, EnumSet.of(CREATE)).close();
lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();
final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);
LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
}
} }

View File

@ -78,6 +78,7 @@
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.Container;
@ -87,6 +88,7 @@
import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.event.Event;
@ -95,9 +97,13 @@
import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@ -618,6 +624,10 @@ public MyResourceManager(Configuration conf) {
super(conf); super(conf);
} }
public MyResourceManager(Configuration conf, RMStateStore store) {
super(conf, store);
}
@Override @Override
public void serviceStart() throws Exception { public void serviceStart() throws Exception {
super.serviceStart(); super.serviceStart();
@ -1426,6 +1436,13 @@ private static void assertBlacklistAdditionsAndRemovals(
rm.getMyFifoScheduler().lastBlacklistRemovals.size()); rm.getMyFifoScheduler().lastBlacklistRemovals.size());
} }
private static void assertAsksAndReleases(int expectedAsk,
int expectedRelease, MyResourceManager rm) {
Assert.assertEquals(expectedAsk, rm.getMyFifoScheduler().lastAsk.size());
Assert.assertEquals(expectedRelease,
rm.getMyFifoScheduler().lastRelease.size());
}
private static class MyFifoScheduler extends FifoScheduler { private static class MyFifoScheduler extends FifoScheduler {
public MyFifoScheduler(RMContext rmContext) { public MyFifoScheduler(RMContext rmContext) {
@ -1440,6 +1457,7 @@ public MyFifoScheduler(RMContext rmContext) {
} }
List<ResourceRequest> lastAsk = null; List<ResourceRequest> lastAsk = null;
List<ContainerId> lastRelease = null;
List<String> lastBlacklistAdditions; List<String> lastBlacklistAdditions;
List<String> lastBlacklistRemovals; List<String> lastBlacklistRemovals;
@ -1458,6 +1476,7 @@ public synchronized Allocation allocate(
askCopy.add(reqCopy); askCopy.add(reqCopy);
} }
lastAsk = ask; lastAsk = ask;
lastRelease = release;
lastBlacklistAdditions = blacklistAdditions; lastBlacklistAdditions = blacklistAdditions;
lastBlacklistRemovals = blacklistRemovals; lastBlacklistRemovals = blacklistRemovals;
return super.allocate( return super.allocate(
@ -1505,6 +1524,20 @@ private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
return new ContainerFailedEvent(attemptId, host); return new ContainerFailedEvent(attemptId, host);
} }
private ContainerAllocatorEvent createDeallocateEvent(JobId jobId,
int taskAttemptId, boolean reduce) {
TaskId taskId;
if (reduce) {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
} else {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
}
TaskAttemptId attemptId =
MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
return new ContainerAllocatorEvent(attemptId,
ContainerAllocator.EventType.CONTAINER_DEALLOCATE);
}
private void checkAssignments(ContainerRequestEvent[] requests, private void checkAssignments(ContainerRequestEvent[] requests,
List<TaskAttemptContainerAssignedEvent> assignments, List<TaskAttemptContainerAssignedEvent> assignments,
boolean checkHostMatch) { boolean checkHostMatch) {
@ -1557,6 +1590,7 @@ private static class MyContainerAllocator extends RMContainerAllocator {
= new ArrayList<JobUpdatedNodesEvent>(); = new ArrayList<JobUpdatedNodesEvent>();
private MyResourceManager rm; private MyResourceManager rm;
private boolean isUnregistered = false; private boolean isUnregistered = false;
private AllocateResponse allocateResponse;
private static AppContext createAppContext( private static AppContext createAppContext(
ApplicationAttemptId appAttemptId, Job job) { ApplicationAttemptId appAttemptId, Job job) {
AppContext context = mock(AppContext.class); AppContext context = mock(AppContext.class);
@ -1668,6 +1702,10 @@ public void sendFailure(ContainerFailedEvent f) {
super.handleEvent(f); super.handleEvent(f);
} }
public void sendDeallocate(ContainerAllocatorEvent f) {
super.handleEvent(f);
}
// API to be used by tests // API to be used by tests
public List<TaskAttemptContainerAssignedEvent> schedule() public List<TaskAttemptContainerAssignedEvent> schedule()
throws Exception { throws Exception {
@ -1713,6 +1751,20 @@ protected boolean isApplicationMasterRegistered() {
public boolean isUnregistered() { public boolean isUnregistered() {
return isUnregistered; return isUnregistered;
} }
public void updateSchedulerProxy(MyResourceManager rm) {
scheduler = rm.getApplicationMasterService();
}
@Override
protected AllocateResponse makeRemoteRequest() throws IOException {
allocateResponse = super.makeRemoteRequest();
return allocateResponse;
}
public boolean isResyncCommand() {
return super.isResyncCommand(allocateResponse);
}
} }
@Test @Test
@ -2022,6 +2074,198 @@ protected ContainerAllocator createContainerAllocator(
Assert.assertTrue(allocator.isUnregistered()); Assert.assertTrue(allocator.isUnregistered());
} }
// Step-1 : AM sends an allocate request for 2 ContainerRequests and 1
// blacklisted node
// Step-2 : 2 containers are allocated by the RM.
// Step-3 : AM sends 1 containerRequest (event3) and 1 release request to
// the RM
// Step-4 : On RM restart, the AM (which does not know the RM restarted)
// sends an additional containerRequest (event4) and blacklisted nodes.
// In turn the RM sends a resync command
// Step-5 : On resync, the AM re-sends all outstanding asks, releases,
// blacklist additions
// and another containerRequest (event5)
// Step-6 : The RM allocates containers, i.e. event3, event4 and cRequest5
@Test
public void testRMContainerAllocatorResendsRequestsOnRMRestart()
throws Exception {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
conf.setInt(
MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
MyResourceManager rm1 = new MyResourceManager(conf, memStore);
rm1.start();
DrainDispatcher dispatcher =
(DrainDispatcher) rm1.getRMContext().getDispatcher();
// Submit the application
RMApp app = rm1.submitApp(1024);
dispatcher.await();
MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
ApplicationAttemptId appAttemptId =
app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator =
new MyContainerAllocator(rm1, conf, appAttemptId, mockJob);
// Step-1 : AM sends an allocate request for 2 ContainerRequests and 1
// blacklisted node
// create the container request
// send MAP request
ContainerRequestEvent event1 =
createReq(jobId, 1, 1024, new String[] { "h1" });
allocator.sendRequest(event1);
ContainerRequestEvent event2 =
createReq(jobId, 2, 2048, new String[] { "h1", "h2" });
allocator.sendRequest(event2);
// Send events to blacklist h2
ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h2", false);
allocator.sendFailure(f1);
// send allocate request and 1 blacklisted nodes
List<TaskAttemptContainerAssignedEvent> assignedContainers =
allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0,
assignedContainers.size());
// Why is the ask 3, not 4? --> the ask for the blacklisted node h2 is removed
assertAsksAndReleases(3, 0, rm1);
assertBlacklistAdditionsAndRemovals(1, 0, rm1);
nm1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
// Step-2 : 2 containers are allocated by the RM.
assignedContainers = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 2", 2,
assignedContainers.size());
assertAsksAndReleases(0, 0, rm1);
assertBlacklistAdditionsAndRemovals(0, 0, rm1);
assignedContainers = allocator.schedule();
Assert.assertEquals("No of assignments must be 0", 0,
assignedContainers.size());
assertAsksAndReleases(3, 0, rm1);
assertBlacklistAdditionsAndRemovals(0, 0, rm1);
// Step-3 : AM sends 1 containerRequest (event3) and 1 release request to
// the RM
// send container request
ContainerRequestEvent event3 =
createReq(jobId, 3, 1000, new String[] { "h1" });
allocator.sendRequest(event3);
// send deallocate request
ContainerAllocatorEvent deallocate1 =
createDeallocateEvent(jobId, 1, false);
allocator.sendDeallocate(deallocate1);
assignedContainers = allocator.schedule();
Assert.assertEquals("No of assignments must be 0", 0,
assignedContainers.size());
assertAsksAndReleases(3, 1, rm1);
assertBlacklistAdditionsAndRemovals(0, 0, rm1);
// Phase-2 : the second RM starts up
MyResourceManager rm2 = new MyResourceManager(conf, memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
allocator.updateSchedulerProxy(rm2);
dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();
// NM should be rebooted on heartbeat, even first heartbeat for nm2
NodeHeartbeatResponse hbResponse = nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction());
// new NM to represent NM re-register
nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
// Step-4 : On RM restart, the AM (which does not know the RM restarted)
// sends an additional containerRequest (event4) and blacklisted nodes.
// In turn the RM sends a resync command
// send deallocate request, release=1
ContainerAllocatorEvent deallocate2 =
createDeallocateEvent(jobId, 2, false);
allocator.sendDeallocate(deallocate2);
// Send events to blacklist nodes h3
ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h3", false);
allocator.sendFailure(f2);
ContainerRequestEvent event4 =
createReq(jobId, 4, 2000, new String[] { "h1", "h2" });
allocator.sendRequest(event4);
// send allocate request to 2nd RM and get resync command
allocator.schedule();
dispatcher.await();
Assert.assertTrue("Last allocate response is not RESYNC",
allocator.isResyncCommand());
// Step-5 : On resync, the AM re-sends all outstanding asks, releases,
// blacklist additions
// and another containerRequest (event5)
ContainerRequestEvent event5 =
createReq(jobId, 5, 3000, new String[] { "h1", "h2", "h3" });
allocator.sendRequest(event5);
// send all outstanding requests again.
assignedContainers = allocator.schedule();
dispatcher.await();
assertAsksAndReleases(3, 2, rm2);
assertBlacklistAdditionsAndRemovals(2, 0, rm2);
nm1.nodeHeartbeat(true);
dispatcher.await();
// Step-6 : The RM allocates containers, i.e. event3, event4 and cRequest5
assignedContainers = allocator.schedule();
dispatcher.await();
Assert.assertEquals("Number of container should be 3", 3,
assignedContainers.size());
for (TaskAttemptContainerAssignedEvent assig : assignedContainers) {
Assert.assertTrue("Assigned count not correct",
"h1".equals(assig.getContainer().getNodeId().getHost()));
}
rm1.stop();
rm2.stop();
}
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
TestRMContainerAllocator t = new TestRMContainerAllocator(); TestRMContainerAllocator t = new TestRMContainerAllocator();
t.testSimple(); t.testSimple();
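For orientation on the resync path this test exercises: the allocator's isResyncCommand() inspects the most recent AllocateResponse returned by the RM. The following is a minimal sketch of that check, assuming the stock AMCommand enum available on this branch; the class and method names are illustrative only, not the patch itself.

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.AMCommand;

public class ResyncCheck {
  // Returns true when the RM asks the AM to re-register and re-send all
  // outstanding asks, releases and blacklist additions.
  static boolean checkForResync(AllocateResponse response) {
    return response != null
        && response.getAMCommand() == AMCommand.AM_RESYNC;
  }
}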
@ -671,7 +671,7 @@
<property> <property>
<name>mapreduce.task.profile.params</name> <name>mapreduce.task.profile.params</name>
<value></value> <value>-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s</value>
<description>JVM profiler parameters used to profile map and reduce task <description>JVM profiler parameters used to profile map and reduce task
attempts. This string may contain a single format specifier %s that will attempts. This string may contain a single format specifier %s that will
be replaced by the path to profile.out in the task attempt log directory. be replaced by the path to profile.out in the task attempt log directory.
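The hunk above changes the shipped default for mapreduce.task.profile.params. As a hedged usage sketch (not part of this patch; the class name is hypothetical), a job can still override the profiler string and the %s placeholder programmatically through JobConf:

import org.apache.hadoop.mapred.JobConf;

public class ProfilerConfigExample {
  public static JobConf configureProfiling(JobConf conf) {
    // Turn task profiling on for this job.
    conf.setProfileEnabled(true);
    // Override the default hprof options; %s is replaced with the path to
    // profile.out in the task attempt log directory.
    conf.setProfileParams(
        "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,"
        + "verbose=n,file=%s");
    // Profile only map task 1 and reduce task 1.
    conf.setProfileTaskRange(true, "1");
    conf.setProfileTaskRange(false, "1");
    return conf;
  }
}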
@ -29,11 +29,7 @@ public class TestJobConf {
@Test @Test
public void testProfileParamsDefaults() { public void testProfileParamsDefaults() {
JobConf configuration = new JobConf(); JobConf configuration = new JobConf();
Assert.assertNull(configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
String result = configuration.getProfileParams(); String result = configuration.getProfileParams();
Assert.assertNotNull(result); Assert.assertNotNull(result);
Assert.assertTrue(result.contains("file=%s")); Assert.assertTrue(result.contains("file=%s"));
Assert.assertTrue(result.startsWith("-agentlib:hprof")); Assert.assertTrue(result.startsWith("-agentlib:hprof"));
@ -24,6 +24,7 @@
import java.util.regex.Matcher; import java.util.regex.Matcher;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.junit.AfterClass;
import org.junit.Assert; import org.junit.Assert;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@ -39,8 +40,7 @@
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.junit.After; import org.junit.BeforeClass;
import org.junit.Before;
import org.junit.Test; import org.junit.Test;
public class TestMRJobsWithProfiler { public class TestMRJobsWithProfiler {
@ -51,6 +51,8 @@ public class TestMRJobsWithProfiler {
private static final EnumSet<RMAppState> TERMINAL_RM_APP_STATES = private static final EnumSet<RMAppState> TERMINAL_RM_APP_STATES =
EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED); EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED);
private static final int PROFILED_TASK_ID = 1;
private static MiniMRYarnCluster mrCluster; private static MiniMRYarnCluster mrCluster;
private static final Configuration CONF = new Configuration(); private static final Configuration CONF = new Configuration();
@ -69,8 +71,8 @@ public class TestMRJobsWithProfiler {
private static final Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar"); private static final Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
@Before @BeforeClass
public void setup() throws InterruptedException, IOException { public static void setup() throws InterruptedException, IOException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
@ -79,7 +81,7 @@ public void setup() throws InterruptedException, IOException {
} }
if (mrCluster == null) { if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(getClass().getName()); mrCluster = new MiniMRYarnCluster(TestMRJobsWithProfiler.class.getName());
mrCluster.init(CONF); mrCluster.init(CONF);
mrCluster.start(); mrCluster.start();
} }
@ -90,8 +92,8 @@ public void setup() throws InterruptedException, IOException {
localFs.setPermission(APP_JAR, new FsPermission("700")); localFs.setPermission(APP_JAR, new FsPermission("700"));
} }
@After @AfterClass
public void tearDown() { public static void tearDown() {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test."); + " not found. Not running test.");
@ -103,10 +105,19 @@ public void tearDown() {
} }
} }
@Test (timeout = 150000)
public void testDefaultProfiler() throws Exception {
LOG.info("Starting testDefaultProfiler");
testProfilerInternal(true);
}
@Test (timeout = 150000) @Test (timeout = 150000)
public void testProfiler() throws IOException, InterruptedException, public void testDifferentProfilers() throws Exception {
ClassNotFoundException { LOG.info("Starting testDifferentProfilers");
testProfilerInternal(false);
}
private void testProfilerInternal(boolean useDefault) throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test."); + " not found. Not running test.");
@ -117,18 +128,19 @@ public void testProfiler() throws IOException, InterruptedException,
final JobConf sleepConf = new JobConf(mrCluster.getConfig()); final JobConf sleepConf = new JobConf(mrCluster.getConfig());
sleepConf.setProfileEnabled(true); sleepConf.setProfileEnabled(true);
// profile map split 1 sleepConf.setProfileTaskRange(true, String.valueOf(PROFILED_TASK_ID));
sleepConf.setProfileTaskRange(true, "1"); sleepConf.setProfileTaskRange(false, String.valueOf(PROFILED_TASK_ID));
// profile reduce of map output partitions 1
sleepConf.setProfileTaskRange(false, "1");
// use hprof for map to profile.out if (!useDefault) {
sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS, // use hprof for map to profile.out
"-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n," sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS,
+ "file=%s"); "-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n,"
+ "file=%s");
// use Xprof for reduce to stdout
sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof");
}
// use Xprof for reduce to stdout
sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof");
sleepJob.setConf(sleepConf); sleepJob.setConf(sleepConf);
// 2-map-2-reduce SleepJob // 2-map-2-reduce SleepJob
@ -205,8 +217,8 @@ public void testProfiler() throws IOException, InterruptedException,
TaskLog.LogName.PROFILE.toString()); TaskLog.LogName.PROFILE.toString());
final Path stdoutPath = new Path(dirEntry.getValue(), final Path stdoutPath = new Path(dirEntry.getValue(),
TaskLog.LogName.STDOUT.toString()); TaskLog.LogName.STDOUT.toString());
if (tid.getTaskType() == TaskType.MAP) { if (useDefault || tid.getTaskType() == TaskType.MAP) {
if (tid.getTaskID().getId() == 1) { if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
// verify profile.out // verify profile.out
final BufferedReader br = new BufferedReader(new InputStreamReader( final BufferedReader br = new BufferedReader(new InputStreamReader(
localFs.open(profilePath))); localFs.open(profilePath)));
@ -222,7 +234,8 @@ public void testProfiler() throws IOException, InterruptedException,
} else { } else {
Assert.assertFalse("hprof file should not exist", Assert.assertFalse("hprof file should not exist",
localFs.exists(profilePath)); localFs.exists(profilePath));
if (tid.getTaskID().getId() == 1) { if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
// reducer is profiled with Xprof
final BufferedReader br = new BufferedReader(new InputStreamReader( final BufferedReader br = new BufferedReader(new InputStreamReader(
localFs.open(stdoutPath))); localFs.open(stdoutPath)));
boolean flatProfFound = false; boolean flatProfFound = false;
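The test above splits the profiler string per task type. A self-contained sketch of that configuration pattern, reusing the MRJobConfig keys and hprof/-Xprof strings shown in the diff (the driver class is hypothetical):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class PerTaskProfilerExample {
  public static JobConf configure(JobConf conf) {
    conf.setProfileEnabled(true);
    // hprof output for map tasks goes to profile.out (%s placeholder).
    conf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS,
        "-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n,"
        + "file=%s");
    // -Xprof writes a flat profile to stdout for reduce tasks.
    conf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof");
    return conf;
  }
}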
@ -373,6 +373,8 @@ private void restoreKey() throws IOException {
private Path workingDir; private Path workingDir;
private long blockSize = MAX_AZURE_BLOCK_SIZE; private long blockSize = MAX_AZURE_BLOCK_SIZE;
private AzureFileSystemInstrumentation instrumentation; private AzureFileSystemInstrumentation instrumentation;
private String metricsSourceName;
private boolean isClosed = false;
private static boolean suppressRetryPolicy = false; private static boolean suppressRetryPolicy = false;
// A counter to create unique (within-process) names for my metrics sources. // A counter to create unique (within-process) names for my metrics sources.
private static AtomicInteger metricsSourceNameCounter = new AtomicInteger(); private static AtomicInteger metricsSourceNameCounter = new AtomicInteger();
@ -482,11 +484,10 @@ public void initialize(URI uri, Configuration conf) throws IOException {
// Make sure the metrics system is available before interacting with Azure // Make sure the metrics system is available before interacting with Azure
AzureFileSystemMetricsSystem.fileSystemStarted(); AzureFileSystemMetricsSystem.fileSystemStarted();
String sourceName = newMetricsSourceName(), metricsSourceName = newMetricsSourceName();
sourceDesc = "Azure Storage Volume File System metrics"; String sourceDesc = "Azure Storage Volume File System metrics";
instrumentation = DefaultMetricsSystem.instance().register(sourceName, instrumentation = new AzureFileSystemInstrumentation(conf);
sourceDesc, new AzureFileSystemInstrumentation(conf)); AzureFileSystemMetricsSystem.registerSource(metricsSourceName, sourceDesc,
AzureFileSystemMetricsSystem.registerSource(sourceName, sourceDesc,
instrumentation); instrumentation);
store.initialize(uri, conf, instrumentation); store.initialize(uri, conf, instrumentation);
@ -502,7 +503,6 @@ public void initialize(URI uri, Configuration conf) throws IOException {
LOG.debug(" blockSize = " LOG.debug(" blockSize = "
+ conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME, MAX_AZURE_BLOCK_SIZE)); + conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME, MAX_AZURE_BLOCK_SIZE));
} }
} }
private NativeFileSystemStore createDefaultStore(Configuration conf) { private NativeFileSystemStore createDefaultStore(Configuration conf) {
@ -1337,7 +1337,11 @@ public void setOwner(Path p, String username, String groupname)
} }
@Override @Override
public void close() throws IOException { public synchronized void close() throws IOException {
if (isClosed) {
return;
}
// Call the base close() to close any resources there. // Call the base close() to close any resources there.
super.close(); super.close();
// Close the store // Close the store
@ -1349,12 +1353,14 @@ public void close() throws IOException {
long startTime = System.currentTimeMillis(); long startTime = System.currentTimeMillis();
AzureFileSystemMetricsSystem.unregisterSource(metricsSourceName);
AzureFileSystemMetricsSystem.fileSystemClosed(); AzureFileSystemMetricsSystem.fileSystemClosed();
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Submitting metrics when file system closed took " LOG.debug("Submitting metrics when file system closed took "
+ (System.currentTimeMillis() - startTime) + " ms."); + (System.currentTimeMillis() - startTime) + " ms.");
} }
isClosed = true;
} }
/** /**
@ -1498,6 +1504,13 @@ public void deleteFilesWithDanglingTempData(Path root) throws IOException {
handleFilesWithDanglingTempData(root, new DanglingFileDeleter()); handleFilesWithDanglingTempData(root, new DanglingFileDeleter());
} }
@Override
protected void finalize() throws Throwable {
LOG.debug("finalize() called.");
close();
super.finalize();
}
/** /**
* Encode the key with a random prefix for load balancing in Azure storage. * Encode the key with a random prefix for load balancing in Azure storage.
* Upload data to a random temporary file then do storage side renaming to * Upload data to a random temporary file then do storage side renaming to
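The close() change above is an instance of the guarded-close idiom: remember that close already ran, so a second call, including the one issued from finalize(), becomes a no-op. A generic sketch of the idiom (illustrative only, not the Azure class):

import java.io.Closeable;
import java.io.IOException;

public class GuardedCloseExample implements Closeable {
  private boolean closed = false;

  @Override
  public synchronized void close() throws IOException {
    // A repeated close() (e.g. from finalize()) returns immediately.
    if (closed) {
      return;
    }
    // ... release resources, unregister metrics sources, etc. ...
    closed = true;
  }

  @Override
  protected void finalize() throws Throwable {
    try {
      close();
    } finally {
      super.finalize();
    }
  }
}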
@ -44,21 +44,26 @@ public static synchronized void fileSystemStarted() {
} }
public static synchronized void fileSystemClosed() { public static synchronized void fileSystemClosed() {
if (instance != null) {
instance.publishMetricsNow();
}
if (numFileSystems == 1) { if (numFileSystems == 1) {
instance.publishMetricsNow();
instance.stop(); instance.stop();
instance.shutdown(); instance.shutdown();
instance = null; instance = null;
} }
numFileSystems--; numFileSystems--;
} }
public static void registerSource(String name, String desc, public static void registerSource(String name, String desc,
MetricsSource source) { MetricsSource source) {
// Register the source with the name appended with -WasbSystem // the caller has to use a unique name to register the source
// so that the name is globally unique. instance.register(name, desc, source);
instance.register(name + "-WasbSystem", desc, source); }
public static synchronized void unregisterSource(String name) {
if (instance != null) {
// publish metrics before unregistering a metrics source
instance.publishMetricsNow();
instance.unregisterSource(name);
}
} }
} }
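The register/unregister pairing above leans on the Hadoop metrics2 MetricsSystem API (unregisterSource comes from HADOOP-10839 in this change set). Below is a minimal lifecycle sketch with a hypothetical source, assuming the metrics2 interfaces behave as documented:

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class MetricsLifecycle {
  // A trivial source; real sources add gauges/counters to the collector.
  static class ExampleSource implements MetricsSource {
    @Override
    public void getMetrics(MetricsCollector collector, boolean all) {
      collector.addRecord("Example").setContext("example");
    }
  }

  public static void main(String[] args) {
    MetricsSystem ms = DefaultMetricsSystem.initialize("example");
    // Each source must be registered under a unique name.
    ms.register("ExampleSource-1", "Example metrics source",
        new ExampleSource());
    // ... metrics are published while the source is registered ...
    ms.unregisterSource("ExampleSource-1"); // added by HADOOP-10839
    ms.shutdown();
  }
}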
@ -324,9 +324,7 @@ public static AzureBlobStorageTestAccount createOutOfBandStore(
String sourceName = NativeAzureFileSystem.newMetricsSourceName(); String sourceName = NativeAzureFileSystem.newMetricsSourceName();
String sourceDesc = "Azure Storage Volume File System metrics"; String sourceDesc = "Azure Storage Volume File System metrics";
AzureFileSystemInstrumentation instrumentation = AzureFileSystemInstrumentation instrumentation = new AzureFileSystemInstrumentation(conf);
DefaultMetricsSystem.instance().register(sourceName,
sourceDesc, new AzureFileSystemInstrumentation(conf));
AzureFileSystemMetricsSystem.registerSource( AzureFileSystemMetricsSystem.registerSource(
sourceName, sourceDesc, instrumentation); sourceName, sourceDesc, instrumentation);
@ -516,6 +516,13 @@ public void testListSlash() throws Exception {
assertNotNull(status); assertNotNull(status);
} }
@Test
public void testCloseFileSystemTwice() throws Exception {
//make sure close() can be called multiple times without doing any harm
fs.close();
fs.close();
}
private boolean testModifiedTime(Path testPath, long time) throws Exception { private boolean testModifiedTime(Path testPath, long time) throws Exception {
FileStatus fileStatus = fs.getFileStatus(testPath); FileStatus fileStatus = fs.getFileStatus(testPath);
final long errorMargin = modifiedTimeErrorMargin; final long errorMargin = modifiedTimeErrorMargin;
@ -162,6 +162,7 @@ public enum DistCpOptionSwitch {
BANDWIDTH(DistCpConstants.CONF_LABEL_BANDWIDTH_MB, BANDWIDTH(DistCpConstants.CONF_LABEL_BANDWIDTH_MB,
new Option("bandwidth", true, "Specify bandwidth per map in MB")); new Option("bandwidth", true, "Specify bandwidth per map in MB"));
static final String PRESERVE_STATUS_DEFAULT = "-prbugpc";
private final String confLabel; private final String confLabel;
private final Option option; private final Option option;
@ -50,7 +50,7 @@ private static class CustomParser extends GnuParser {
protected String[] flatten(Options options, String[] arguments, boolean stopAtNonOption) { protected String[] flatten(Options options, String[] arguments, boolean stopAtNonOption) {
for (int index = 0; index < arguments.length; index++) { for (int index = 0; index < arguments.length; index++) {
if (arguments[index].equals("-" + DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) { if (arguments[index].equals("-" + DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
arguments[index] = "-prbugpc"; arguments[index] = DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT;
} }
} }
return super.flatten(options, arguments, stopAtNonOption); return super.flatten(options, arguments, stopAtNonOption);
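The new constant names the attribute string that a bare -p is flattened to (r, b, u, g, p, c: replication, block size, user, group, permission, checksum type). A hedged sketch of how a caller could confirm the parsed result, assuming the OptionsParser/DistCpOptions API in this tree; the HDFS paths are placeholders:

import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.apache.hadoop.tools.OptionsParser;

public class PreserveDefaultExample {
  public static void main(String[] args) {
    // A bare -p is rewritten to the default attribute string before parsing.
    DistCpOptions options = OptionsParser.parse(new String[] {
        "-p", "hdfs://nn/src", "hdfs://nn/dst" });
    // With the default, user and permission (among others) are preserved.
    System.out.println(options.shouldPreserve(FileAttribute.USER));
    System.out.println(options.shouldPreserve(FileAttribute.PERMISSION));
  }
}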
@ -43,6 +43,12 @@ Release 2.6.0 - UNRELEASED
YARN-2274. FairScheduler: Add debug information about cluster capacity, YARN-2274. FairScheduler: Add debug information about cluster capacity,
availability and reservations. (kasha) availability and reservations. (kasha)
YARN-2228. Augmented TimelineServer to load pseudo authentication filter when
authentication = simple. (Zhijie Shen via vinodkv)
YARN-1341. Recover NMTokens upon nodemanager restart. (Jason Lowe via
junping_du)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -53,6 +59,16 @@ Release 2.6.0 - UNRELEASED
YARN-2088. Fixed a bug in GetApplicationsRequestPBImpl#mergeLocalToBuilder. YARN-2088. Fixed a bug in GetApplicationsRequestPBImpl#mergeLocalToBuilder.
(Binglin Chang via jianhe) (Binglin Chang via jianhe)
YARN-2260. Fixed ResourceManager's RMNode to correctly remember containers
when nodes resync during work-preserving RM restart. (Jian He via vinodkv)
YARN-2264. Fixed a race condition in DrainDispatcher which may cause random
test failures. (Li Lu via jianhe)
YARN-2219. Changed ResourceManager to avoid AMs and NMs getting exceptions
after RM recovery but before scheduler learns about apps and app-attempts.
(Jian He via vinodkv)
Release 2.5.0 - UNRELEASED Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -89,6 +105,9 @@ Release 2.5.0 - UNRELEASED
YARN-1713. Added get-new-app and submit-app functionality to RM web services. YARN-1713. Added get-new-app and submit-app functionality to RM web services.
(Varun Vasudev via vinodkv) (Varun Vasudev via vinodkv)
YARN-2233. Implemented ResourceManager web-services to create, renew and
cancel delegation tokens. (Varun Vasudev via vinodkv)
IMPROVEMENTS IMPROVEMENTS
YARN-1479. Invalid NaN values in Hadoop REST API JSON response (Chen He via YARN-1479. Invalid NaN values in Hadoop REST API JSON response (Chen He via
@ -253,6 +272,9 @@ Release 2.5.0 - UNRELEASED
YARN-2241. ZKRMStateStore: On startup, show nicer messages if znodes already YARN-2241. ZKRMStateStore: On startup, show nicer messages if znodes already
exist. (Robert Kanter via kasha) exist. (Robert Kanter via kasha)
YARN-1408. Preemption caused Invalid State Event: ACQUIRED at KILLED and
caused a task timeout for 30mins. (Sunil G via mayank)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -145,6 +145,7 @@ case $startStop in
else else
echo no $command to stop echo no $command to stop
fi fi
rm -f $pid
else else
echo no $command to stop echo no $command to stop
fi fi
@ -72,6 +72,7 @@ public class TimelineClientImpl extends TimelineClient {
private static final Log LOG = LogFactory.getLog(TimelineClientImpl.class); private static final Log LOG = LogFactory.getLog(TimelineClientImpl.class);
private static final String RESOURCE_URI_STR = "/ws/v1/timeline/"; private static final String RESOURCE_URI_STR = "/ws/v1/timeline/";
private static final String URL_PARAM_USER_NAME = "user.name";
private static final Joiner JOINER = Joiner.on(""); private static final Joiner JOINER = Joiner.on("");
private static Options opts; private static Options opts;
static { static {
@ -84,17 +85,18 @@ public class TimelineClientImpl extends TimelineClient {
private Client client; private Client client;
private URI resURI; private URI resURI;
private boolean isEnabled; private boolean isEnabled;
private TimelineAuthenticatedURLConnectionFactory urlFactory; private KerberosAuthenticatedURLConnectionFactory urlFactory;
public TimelineClientImpl() { public TimelineClientImpl() {
super(TimelineClientImpl.class.getName()); super(TimelineClientImpl.class.getName());
ClientConfig cc = new DefaultClientConfig(); ClientConfig cc = new DefaultClientConfig();
cc.getClasses().add(YarnJacksonJaxbJsonProvider.class); cc.getClasses().add(YarnJacksonJaxbJsonProvider.class);
if (UserGroupInformation.isSecurityEnabled()) { if (UserGroupInformation.isSecurityEnabled()) {
urlFactory = new TimelineAuthenticatedURLConnectionFactory(); urlFactory = new KerberosAuthenticatedURLConnectionFactory();
client = new Client(new URLConnectionClientHandler(urlFactory), cc); client = new Client(new URLConnectionClientHandler(urlFactory), cc);
} else { } else {
client = Client.create(cc); client = new Client(new URLConnectionClientHandler(
new PseudoAuthenticatedURLConnectionFactory()), cc);
} }
} }
@ -177,7 +179,23 @@ public ClientResponse doPostingEntities(TimelineEntities entities) {
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
} }
private static class TimelineAuthenticatedURLConnectionFactory private static class PseudoAuthenticatedURLConnectionFactory
implements HttpURLConnectionFactory {
@Override
public HttpURLConnection getHttpURLConnection(URL url) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(URL_PARAM_USER_NAME,
UserGroupInformation.getCurrentUser().getShortUserName());
url = TimelineAuthenticator.appendParams(url, params);
if (LOG.isDebugEnabled()) {
LOG.debug("URL with delegation token: " + url);
}
return (HttpURLConnection) url.openConnection();
}
}
private static class KerberosAuthenticatedURLConnectionFactory
implements HttpURLConnectionFactory { implements HttpURLConnectionFactory {
private AuthenticatedURL.Token token; private AuthenticatedURL.Token token;
@ -185,7 +203,7 @@ private static class TimelineAuthenticatedURLConnectionFactory
private Token<TimelineDelegationTokenIdentifier> dToken; private Token<TimelineDelegationTokenIdentifier> dToken;
private Text service; private Text service;
public TimelineAuthenticatedURLConnectionFactory() { public KerberosAuthenticatedURLConnectionFactory() {
token = new AuthenticatedURL.Token(); token = new AuthenticatedURL.Token();
authenticator = new TimelineAuthenticator(); authenticator = new TimelineAuthenticator();
} }
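For context on what the two connection factories above are for: requests issued through the public TimelineClient API either carry a user.name query parameter (simple auth) or go through the Kerberos + delegation-token path. A usage sketch under that assumption (not part of the patch):

import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelinePostExample {
  public static void main(String[] args) throws Exception {
    TimelineClient client = TimelineClient.createTimelineClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      TimelineEntity entity = new TimelineEntity();
      entity.setEntityType("EXAMPLE_TYPE");
      entity.setEntityId("example-1");
      entity.setStartTime(System.currentTimeMillis());
      // In simple auth the request URL gains a user.name parameter; with
      // Kerberos the Kerberos/DT connection factory is used instead.
      client.putEntities(entity);
    } finally {
      client.stop();
    }
  }
}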
@ -0,0 +1,43 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.webapp;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response.Status;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public class ForbiddenException extends WebApplicationException {
private static final long serialVersionUID = 1L;
public ForbiddenException() {
super(Status.FORBIDDEN);
}
public ForbiddenException(java.lang.Throwable cause) {
super(cause, Status.FORBIDDEN);
}
public ForbiddenException(String msg) {
super(new Exception(msg), Status.FORBIDDEN);
}
}
@ -81,6 +81,8 @@ public Response toResponse(Exception e) {
s = Response.Status.NOT_FOUND; s = Response.Status.NOT_FOUND;
} else if (e instanceof IOException) { } else if (e instanceof IOException) {
s = Response.Status.NOT_FOUND; s = Response.Status.NOT_FOUND;
} else if (e instanceof ForbiddenException) {
s = Response.Status.FORBIDDEN;
} else if (e instanceof UnsupportedOperationException) { } else if (e instanceof UnsupportedOperationException) {
s = Response.Status.BAD_REQUEST; s = Response.Status.BAD_REQUEST;
} else if (e instanceof IllegalArgumentException) { } else if (e instanceof IllegalArgumentException) {
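Tying the new ForbiddenException to the handler mapping above, a short hedged sketch of how server-side code can reject an unidentified caller and have it surface as HTTP 403; the helper class and message are hypothetical:

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.webapp.ForbiddenException;

public class OwnerCheck {
  // Web-service code can reject an unidentified caller; the
  // GenericExceptionHandler maps this exception to a 403 response.
  static void requireCaller(UserGroupInformation callerUGI) {
    if (callerUGI == null) {
      throw new ForbiddenException("The caller identity is not set");
    }
  }
}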
@ -1217,6 +1217,24 @@
<value>10</value> <value>10</value>
</property> </property>
<property>
<name>yarn.timeline-service.http-authentication.type</name>
<value>simple</value>
<description>
Defines authentication used for the timeline server HTTP endpoint.
Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
</description>
</property>
<property>
<name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
<value>true</value>
<description>
Indicates if anonymous requests are allowed by the timeline server when using
'simple' authentication.
</description>
</property>
<property> <property>
<description>The Kerberos principal for the timeline server.</description> <description>The Kerberos principal for the timeline server.</description>
<name>yarn.timeline-service.principal</name> <name>yarn.timeline-service.principal</name>
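A minimal sketch of flipping the two new properties programmatically, e.g. for a secured deployment. The keys are spelled out as strings because this diff shows no constants for them; the class name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelineAuthConfigExample {
  public static Configuration secureTimelineConf() {
    Configuration conf = new YarnConfiguration();
    // Switch the timeline server HTTP endpoint to Kerberos and stop
    // accepting anonymous requests.
    conf.set("yarn.timeline-service.http-authentication.type", "kerberos");
    conf.setBoolean(
        "yarn.timeline-service.http-authentication.simple.anonymous.allowed",
        false);
    return conf;
  }
}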
@ -28,6 +28,7 @@ public class DrainDispatcher extends AsyncDispatcher {
// and similar grotesqueries // and similar grotesqueries
private volatile boolean drained = false; private volatile boolean drained = false;
private final BlockingQueue<Event> queue; private final BlockingQueue<Event> queue;
final Object mutex;
public DrainDispatcher() { public DrainDispatcher() {
this(new LinkedBlockingQueue<Event>()); this(new LinkedBlockingQueue<Event>());
@ -36,6 +37,7 @@ public DrainDispatcher() {
private DrainDispatcher(BlockingQueue<Event> eventQueue) { private DrainDispatcher(BlockingQueue<Event> eventQueue) {
super(eventQueue); super(eventQueue);
this.queue = eventQueue; this.queue = eventQueue;
this.mutex = this;
} }
/** /**
@ -53,8 +55,10 @@ Runnable createThread() {
@Override @Override
public void run() { public void run() {
while (!Thread.currentThread().isInterrupted()) { while (!Thread.currentThread().isInterrupted()) {
// !drained if dispatch queued new events on this dispatcher synchronized (mutex) {
drained = queue.isEmpty(); // !drained if dispatch queued new events on this dispatcher
drained = queue.isEmpty();
}
Event event; Event event;
try { try {
event = queue.take(); event = queue.take();
@ -75,8 +79,10 @@ public EventHandler getEventHandler() {
return new EventHandler() { return new EventHandler() {
@Override @Override
public void handle(Event event) { public void handle(Event event) {
drained = false; synchronized (mutex) {
actual.handle(event); actual.handle(event);
drained = false;
}
} }
}; };
} }
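The race closed above is between the dispatcher thread recomputing drained and a concurrent handle() enqueueing a new event; with both updates under one mutex, a racing handle() can no longer be overwritten by a stale drained = true, so await() cannot return while an event is still queued. A stripped-down, generic sketch of the corrected pattern (not the YARN class itself):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class DrainSketch {
  private final BlockingQueue<Runnable> queue =
      new LinkedBlockingQueue<Runnable>();
  private final Object mutex = new Object();
  private volatile boolean drained = true;

  // Producer: enqueue and clear "drained" atomically with respect to the
  // consumer's emptiness check.
  public void handle(Runnable event) {
    synchronized (mutex) {
      queue.add(event);
      drained = false;
    }
  }

  // Consumer loop: recompute "drained" under the same lock before blocking.
  public void dispatchLoop() throws InterruptedException {
    while (!Thread.currentThread().isInterrupted()) {
      synchronized (mutex) {
        drained = queue.isEmpty();
      }
      Runnable event = queue.take();
      event.run();
    }
  }

  // Test helper: spin until every queued event has been dispatched.
  public void await() throws InterruptedException {
    while (!drained) {
      Thread.sleep(50);
    }
  }
}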
@ -28,7 +28,6 @@
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service; import org.apache.hadoop.service.Service;
import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ExitUtil;
@ -178,23 +177,20 @@ protected TimelineACLsManager createTimelineACLsManager(Configuration conf) {
protected void startWebApp() { protected void startWebApp() {
Configuration conf = getConfig(); Configuration conf = getConfig();
// Play trick to make the customized filter will only be loaded by the // Always load the pseudo authentication filter to parse "user.name" in a URL
// timeline server when security is enabled and Kerberos authentication // to identify an HTTP request's user in insecure mode.
// is used. // When Kerberos authentication type is set (i.e., secure mode is turned on),
if (UserGroupInformation.isSecurityEnabled() // the customized filter will be loaded by the timeline server to do Kerberos
&& conf // + DT authentication.
.get(TimelineAuthenticationFilterInitializer.PREFIX + "type", "") String initializers = conf.get("hadoop.http.filter.initializers");
.equals("kerberos")) { initializers =
String initializers = conf.get("hadoop.http.filter.initializers"); initializers == null || initializers.length() == 0 ? "" : ","
initializers = + initializers;
initializers == null || initializers.length() == 0 ? "" : "," if (!initializers.contains(
+ initializers; TimelineAuthenticationFilterInitializer.class.getName())) {
if (!initializers.contains( conf.set("hadoop.http.filter.initializers",
TimelineAuthenticationFilterInitializer.class.getName())) { TimelineAuthenticationFilterInitializer.class.getName()
conf.set("hadoop.http.filter.initializers", + initializers);
TimelineAuthenticationFilterInitializer.class.getName()
+ initializers);
}
} }
String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(conf); String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(conf);
LOG.info("Instantiating AHSWebApp at " + bindAddress); LOG.info("Instantiating AHSWebApp at " + bindAddress);
@ -51,7 +51,8 @@ public TimelineACLsManager(Configuration conf) {
public boolean checkAccess(UserGroupInformation callerUGI, public boolean checkAccess(UserGroupInformation callerUGI,
TimelineEntity entity) throws YarnException, IOException { TimelineEntity entity) throws YarnException, IOException {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Verifying the access of " + callerUGI.getShortUserName() LOG.debug("Verifying the access of "
+ (callerUGI == null ? null : callerUGI.getShortUserName())
+ " on the timeline entity " + " on the timeline entity "
+ new EntityIdentifier(entity.getEntityId(), entity.getEntityType())); + new EntityIdentifier(entity.getEntityId(), entity.getEntityType()));
} }
@ -38,7 +38,8 @@ protected Properties getConfiguration(String configPrefix,
// to replace the name here to use the customized Kerberos + DT service // to replace the name here to use the customized Kerberos + DT service
// instead of the standard Kerberos handler. // instead of the standard Kerberos handler.
Properties properties = super.getConfiguration(configPrefix, filterConfig); Properties properties = super.getConfiguration(configPrefix, filterConfig);
if (properties.getProperty(AUTH_TYPE).equals("kerberos")) { String authType = properties.getProperty(AUTH_TYPE);
if (authType != null && authType.equals("kerberos")) {
properties.setProperty( properties.setProperty(
AUTH_TYPE, TimelineClientAuthenticationService.class.getName()); AUTH_TYPE, TimelineClientAuthenticationService.class.getName());
} }
@ -47,9 +47,9 @@
public class TimelineAuthenticationFilterInitializer extends FilterInitializer { public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
/** /**
* The configuration prefix of timeline Kerberos + DT authentication * The configuration prefix of timeline HTTP authentication
*/ */
public static final String PREFIX = "yarn.timeline-service.http.authentication."; public static final String PREFIX = "yarn.timeline-service.http-authentication.";
private static final String SIGNATURE_SECRET_FILE = private static final String SIGNATURE_SECRET_FILE =
TimelineAuthenticationFilter.SIGNATURE_SECRET + ".file"; TimelineAuthenticationFilter.SIGNATURE_SECRET + ".file";
@ -62,11 +62,12 @@
import org.apache.hadoop.yarn.server.timeline.EntityIdentifier; import org.apache.hadoop.yarn.server.timeline.EntityIdentifier;
import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper; import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
import org.apache.hadoop.yarn.server.timeline.NameValuePair; import org.apache.hadoop.yarn.server.timeline.NameValuePair;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field; import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager; import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils; import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.apache.hadoop.yarn.webapp.BadRequestException; import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.ForbiddenException;
import org.apache.hadoop.yarn.webapp.NotFoundException; import org.apache.hadoop.yarn.webapp.NotFoundException;
import com.google.inject.Inject; import com.google.inject.Inject;
@ -336,6 +337,11 @@ public TimelinePutResponse postEntities(
return new TimelinePutResponse(); return new TimelinePutResponse();
} }
UserGroupInformation callerUGI = getUser(req); UserGroupInformation callerUGI = getUser(req);
if (callerUGI == null) {
String msg = "The owner of the posted timeline entities is not set";
LOG.error(msg);
throw new ForbiddenException(msg);
}
try { try {
List<EntityIdentifier> entityIDs = new ArrayList<EntityIdentifier>(); List<EntityIdentifier> entityIDs = new ArrayList<EntityIdentifier>();
TimelineEntities entitiesToPut = new TimelineEntities(); TimelineEntities entitiesToPut = new TimelineEntities();
@ -375,8 +381,7 @@ public TimelinePutResponse postEntities(
// the timeline data. // the timeline data.
try { try {
if (existingEntity == null) { if (existingEntity == null) {
injectOwnerInfo(entity, injectOwnerInfo(entity, callerUGI.getShortUserName());
callerUGI == null ? "" : callerUGI.getShortUserName());
} }
} catch (YarnException e) { } catch (YarnException e) {
// Skip the entity which messes up the primary filter and record the // Skip the entity which messes up the primary filter and record the
@ -198,7 +198,7 @@ public void testMassiveWriteContainerHistory() throws IOException {
writeContainerFinishData(containerId); writeContainerFinishData(containerId);
} }
long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb; long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb;
Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 200); Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 400);
} }
} }
@ -19,26 +19,26 @@
package org.apache.hadoop.yarn.server.timeline.webapp; package org.apache.hadoop.yarn.server.timeline.webapp;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.io.IOException; import java.util.Enumeration;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig; import javax.servlet.FilterConfig;
import javax.servlet.ServletException; import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MediaType;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
@ -46,12 +46,11 @@
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse; import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError; import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AdminACLsManager; import org.apache.hadoop.yarn.security.AdminACLsManager;
import org.apache.hadoop.yarn.server.timeline.TestMemoryTimelineStore; import org.apache.hadoop.yarn.server.timeline.TestMemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineStore; import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager; import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.server.timeline.webapp.TimelineWebServices.AboutInfo; import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.junit.Assert; import org.junit.Assert;
@ -74,11 +73,11 @@ public class TestTimelineWebServices extends JerseyTest {
private static TimelineStore store; private static TimelineStore store;
private static TimelineACLsManager timelineACLsManager; private static TimelineACLsManager timelineACLsManager;
private static AdminACLsManager adminACLsManager; private static AdminACLsManager adminACLsManager;
private static String remoteUser;
private long beforeTime; private long beforeTime;
private Injector injector = Guice.createInjector(new ServletModule() { private Injector injector = Guice.createInjector(new ServletModule() {
@SuppressWarnings("unchecked")
@Override @Override
protected void configureServlets() { protected void configureServlets() {
bind(YarnJacksonJaxbJsonProvider.class); bind(YarnJacksonJaxbJsonProvider.class);
@ -98,7 +97,35 @@ protected void configureServlets() {
adminACLsManager = new AdminACLsManager(conf); adminACLsManager = new AdminACLsManager(conf);
bind(TimelineACLsManager.class).toInstance(timelineACLsManager); bind(TimelineACLsManager.class).toInstance(timelineACLsManager);
serve("/*").with(GuiceContainer.class); serve("/*").with(GuiceContainer.class);
filter("/*").through(TestFilter.class); TimelineAuthenticationFilter taFilter = new TimelineAuthenticationFilter();
FilterConfig filterConfig = mock(FilterConfig.class);
when(filterConfig.getInitParameter(AuthenticationFilter.CONFIG_PREFIX))
.thenReturn(null);
when(filterConfig.getInitParameter(AuthenticationFilter.AUTH_TYPE))
.thenReturn("simple");
when(filterConfig.getInitParameter(
PseudoAuthenticationHandler.ANONYMOUS_ALLOWED)).thenReturn("true");
Enumeration<Object> names = mock(Enumeration.class);
when(names.hasMoreElements()).thenReturn(true, true, false);
when(names.nextElement()).thenReturn(
AuthenticationFilter.AUTH_TYPE,
PseudoAuthenticationHandler.ANONYMOUS_ALLOWED);
when(filterConfig.getInitParameterNames()).thenReturn(names);
try {
taFilter.init(filterConfig);
} catch (ServletException e) {
Assert.fail("Unable to initialize TimelineAuthenticationFilter: " +
e.getMessage());
}
taFilter = spy(taFilter);
try {
doNothing().when(taFilter).init(any(FilterConfig.class));
} catch (ServletException e) {
Assert.fail("Unable to initialize TimelineAuthenticationFilter: " +
e.getMessage());
}
filter("/*").through(taFilter);
} }
}); });
@ -382,6 +409,7 @@ public void testPostEntitiesWithPrimaryFilter() throws Exception {
entities.addEntity(entity); entities.addEntity(entity);
WebResource r = resource(); WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline") ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
@ -401,11 +429,21 @@ public void testPostEntities() throws Exception {
entity.setStartTime(System.currentTimeMillis()); entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity); entities.addEntity(entity);
WebResource r = resource(); WebResource r = resource();
// No owner, will be rejected
ClientResponse response = r.path("ws").path("v1").path("timeline") ClientResponse response = r.path("ws").path("v1").path("timeline")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(ClientResponse.Status.FORBIDDEN,
response.getClientResponseStatus());
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelinePutResponse putResposne = response.getEntity(TimelinePutResponse.class); TimelinePutResponse putResposne = response.getEntity(TimelinePutResponse.class);
Assert.assertNotNull(putResposne); Assert.assertNotNull(putResposne);
Assert.assertEquals(0, putResposne.getErrors().size()); Assert.assertEquals(0, putResposne.getErrors().size());
@ -425,7 +463,6 @@ public void testPostEntities() throws Exception {
public void testPostEntitiesWithYarnACLsEnabled() throws Exception { public void testPostEntitiesWithYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager = AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager); timelineACLsManager.setAdminACLsManager(adminACLsManager);
remoteUser = "tester";
try { try {
TimelineEntities entities = new TimelineEntities(); TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity(); TimelineEntity entity = new TimelineEntity();
@ -435,6 +472,7 @@ public void testPostEntitiesWithYarnACLsEnabled() throws Exception {
entities.addEntity(entity); entities.addEntity(entity);
WebResource r = resource(); WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline") ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
@ -444,8 +482,8 @@ public void testPostEntitiesWithYarnACLsEnabled() throws Exception {
Assert.assertEquals(0, putResponse.getErrors().size()); Assert.assertEquals(0, putResponse.getErrors().size());
// override/append timeline data in the same entity with different user // override/append timeline data in the same entity with different user
remoteUser = "other";
response = r.path("ws").path("v1").path("timeline") response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "other")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
@ -457,7 +495,6 @@ public void testPostEntitiesWithYarnACLsEnabled() throws Exception {
putResponse.getErrors().get(0).getErrorCode()); putResponse.getErrors().get(0).getErrorCode());
} finally { } finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
remoteUser = null;
} }
} }
@ -465,7 +502,6 @@ public void testPostEntitiesWithYarnACLsEnabled() throws Exception {
public void testGetEntityWithYarnACLsEnabled() throws Exception { public void testGetEntityWithYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager = AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager); timelineACLsManager.setAdminACLsManager(adminACLsManager);
remoteUser = "tester";
try { try {
TimelineEntities entities = new TimelineEntities(); TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity(); TimelineEntity entity = new TimelineEntity();
@ -475,6 +511,7 @@ public void testGetEntityWithYarnACLsEnabled() throws Exception {
entities.addEntity(entity); entities.addEntity(entity);
WebResource r = resource(); WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline") ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
@ -482,6 +519,7 @@ public void testGetEntityWithYarnACLsEnabled() throws Exception {
// 1. No field specification // 1. No field specification
response = r.path("ws").path("v1").path("timeline") response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3") .path("test type 3").path("test id 3")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class); .get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
@ -492,6 +530,7 @@ public void testGetEntityWithYarnACLsEnabled() throws Exception {
response = r.path("ws").path("v1").path("timeline") response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3") .path("test type 3").path("test id 3")
.queryParam("fields", "relatedentities") .queryParam("fields", "relatedentities")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class); .get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
@ -502,6 +541,7 @@ public void testGetEntityWithYarnACLsEnabled() throws Exception {
response = r.path("ws").path("v1").path("timeline") response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3") .path("test type 3").path("test id 3")
.queryParam("fields", "primaryfilters") .queryParam("fields", "primaryfilters")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class); .get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
@ -510,9 +550,9 @@ public void testGetEntityWithYarnACLsEnabled() throws Exception {
TimelineStore.SystemFilter.ENTITY_OWNER.toString())); TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// get entity with other user // get entity with other user
remoteUser = "other";
response = r.path("ws").path("v1").path("timeline") response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3") .path("test type 3").path("test id 3")
.queryParam("user.name", "other")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class); .get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
@ -520,7 +560,6 @@ public void testGetEntityWithYarnACLsEnabled() throws Exception {
response.getClientResponseStatus()); response.getClientResponseStatus());
} finally { } finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
remoteUser = null;
} }
} }
@ -528,7 +567,6 @@ public void testGetEntityWithYarnACLsEnabled() throws Exception {
public void testGetEntitiesWithYarnACLsEnabled() { public void testGetEntitiesWithYarnACLsEnabled() {
AdminACLsManager oldAdminACLsManager = AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager); timelineACLsManager.setAdminACLsManager(adminACLsManager);
remoteUser = "tester";
try { try {
TimelineEntities entities = new TimelineEntities(); TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity(); TimelineEntity entity = new TimelineEntity();
@ -538,11 +576,11 @@ public void testGetEntitiesWithYarnACLsEnabled() {
entities.addEntity(entity); entities.addEntity(entity);
WebResource r = resource(); WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline") ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
remoteUser = "other";
entities = new TimelineEntities(); entities = new TimelineEntities();
entity = new TimelineEntity(); entity = new TimelineEntity();
entity.setEntityId("test id 5"); entity.setEntityId("test id 5");
@ -551,11 +589,13 @@ public void testGetEntitiesWithYarnACLsEnabled() {
entities.addEntity(entity); entities.addEntity(entity);
r = resource(); r = resource();
response = r.path("ws").path("v1").path("timeline") response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "other")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
response = r.path("ws").path("v1").path("timeline") response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "other")
.path("test type 4") .path("test type 4")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class); .get(ClientResponse.class);
@ -566,7 +606,6 @@ public void testGetEntitiesWithYarnACLsEnabled() {
assertEquals("test id 5", entities.getEntities().get(0).getEntityId()); assertEquals("test id 5", entities.getEntities().get(0).getEntityId());
} finally { } finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
remoteUser = null;
} }
} }
@ -574,7 +613,6 @@ public void testGetEntitiesWithYarnACLsEnabled() {
public void testGetEventsWithYarnACLsEnabled() { public void testGetEventsWithYarnACLsEnabled() {
AdminACLsManager oldAdminACLsManager = AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager); timelineACLsManager.setAdminACLsManager(adminACLsManager);
remoteUser = "tester";
try { try {
TimelineEntities entities = new TimelineEntities(); TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity(); TimelineEntity entity = new TimelineEntity();
@ -588,11 +626,11 @@ public void testGetEventsWithYarnACLsEnabled() {
entities.addEntity(entity); entities.addEntity(entity);
WebResource r = resource(); WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline") ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
remoteUser = "other";
entities = new TimelineEntities(); entities = new TimelineEntities();
entity = new TimelineEntity(); entity = new TimelineEntity();
entity.setEntityId("test id 6"); entity.setEntityId("test id 6");
@ -605,12 +643,14 @@ public void testGetEventsWithYarnACLsEnabled() {
entities.addEntity(entity); entities.addEntity(entity);
r = resource(); r = resource();
response = r.path("ws").path("v1").path("timeline") response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "other")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities); .post(ClientResponse.class, entities);
response = r.path("ws").path("v1").path("timeline") response = r.path("ws").path("v1").path("timeline")
.path("test type 5").path("events") .path("test type 5").path("events")
.queryParam("user.name", "other")
.queryParam("entityId", "test id 5,test id 6") .queryParam("entityId", "test id 5,test id 6")
.accept(MediaType.APPLICATION_JSON) .accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class); .get(ClientResponse.class);
@ -620,43 +660,7 @@ public void testGetEventsWithYarnACLsEnabled() {
assertEquals("test id 6", events.getAllEvents().get(0).getEntityId()); assertEquals("test id 6", events.getAllEvents().get(0).getEntityId());
} finally { } finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
remoteUser = null;
} }
} }
@Singleton
private static class TestFilter implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException, ServletException {
if (request instanceof HttpServletRequest) {
request =
new TestHttpServletRequestWrapper((HttpServletRequest) request);
}
chain.doFilter(request, response);
}
@Override
public void destroy() {
}
}
private static class TestHttpServletRequestWrapper extends HttpServletRequestWrapper {
public TestHttpServletRequestWrapper(HttpServletRequest request) {
super(request);
}
@Override
public String getRemoteUser() {
return TestTimelineWebServices.remoteUser;
}
}
} }
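Note on the test change above: the deleted TestFilter / TestHttpServletRequestWrapper pair used to inject the caller identity by overriding getRemoteUser() from a static remoteUser field. The updated tests instead carry the identity in the user.name query parameter; presumably the timeline web app's authentication filter (pseudo auth honors user.name) resolves it to the remote user — the filter wiring itself is not among the files shown in this diff. A minimal re-statement of the new pattern, as a fragment of the same Jersey test class:

    // Hedged sketch: identity travels with the request instead of through a
    // servlet-request wrapper that fakes getRemoteUser().
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("timeline")
        .path("test type 3").path("test id 3")
        .queryParam("user.name", "tester")        // caller identity
        .accept(MediaType.APPLICATION_JSON)
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());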

View File

@ -42,7 +42,7 @@ public class BaseNMTokenSecretManager extends
private static Log LOG = LogFactory private static Log LOG = LogFactory
.getLog(BaseNMTokenSecretManager.class); .getLog(BaseNMTokenSecretManager.class);
private int serialNo = new SecureRandom().nextInt(); protected int serialNo = new SecureRandom().nextInt();
protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
protected final Lock readLock = readWriteLock.readLock(); protected final Lock readLock = readWriteLock.readLock();

View File

@ -169,6 +169,15 @@ private void stopRecoveryStore() throws IOException {
} }
} }
private void recoverTokens(NMTokenSecretManagerInNM nmTokenSecretManager,
NMContainerTokenSecretManager containerTokenSecretManager)
throws IOException {
if (nmStore.canRecover()) {
nmTokenSecretManager.recover(nmStore.loadNMTokenState());
// TODO: recover containerTokenSecretManager
}
}
@Override @Override
protected void serviceInit(Configuration conf) throws Exception { protected void serviceInit(Configuration conf) throws Exception {
@ -184,7 +193,9 @@ protected void serviceInit(Configuration conf) throws Exception {
new NMContainerTokenSecretManager(conf); new NMContainerTokenSecretManager(conf);
NMTokenSecretManagerInNM nmTokenSecretManager = NMTokenSecretManagerInNM nmTokenSecretManager =
new NMTokenSecretManagerInNM(); new NMTokenSecretManagerInNM(nmStore);
recoverTokens(nmTokenSecretManager, containerTokenSecretManager);
this.aclsManager = new ApplicationACLsManager(conf); this.aclsManager = new ApplicationACLsManager(conf);
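The recoverTokens hook above runs during serviceInit, before the secret managers are handed to the rest of the NodeManager, and it is gated on the state store. The sketch below is illustrative only: it assumes canRecover() returns false for NMNullStateStoreService (consistent with its loadNMTokenState() throwing UnsupportedOperationException) and assumes the usual package locations for these classes; the NMTokenSecretManagerInNM changes themselves are not part of the files shown here.

    // Hedged sketch of the wiring, not code from the patch.
    import java.io.IOException;
    import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
    import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
    import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;

    class TokenRecoveryWiringSketch {
      static void wire() throws IOException {
        NMStateStoreService nmStore = new NMNullStateStoreService();
        NMTokenSecretManagerInNM nmTokenSecretManager =
            new NMTokenSecretManagerInNM(nmStore);
        if (nmStore.canRecover()) {
          // Only recovery-capable stores (e.g. the leveldb store) take this path;
          // the default no-op store skips it entirely.
          nmTokenSecretManager.recover(nmStore.loadNMTokenState());
        }
      }
    }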

View File

@ -35,11 +35,15 @@
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator; import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.ConverterUtils;
import org.fusesource.leveldbjni.JniDBFactory; import org.fusesource.leveldbjni.JniDBFactory;
@ -72,6 +76,14 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
private static final String LOCALIZATION_FILECACHE_SUFFIX = "filecache/"; private static final String LOCALIZATION_FILECACHE_SUFFIX = "filecache/";
private static final String LOCALIZATION_APPCACHE_SUFFIX = "appcache/"; private static final String LOCALIZATION_APPCACHE_SUFFIX = "appcache/";
private static final String CURRENT_MASTER_KEY_SUFFIX = "CurrentMasterKey";
private static final String PREV_MASTER_KEY_SUFFIX = "PreviousMasterKey";
private static final String NM_TOKENS_KEY_PREFIX = "NMTokens/";
private static final String NM_TOKENS_CURRENT_MASTER_KEY =
NM_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
private static final String NM_TOKENS_PREV_MASTER_KEY =
NM_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;
private DB db; private DB db;
public NMLeveldbStateStoreService() { public NMLeveldbStateStoreService() {
@ -367,6 +379,93 @@ public void removeDeletionTask(int taskId) throws IOException {
} }
@Override
public RecoveredNMTokenState loadNMTokenState() throws IOException {
RecoveredNMTokenState state = new RecoveredNMTokenState();
state.applicationMasterKeys =
new HashMap<ApplicationAttemptId, MasterKey>();
LeveldbIterator iter = null;
try {
iter = new LeveldbIterator(db);
iter.seek(bytes(NM_TOKENS_KEY_PREFIX));
while (iter.hasNext()) {
Entry<byte[], byte[]> entry = iter.next();
String fullKey = asString(entry.getKey());
if (!fullKey.startsWith(NM_TOKENS_KEY_PREFIX)) {
break;
}
String key = fullKey.substring(NM_TOKENS_KEY_PREFIX.length());
if (key.equals(CURRENT_MASTER_KEY_SUFFIX)) {
state.currentMasterKey = parseMasterKey(entry.getValue());
} else if (key.equals(PREV_MASTER_KEY_SUFFIX)) {
state.previousMasterKey = parseMasterKey(entry.getValue());
} else if (key.startsWith(
ApplicationAttemptId.appAttemptIdStrPrefix)) {
ApplicationAttemptId attempt;
try {
attempt = ConverterUtils.toApplicationAttemptId(key);
} catch (IllegalArgumentException e) {
throw new IOException("Bad application master key state for "
+ fullKey, e);
}
state.applicationMasterKeys.put(attempt,
parseMasterKey(entry.getValue()));
}
}
} catch (DBException e) {
throw new IOException(e.getMessage(), e);
} finally {
if (iter != null) {
iter.close();
}
}
return state;
}
@Override
public void storeNMTokenCurrentMasterKey(MasterKey key)
throws IOException {
storeMasterKey(NM_TOKENS_CURRENT_MASTER_KEY, key);
}
@Override
public void storeNMTokenPreviousMasterKey(MasterKey key)
throws IOException {
storeMasterKey(NM_TOKENS_PREV_MASTER_KEY, key);
}
@Override
public void storeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt, MasterKey key) throws IOException {
storeMasterKey(NM_TOKENS_KEY_PREFIX + attempt, key);
}
@Override
public void removeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt) throws IOException {
String key = NM_TOKENS_KEY_PREFIX + attempt;
try {
db.delete(bytes(key));
} catch (DBException e) {
throw new IOException(e.getMessage(), e);
}
}
private MasterKey parseMasterKey(byte[] keyData) throws IOException {
return new MasterKeyPBImpl(MasterKeyProto.parseFrom(keyData));
}
private void storeMasterKey(String dbKey, MasterKey key)
throws IOException {
MasterKeyPBImpl pb = (MasterKeyPBImpl) key;
try {
db.put(bytes(dbKey), pb.getProto().toByteArray());
} catch (DBException e) {
throw new IOException(e.getMessage(), e);
}
}
@Override @Override
protected void initStorage(Configuration conf) protected void initStorage(Configuration conf)
throws IOException { throws IOException {
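The new leveldb methods keep NM token state under a small, flat key namespace. A hedged summary of the layout implied by the constants and code added above (the appattempt key shape follows ApplicationAttemptId.toString()):

    // Values are the serialized MasterKeyProto bytes written by storeMasterKey.
    //   NMTokens/CurrentMasterKey                       current NM token master key
    //   NMTokens/PreviousMasterKey                      previous key kept for rollover
    //   NMTokens/appattempt_<clusterTs>_<appId>_<n>     per-application-attempt key
    //
    // loadNMTokenState() seeks to the "NMTokens/" prefix and stops at the first
    // key outside it, so all token state is recovered in one range scan.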

View File

@ -22,10 +22,12 @@
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
// The state store to use when state isn't being stored // The state store to use when state isn't being stored
public class NMNullStateStoreService extends NMStateStoreService { public class NMNullStateStoreService extends NMStateStoreService {
@ -77,6 +79,32 @@ public void storeDeletionTask(int taskId,
public void removeDeletionTask(int taskId) throws IOException { public void removeDeletionTask(int taskId) throws IOException {
} }
@Override
public RecoveredNMTokenState loadNMTokenState() throws IOException {
throw new UnsupportedOperationException(
"Recovery not supported by this state store");
}
@Override
public void storeNMTokenCurrentMasterKey(MasterKey key)
throws IOException {
}
@Override
public void storeNMTokenPreviousMasterKey(MasterKey key)
throws IOException {
}
@Override
public void storeNMTokenApplicationMasterKey(ApplicationAttemptId attempt,
MasterKey key) throws IOException {
}
@Override
public void removeNMTokenApplicationMasterKey(ApplicationAttemptId attempt)
throws IOException {
}
@Override @Override
protected void initStorage(Configuration conf) throws IOException { protected void initStorage(Configuration conf) throws IOException {
} }

View File

@ -29,10 +29,12 @@
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
@Private @Private
@Unstable @Unstable
@ -100,6 +102,24 @@ public List<DeletionServiceDeleteTaskProto> getTasks() {
} }
} }
public static class RecoveredNMTokenState {
MasterKey currentMasterKey;
MasterKey previousMasterKey;
Map<ApplicationAttemptId, MasterKey> applicationMasterKeys;
public MasterKey getCurrentMasterKey() {
return currentMasterKey;
}
public MasterKey getPreviousMasterKey() {
return previousMasterKey;
}
public Map<ApplicationAttemptId, MasterKey> getApplicationMasterKeys() {
return applicationMasterKeys;
}
}
/** Initialize the state storage */ /** Initialize the state storage */
@Override @Override
public void serviceInit(Configuration conf) throws IOException { public void serviceInit(Configuration conf) throws IOException {
@ -173,6 +193,21 @@ public abstract void storeDeletionTask(int taskId,
public abstract void removeDeletionTask(int taskId) throws IOException; public abstract void removeDeletionTask(int taskId) throws IOException;
public abstract RecoveredNMTokenState loadNMTokenState() throws IOException;
public abstract void storeNMTokenCurrentMasterKey(MasterKey key)
throws IOException;
public abstract void storeNMTokenPreviousMasterKey(MasterKey key)
throws IOException;
public abstract void storeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt, MasterKey key) throws IOException;
public abstract void removeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt) throws IOException;
protected abstract void initStorage(Configuration conf) throws IOException; protected abstract void initStorage(Configuration conf) throws IOException;
protected abstract void startStorage() throws IOException; protected abstract void startStorage() throws IOException;
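A short usage sketch of the token-state API declared above, as a recovery-capable implementation (such as the leveldb store) would see it. It exercises only the methods and types added in this diff and assumes an already-initialized store instance; the wrapper class and method name are illustrative.

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.server.api.records.MasterKey;
    import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;

    class NMTokenStateRoundTripSketch {
      static void roundTrip(NMStateStoreService store,
          ApplicationAttemptId attempt, MasterKey key) throws IOException {
        // persist the active key and a per-attempt key
        store.storeNMTokenCurrentMasterKey(key);
        store.storeNMTokenApplicationMasterKey(attempt, key);

        // after a restart, a single load call returns everything that was stored
        NMStateStoreService.RecoveredNMTokenState state = store.loadNMTokenState();
        Map<ApplicationAttemptId, MasterKey> appKeys =
            state.getApplicationMasterKeys();
        assert appKeys.containsKey(attempt);

        // once the application attempt finishes, its key can be removed
        store.removeNMTokenApplicationMasterKey(attempt);
      }
    }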

Some files were not shown because too many files have changed in this diff.