Merge r1609845 through r1611528 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-6584@1611531 13f79535-47bb-0310-9956-ffa450edef68
Commit: 04fd2012fd
@@ -139,6 +139,17 @@
<attach>true</attach>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
@@ -36,10 +36,6 @@ Trunk (Unreleased)

HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)

HADOOP-7664. Remove warmings when overriding final parameter configuration
if the override value is same as the final parameter value.
(Ravi Prakash via suresh)

HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin
Jetly via jitendra)

@@ -162,9 +158,6 @@ Trunk (Unreleased)

HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)

HADOOP-10607. Create API to separate credential/password storage from
applications. (Larry McCay via omalley)

HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata.
(tucu)

@@ -182,6 +175,8 @@ Trunk (Unreleased)

HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)

HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)

BUG FIXES

HADOOP-9451. Fault single-layer config if node group topology is enabled.

@@ -379,6 +374,16 @@ Trunk (Unreleased)
NativeAzureFileSystem#NativeAzureFsInputStream#close().
(Chen He via cnauroth)

HADOOP-10831. UserProvider is not thread safe. (Benoy Antony via umamahesh)

HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)

HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
(Mike Yoder via wang)

HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
System. (Shanyu Zhao via cnauroth)

OPTIMIZATIONS

HADOOP-7761. Improve the performance of raw comparisons. (todd)

@@ -397,6 +402,30 @@ Release 2.6.0 - UNRELEASED

HADOOP-10815. Implement Windows equivalent of mlock. (cnauroth)

HADOOP-7664. Remove warmings when overriding final parameter configuration
if the override value is same as the final parameter value.
(Ravi Prakash via suresh)

HADOOP-10673. Update rpc metrics when the call throws an exception. (Ming Ma
via jing9)

HADOOP-10845. Add common tests for ACLs in combination with viewfs.
(Stephen Chu via cnauroth)

HADOOP-10839. Add unregisterSource() to MetricsSystem API.
(Shanyu Zhao via cnauroth)

HADOOP-10607. Create an API to separate credentials/password storage
from applications (Larry McCay via omalley)

HADOOP-10732. Fix locking in credential update. (Ted Yu via omalley)

HADOOP-10733. Fix potential null dereference in CredShell. (Ted Yu via
omalley)

HADOOP-10610. Upgrade S3n s3.fs.buffer.dir to support multi directories.
(Ted Malaska via atm)

OPTIMIZATIONS

BUG FIXES

@@ -412,6 +441,12 @@ Release 2.6.0 - UNRELEASED

HADOOP-10810. Clean up native code compilation warnings. (cnauroth)

HADOOP-9921. daemon scripts should remove pid file on stop call after stop
or process is found not running ( vinayakumarb )

HADOOP-10591. Compression codecs must used pooled direct buffers or
deallocate direct buffers when stream is closed (cmccabe)

Release 2.5.0 - UNRELEASED

INCOMPATIBLE CHANGES
@@ -198,6 +198,7 @@ case $startStop in
else
echo no $command to stop
fi
rm -f $pid
else
echo no $command to stop
fi
@@ -57,6 +57,16 @@ public class KeyShell extends Configured implements Tool {

private boolean userSuppliedProvider = false;

/**
* Primary entry point for the KeyShell; called via main().
*
* @param args Command line arguments.
* @return 0 on success and 1 on failure. This value is passed back to
* the unix shell, so we must follow shell return code conventions:
* the return code is an unsigned character, and 0 means success, and
* small positive integers mean failure.
* @throws Exception
*/
@Override
public int run(String[] args) throws Exception {
int exitCode = 0;

@@ -68,11 +78,11 @@ public class KeyShell extends Configured implements Tool {
if (command.validate()) {
command.execute();
} else {
exitCode = -1;
exitCode = 1;
}
} catch (Exception e) {
e.printStackTrace(err);
return -1;
return 1;
}
return exitCode;
}

@@ -86,8 +96,8 @@ public class KeyShell extends Configured implements Tool {
* % hadoop key list [-provider providerPath]
* % hadoop key delete keyName [--provider providerPath] [-i]
* </pre>
* @param args
* @return
* @param args Command line arguments.
* @return 0 on success, 1 on failure.
* @throws IOException
*/
private int init(String[] args) throws IOException {

@@ -105,7 +115,7 @@ public class KeyShell extends Configured implements Tool {
command = new CreateCommand(keyName, options);
if ("--help".equals(keyName)) {
printKeyShellUsage();
return -1;
return 1;
}
} else if (args[i].equals("delete")) {
String keyName = "--help";

@@ -116,7 +126,7 @@ public class KeyShell extends Configured implements Tool {
command = new DeleteCommand(keyName);
if ("--help".equals(keyName)) {
printKeyShellUsage();
return -1;
return 1;
}
} else if (args[i].equals("roll")) {
String keyName = "--help";

@@ -127,7 +137,7 @@ public class KeyShell extends Configured implements Tool {
command = new RollCommand(keyName);
if ("--help".equals(keyName)) {
printKeyShellUsage();
return -1;
return 1;
}
} else if ("list".equals(args[i])) {
command = new ListCommand();

@@ -145,13 +155,13 @@ public class KeyShell extends Configured implements Tool {
out.println("\nAttributes must be in attribute=value form, " +
"or quoted\nlike \"attribute = value\"\n");
printKeyShellUsage();
return -1;
return 1;
}
if (attributes.containsKey(attr)) {
out.println("\nEach attribute must correspond to only one value:\n" +
"atttribute \"" + attr + "\" was repeated\n" );
printKeyShellUsage();
return -1;
return 1;
}
attributes.put(attr, val);
} else if ("--provider".equals(args[i]) && moreTokens) {

@@ -163,17 +173,17 @@ public class KeyShell extends Configured implements Tool {
interactive = true;
} else if ("--help".equals(args[i])) {
printKeyShellUsage();
return -1;
return 1;
} else {
printKeyShellUsage();
ToolRunner.printGenericCommandUsage(System.err);
return -1;
return 1;
}
}

if (command == null) {
printKeyShellUsage();
return -1;
return 1;
}

if (!attributes.isEmpty()) {

@@ -491,10 +501,11 @@ public class KeyShell extends Configured implements Tool {
}

/**
* Main program.
* main() entry point for the KeyShell. While strictly speaking the
* return is void, it will System.exit() with a return code: 0 is for
* success and 1 for failure.
*
* @param args
* Command line arguments
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
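Note on the KeyShell hunks above: every error return changes from -1 to 1 (HADOOP-10816) because run()'s result is ultimately handed to System.exit(), and a process exit status is an unsigned byte. A minimal sketch of how the value reaches the shell, assuming the standard ToolRunner harness; the wrapper class here is hypothetical and not part of the commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyShell;
    import org.apache.hadoop.util.ToolRunner;

    public class KeyShellExitDemo {
      public static void main(String[] args) throws Exception {
        // ToolRunner.run returns whatever KeyShell.run() returns; System.exit
        // turns it into the process exit status. Exit statuses are unsigned
        // bytes, so -1 would surface in the shell as 255, while 1 is the
        // conventional failure code the updated tests assert on.
        int rc = ToolRunner.run(new Configuration(), new KeyShell(), args);
        System.exit(rc);
      }
    }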
@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.s3.S3Exception;

@@ -225,6 +226,7 @@ public class NativeS3FileSystem extends FileSystem {
private OutputStream backupStream;
private MessageDigest digest;
private boolean closed;
private LocalDirAllocator lDirAlloc;

public NativeS3FsOutputStream(Configuration conf,
NativeFileSystemStore store, String key, Progressable progress,

@@ -246,11 +248,10 @@ public class NativeS3FileSystem extends FileSystem {
}

private File newBackupFile() throws IOException {
File dir = new File(conf.get("fs.s3.buffer.dir"));
if (!dir.mkdirs() && !dir.exists()) {
throw new IOException("Cannot create S3 buffer directory: " + dir);
if (lDirAlloc == null) {
lDirAlloc = new LocalDirAllocator("fs.s3.buffer.dir");
}
File result = File.createTempFile("output-", ".tmp", dir);
File result = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
result.deleteOnExit();
return result;
}
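The NativeS3FileSystem hunk above (HADOOP-10610) replaces the single fs.s3.buffer.dir directory with a LocalDirAllocator, which spreads temp files across every configured directory and skips ones that are unwritable or out of space. A small sketch of the resulting configuration surface; the paths are purely illustrative:

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;

    public class S3BufferDirDemo {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // fs.s3.buffer.dir may now hold a comma-separated list of directories.
        conf.set("fs.s3.buffer.dir", "/data1/s3buf,/data2/s3buf");
        LocalDirAllocator alloc = new LocalDirAllocator("fs.s3.buffer.dir");
        // Same call the new newBackupFile() makes: pick a writable directory
        // with room and create the temp file there.
        File tmp = alloc.createTmpFileForWrite("output-",
            LocalDirAllocator.SIZE_UNKNOWN, conf);
        System.out.println("buffering to " + tmp);
      }
    }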
@@ -37,6 +37,8 @@ import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;

@@ -279,6 +281,38 @@ class ChRootedFs extends AbstractFileSystem {
myFs.setTimes(fullPath(f), mtime, atime);
}

@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
myFs.modifyAclEntries(fullPath(path), aclSpec);
}

@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
myFs.removeAclEntries(fullPath(path), aclSpec);
}

@Override
public void removeDefaultAcl(Path path) throws IOException {
myFs.removeDefaultAcl(fullPath(path));
}

@Override
public void removeAcl(Path path) throws IOException {
myFs.removeAcl(fullPath(path));
}

@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
myFs.setAcl(fullPath(path), aclSpec);
}

@Override
public AclStatus getAclStatus(Path path) throws IOException {
return myFs.getAclStatus(fullPath(path));
}

@Override
public void setVerifyChecksum(final boolean verifyChecksum)
throws IOException, UnresolvedLinkException {
@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;

@@ -871,5 +872,46 @@ public class ViewFileSystem extends FileSystem {
public short getDefaultReplication(Path f) {
throw new NotInMountpointException(f, "getDefaultReplication");
}

@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("modifyAclEntries", path);
}

@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAclEntries", path);
}

@Override
public void removeDefaultAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeDefaultAcl", path);
}

@Override
public void removeAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAcl", path);
}

@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("setAcl", path);
}

@Override
public AclStatus getAclStatus(Path path) throws IOException {
checkPathIsSlash(path);
return new AclStatus.Builder().owner(ugi.getUserName())
.group(ugi.getGroupNames()[0])
.addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
.stickyBit(false).build();
}
}
}
@@ -49,6 +49,9 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.local.LocalConfigKeys;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;

@@ -603,6 +606,51 @@ public class ViewFs extends AbstractFileSystem {
return true;
}

@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec);
}

@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec);
}

@Override
public void removeDefaultAcl(Path path)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeDefaultAcl(res.remainingPath);
}

@Override
public void removeAcl(Path path)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeAcl(res.remainingPath);
}

@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.setAcl(res.remainingPath, aclSpec);
}

@Override
public AclStatus getAclStatus(Path path) throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
return res.targetFileSystem.getAclStatus(res.remainingPath);
}

/*

@@ -832,5 +880,46 @@ public class ViewFs extends AbstractFileSystem {
throws AccessControlException {
throw readOnlyMountTable("setVerifyChecksum", "");
}

@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("modifyAclEntries", path);
}

@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAclEntries", path);
}

@Override
public void removeDefaultAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeDefaultAcl", path);
}

@Override
public void removeAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAcl", path);
}

@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("setAcl", path);
}

@Override
public AclStatus getAclStatus(Path path) throws IOException {
checkPathIsSlash(path);
return new AclStatus.Builder().owner(ugi.getUserName())
.group(ugi.getGroupNames()[0])
.addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
.stickyBit(false).build();
}
}
}
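The ChRootedFs, ViewFileSystem and ViewFs hunks above wire the ACL API through viewfs (HADOOP-10845): calls on a mounted path are resolved and forwarded to the target filesystem, while calls on internal mount-table directories either fail as read-only or return a synthetic 555 AclStatus. A hedged usage sketch; the mount link, hostnames and paths below are made up for illustration:

    import java.net.URI;
    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;

    public class ViewFsAclDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical mount: viewfs /data -> hdfs://nn1/data
        conf.set("fs.viewfs.mounttable.default.link./data", "hdfs://nn1/data");
        FileSystem viewFs = FileSystem.get(URI.create("viewfs:///"), conf);
        // Resolved through the mount table and delegated to the HDFS instance.
        viewFs.modifyAclEntries(new Path("/data/reports"),
            Arrays.asList(AclEntry.parseAclEntry("user:alice:r-x", true)));
        // On an internal directory such as "/" the same call throws
        // AccessControlException, which is what the new tests assert.
      }
    }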
@@ -100,7 +100,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return createOutputStream(out, createCompressor());
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}

/**

@@ -153,7 +154,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, createDecompressor());
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}

/**
@@ -24,6 +24,7 @@ import java.io.OutputStream;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;

/**
* This class encapsulates a streaming compression/decompression pair.

@@ -113,4 +114,58 @@ public interface CompressionCodec {
* @return the extension including the '.'
*/
String getDefaultExtension();

static class Util {
/**
* Create an output stream with a codec taken from the global CodecPool.
*
* @param codec The codec to use to create the output stream.
* @param conf The configuration to use if we need to create a new codec.
* @param out The output stream to wrap.
* @return The new output stream
* @throws IOException
*/
static CompressionOutputStream createOutputStreamWithCodecPool(
CompressionCodec codec, Configuration conf, OutputStream out)
throws IOException {
Compressor compressor = CodecPool.getCompressor(codec, conf);
CompressionOutputStream stream = null;
try {
stream = codec.createOutputStream(out, compressor);
} finally {
if (stream == null) {
CodecPool.returnCompressor(compressor);
} else {
stream.setTrackedCompressor(compressor);
}
}
return stream;
}

/**
* Create an input stream with a codec taken from the global CodecPool.
*
* @param codec The codec to use to create the input stream.
* @param conf The configuration to use if we need to create a new codec.
* @param in The input stream to wrap.
* @return The new input stream
* @throws IOException
*/
static CompressionInputStream createInputStreamWithCodecPool(
CompressionCodec codec, Configuration conf, InputStream in)
throws IOException {
Decompressor decompressor = CodecPool.getDecompressor(codec);
CompressionInputStream stream = null;
try {
stream = codec.createInputStream(in, decompressor);
} finally {
if (stream == null) {
CodecPool.returnDecompressor(decompressor);
} else {
stream.setTrackedDecompressor(decompressor);
}
}
return stream;
}
}
}
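The codec hunks in this commit (HADOOP-10591) route the no-argument createOutputStream/createInputStream overloads through the new CompressionCodec.Util helpers, so the pooled (de)compressor is returned when the stream closes instead of leaking a native direct buffer per call. A rough sketch of the calling pattern this makes safe; the codec choice, byte array and loop count are arbitrary:

    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionOutputStream;
    import org.apache.hadoop.io.compress.DefaultCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class CodecPoolingDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        CompressionCodec codec =
            ReflectionUtils.newInstance(DefaultCodec.class, conf);
        byte[] data = new byte[64 * 1024];
        for (int i = 0; i < 1000; i++) {
          // Each iteration borrows a Compressor from CodecPool...
          CompressionOutputStream out =
              codec.createOutputStream(new ByteArrayOutputStream());
          out.write(data);
          out.finish();
          // ...and close() now hands it back through the tracking added in
          // CompressionOutputStream, so the loop no longer exhausts native
          // direct-buffer memory.
          out.close();
        }
      }
    }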
@@ -41,6 +41,8 @@ public abstract class CompressionInputStream extends InputStream implements Seek
protected final InputStream in;
protected long maxAvailableData = 0L;

private Decompressor trackedDecompressor;

/**
* Create a compression input stream that reads
* the decompressed bytes from the given stream.

@@ -58,6 +60,10 @@ public abstract class CompressionInputStream extends InputStream implements Seek
@Override
public void close() throws IOException {
in.close();
if (trackedDecompressor != null) {
CodecPool.returnDecompressor(trackedDecompressor);
trackedDecompressor = null;
}
}

/**

@@ -112,4 +118,8 @@ public abstract class CompressionInputStream extends InputStream implements Seek
public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}

void setTrackedDecompressor(Decompressor decompressor) {
trackedDecompressor = decompressor;
}
}
@@ -35,6 +35,12 @@ public abstract class CompressionOutputStream extends OutputStream {
*/
protected final OutputStream out;

/**
* If non-null, this is the Compressor object that we should call
* CodecPool#returnCompressor on when this stream is closed.
*/
private Compressor trackedCompressor;

/**
* Create a compression output stream that writes
* the compressed bytes to the given stream.

@@ -44,10 +50,18 @@ public abstract class CompressionOutputStream extends OutputStream {
this.out = out;
}

void setTrackedCompressor(Compressor compressor) {
trackedCompressor = compressor;
}

@Override
public void close() throws IOException {
finish();
out.close();
if (trackedCompressor != null) {
CodecPool.returnCompressor(trackedCompressor);
trackedCompressor = null;
}
}

@Override
@@ -51,14 +51,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
// This may leak memory if called in a loop. The createCompressor() call
// may cause allocation of an untracked direct-backed buffer if native
// libs are being used (even if you close the stream). A Compressor
// object should be reused between successive calls.
LOG.warn("DefaultCodec.createOutputStream() may leak memory. "
+ "Create a compressor first.");
return new CompressorStream(out, createCompressor(),
conf.getInt("io.file.buffer.size", 4*1024));
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}

@Override

@@ -82,8 +76,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return new DecompressorStream(in, createDecompressor(),
conf.getInt("io.file.buffer.size", 4*1024));
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}

@Override
@@ -159,10 +159,11 @@ public class GzipCodec extends DefaultCodec {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return (ZlibFactory.isNativeZlibLoaded(conf)) ?
new CompressorStream(out, createCompressor(),
conf.getInt("io.file.buffer.size", 4*1024)) :
new GzipOutputStream(out);
if (!ZlibFactory.isNativeZlibLoaded(conf)) {
return new GzipOutputStream(out);
}
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}

@Override

@@ -192,8 +193,9 @@ public class GzipCodec extends DefaultCodec {

@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, null);
throws IOException {
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}

@Override
@@ -84,7 +84,8 @@ public class Lz4Codec implements Configurable, CompressionCodec {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return createOutputStream(out, createCompressor());
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}

/**

@@ -157,7 +158,8 @@ public class Lz4Codec implements Configurable, CompressionCodec {
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, createDecompressor());
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}

/**
@@ -95,7 +95,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return createOutputStream(out, createCompressor());
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}

/**

@@ -158,7 +159,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, createDecompressor());
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}

/**
@@ -599,24 +599,35 @@ public class ProtobufRpcEngine implements RpcEngine {
.mergeFrom(request.theRequestRead).build();

Message result;
long startTime = Time.now();
int qTime = (int) (startTime - receiveTime);
Exception exception = null;
try {
long startTime = Time.now();
server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
result = service.callBlockingMethod(methodDescriptor, null, param);
int processingTime = (int) (Time.now() - startTime);
int qTime = (int) (startTime - receiveTime);
if (LOG.isDebugEnabled()) {
LOG.info("Served: " + methodName + " queueTime= " + qTime +
" procesingTime= " + processingTime);
}
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(methodName,
processingTime);
} catch (ServiceException e) {
exception = (Exception) e.getCause();
throw (Exception) e.getCause();
} catch (Exception e) {
exception = e;
throw e;
} finally {
int processingTime = (int) (Time.now() - startTime);
if (LOG.isDebugEnabled()) {
String msg = "Served: " + methodName + " queueTime= " + qTime +
" procesingTime= " + processingTime;
if (exception != null) {
msg += " exception= " + exception.getClass().getSimpleName();
}
LOG.debug(msg);
}
String detailedMetricsName = (exception == null) ?
methodName :
exception.getClass().getSimpleName();
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
processingTime);
}
return new RpcResponseWrapper(result);
}
@@ -355,8 +355,8 @@ public abstract class Server {
private int readThreads; // number of read threads
private int readerPendingConnectionQueue; // number of connections to queue per read thread
private Class<? extends Writable> rpcRequestClass; // class used for deserializing the rpc request
protected RpcMetrics rpcMetrics;
protected RpcDetailedMetrics rpcDetailedMetrics;
final protected RpcMetrics rpcMetrics;
final protected RpcDetailedMetrics rpcDetailedMetrics;

private Configuration conf;
private String portRangeConfig = null;

@@ -2494,12 +2494,8 @@ public abstract class Server {
listener.doStop();
responder.interrupt();
notifyAll();
if (this.rpcMetrics != null) {
this.rpcMetrics.shutdown();
}
if (this.rpcDetailedMetrics != null) {
this.rpcDetailedMetrics.shutdown();
}
this.rpcMetrics.shutdown();
this.rpcDetailedMetrics.shutdown();
}

/** Wait for the server to be stopped.
@@ -471,8 +471,10 @@ public class WritableRpcEngine implements RpcEngine {

// Invoke the protocol method
long startTime = Time.now();
int qTime = (int) (startTime-receivedTime);
Exception exception = null;
try {
long startTime = Time.now();
Method method =
protocolImpl.protocolClass.getMethod(call.getMethodName(),
call.getParameterClasses());

@@ -480,28 +482,18 @@ public class WritableRpcEngine implements RpcEngine {
server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
Object value =
method.invoke(protocolImpl.protocolImpl, call.getParameters());
int processingTime = (int) (Time.now() - startTime);
int qTime = (int) (startTime-receivedTime);
if (LOG.isDebugEnabled()) {
LOG.debug("Served: " + call.getMethodName() +
" queueTime= " + qTime +
" procesingTime= " + processingTime);
}
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(call.getMethodName(),
processingTime);
if (server.verbose) log("Return: "+value);

return new ObjectWritable(method.getReturnType(), value);

} catch (InvocationTargetException e) {
Throwable target = e.getTargetException();
if (target instanceof IOException) {
exception = (IOException)target;
throw (IOException)target;
} else {
IOException ioe = new IOException(target.toString());
ioe.setStackTrace(target.getStackTrace());
exception = ioe;
throw ioe;
}
} catch (Throwable e) {

@@ -510,8 +502,27 @@ public class WritableRpcEngine implements RpcEngine {
}
IOException ioe = new IOException(e.toString());
ioe.setStackTrace(e.getStackTrace());
exception = ioe;
throw ioe;
}
} finally {
int processingTime = (int) (Time.now() - startTime);
if (LOG.isDebugEnabled()) {
String msg = "Served: " + call.getMethodName() +
" queueTime= " + qTime +
" procesingTime= " + processingTime;
if (exception != null) {
msg += " exception= " + exception.getClass().getSimpleName();
}
LOG.debug(msg);
}
String detailedMetricsName = (exception == null) ?
call.getMethodName() :
exception.getClass().getSimpleName();
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
processingTime);
}
}
}
}
@@ -54,6 +54,12 @@ public abstract class MetricsSystem implements MetricsSystemMXBean {
*/
public abstract <T> T register(String name, String desc, T source);

/**
* Unregister a metrics source
* @param name of the source. This is the name you use to call register()
*/
public abstract void unregisterSource(String name);

/**
* Register a metrics source (deriving name and description from the object)
* @param <T> the actual type of the source object
@@ -85,7 +85,7 @@ class MetricsConfig extends SubsetConfiguration {
private ClassLoader pluginLoader;

MetricsConfig(Configuration c, String prefix) {
super(c, prefix, ".");
super(c, prefix.toLowerCase(Locale.US), ".");
}

static MetricsConfig create(String prefix) {
@@ -232,6 +232,17 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
return source;
}

@Override public synchronized
void unregisterSource(String name) {
if (sources.containsKey(name)) {
sources.get(name).stop();
sources.remove(name);
}
if (allSources.containsKey(name)) {
allSources.remove(name);
}
}

synchronized
void registerSource(String name, String desc, MetricsSource source) {
checkNotNull(config, "config");
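MetricsSystem gains unregisterSource() (HADOOP-10839), implemented in the MetricsSystemImpl hunk above and exercised by testUnregisterSource() further down. A short sketch of the intended lifecycle; the source class and names are stand-ins, not part of the commit:

    import org.apache.hadoop.metrics2.MetricsCollector;
    import org.apache.hadoop.metrics2.MetricsSource;
    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    public class UnregisterSourceDemo {
      // Stand-in source; a real component would publish real records here.
      static class MyMetricsSource implements MetricsSource {
        @Override
        public void getMetrics(MetricsCollector collector, boolean all) {
          collector.addRecord("MyComponent").setContext("demo");
        }
      }

      public static void main(String[] args) {
        MetricsSystem ms = DefaultMetricsSystem.initialize("Demo");
        ms.register("MyComponent-1", "per-instance source",
            new MyMetricsSource());
        // ... instance lifetime ...
        // New in this commit: remove the source under the name used at
        // register() time instead of leaking it until shutdown.
        ms.unregisterSource("MyComponent-1");
        ms.shutdown();
      }
    }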
@@ -29,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceStability;
* abstraction to separate credential storage from users of them. It
* is intended to support getting or storing passwords in a variety of ways,
* including third party bindings.
*
* <code>CredentialProvider</code> implementations must be thread safe.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@@ -264,7 +264,7 @@ public class CredentialShell extends Configured implements Tool {
alias + " from CredentialProvider " + provider.toString() +
". Continue?:");
if (!cont) {
out.println("Nothing has been be deleted.");
out.println("Nothing has been deleted.");
}
return cont;
} catch (IOException e) {

@@ -373,12 +373,12 @@ public class CredentialShell extends Configured implements Tool {
char[] newPassword2 = c.readPassword("Enter password again: ");
noMatch = !Arrays.equals(newPassword1, newPassword2);
if (noMatch) {
Arrays.fill(newPassword1, ' ');
if (newPassword1 != null) Arrays.fill(newPassword1, ' ');
c.format("Passwords don't match. Try again.%n");
} else {
cred = newPassword1;
}
Arrays.fill(newPassword2, ' ');
if (newPassword2 != null) Arrays.fill(newPassword2, ' ');
} while (noMatch);
return cred;
}
@@ -230,6 +230,7 @@ public class JavaKeyStoreProvider extends CredentialProvider {

CredentialEntry innerSetCredential(String alias, char[] material)
throws IOException {
writeLock.lock();
try {
keyStore.setKeyEntry(alias, new SecretKeySpec(
new String(material).getBytes("UTF-8"), "AES"),

@@ -237,6 +238,8 @@ public class JavaKeyStoreProvider extends CredentialProvider {
} catch (KeyStoreException e) {
throw new IOException("Can't store credential " + alias + " in " + this,
e);
} finally {
writeLock.unlock();
}
changed = true;
return new CredentialEntry(alias, material);
@@ -55,7 +55,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public CredentialEntry getCredentialEntry(String alias) {
public synchronized CredentialEntry getCredentialEntry(String alias) {
byte[] bytes = credentials.getSecretKey(new Text(alias));
if (bytes == null) {
return null;

@@ -64,7 +64,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public CredentialEntry createCredentialEntry(String name, char[] credential)
public synchronized CredentialEntry createCredentialEntry(String name, char[] credential)
throws IOException {
Text nameT = new Text(name);
if (credentials.getSecretKey(nameT) != null) {

@@ -77,7 +77,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public void deleteCredentialEntry(String name) throws IOException {
public synchronized void deleteCredentialEntry(String name) throws IOException {
byte[] cred = credentials.getSecretKey(new Text(name));
if (cred != null) {
credentials.removeSecretKey(new Text(name));

@@ -95,7 +95,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public void flush() {
public synchronized void flush() {
user.addCredentials(credentials);
}

@@ -112,7 +112,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public List<String> getAliases() throws IOException {
public synchronized List<String> getAliases() throws IOException {
List<String> list = new ArrayList<String>();
List<Text> aliases = credentials.getAllSecretKeys();
for (Text key : aliases) {
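The CredentialProvider, CredentialShell, JavaKeyStoreProvider and UserProvider hunks above harden the credential/password API brought over from trunk (HADOOP-10607, HADOOP-10732, HADOOP-10733, HADOOP-10831, HADOOP-10834). A hedged sketch of how an application consumes that API; the provider path and alias below are made up for illustration:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.alias.CredentialProvider;
    import org.apache.hadoop.security.alias.CredentialProviderFactory;

    public class CredentialLookupDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Illustrative provider path: a JCEKS keystore on the local disk.
        conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
            "jceks://file/tmp/credstore.jceks");
        List<CredentialProvider> providers =
            CredentialProviderFactory.getProviders(conf);
        // Providers must now be thread safe (see the UserProvider hunks
        // above), so a shared instance can serve concurrent lookups.
        CredentialProvider.CredentialEntry entry =
            providers.get(0).getCredentialEntry("db.connection.password");
        char[] password = (entry == null) ? null : entry.getCredential();
        System.out.println(password == null ? "not stored" : "found credential");
      }
    }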
@@ -127,7 +127,7 @@ User Commands
Runs a HDFS filesystem checking utility.
See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info.

Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]]>>>
Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]] [-showprogress]>>>

*------------------+---------------------------------------------+
|| COMMAND_OPTION || Description

@@ -148,6 +148,8 @@ User Commands
*------------------+---------------------------------------------+
| -racks | Print out network topology for data-node locations.
*------------------+---------------------------------------------+
| -showprogress | Print out show progress in output. Default is OFF (no progress).
*------------------+---------------------------------------------+

* <<<fetchdt>>>
@@ -161,7 +161,7 @@ public class TestKeyShell {
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(-1, rc);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created."));
}

@@ -174,7 +174,7 @@ public class TestKeyShell {
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(-1, rc);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created."));
}

@@ -187,7 +187,7 @@ public class TestKeyShell {
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(-1, rc);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured."));
}

@@ -216,7 +216,7 @@ public class TestKeyShell {
config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
ks.setConf(config);
rc = ks.run(args1);
assertEquals(-1, rc);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured."));
}

@@ -262,19 +262,19 @@ public class TestKeyShell {
final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider,
"--attr", "=bar"};
rc = ks.run(args2);
assertEquals(-1, rc);
assertEquals(1, rc);

/* Not in attribute = value form */
outContent.reset();
args2[5] = "foo";
rc = ks.run(args2);
assertEquals(-1, rc);
assertEquals(1, rc);

/* No attribute or value */
outContent.reset();
args2[5] = "=";
rc = ks.run(args2);
assertEquals(-1, rc);
assertEquals(1, rc);

/* Legal: attribute is a, value is b=c */
outContent.reset();

@@ -308,7 +308,7 @@ public class TestKeyShell {
"--attr", "foo=bar",
"--attr", "foo=glarch"};
rc = ks.run(args4);
assertEquals(-1, rc);
assertEquals(1, rc);

/* Clean up to be a good citizen */
deleteKey(ks, "keyattr1");
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.viewfs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;

@@ -28,9 +29,16 @@ import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import org.apache.hadoop.fs.permission.AclEntry;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

@@ -38,6 +46,7 @@ import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Assert;

@@ -96,7 +105,6 @@ public class ViewFileSystemBaseTest {
// in the test root

// Set up the defaultMT in the config with our mount point links
//Configuration conf = new Configuration();
conf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);

@@ -720,4 +728,49 @@ public class ViewFileSystemBaseTest {
Assert.assertTrue("Other-readable permission not set!",
perms.getOtherAction().implies(FsAction.READ));
}

/**
* Verify the behavior of ACL operations on paths above the root of
* any mount table entry.
*/

@Test(expected=AccessControlException.class)
public void testInternalModifyAclEntries() throws IOException {
fsView.modifyAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}

@Test(expected=AccessControlException.class)
public void testInternalRemoveAclEntries() throws IOException {
fsView.removeAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}

@Test(expected=AccessControlException.class)
public void testInternalRemoveDefaultAcl() throws IOException {
fsView.removeDefaultAcl(new Path("/internalDir"));
}

@Test(expected=AccessControlException.class)
public void testInternalRemoveAcl() throws IOException {
fsView.removeAcl(new Path("/internalDir"));
}

@Test(expected=AccessControlException.class)
public void testInternalSetAcl() throws IOException {
fsView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
}

@Test
public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
AclStatus aclStatus = fsView.getAclStatus(new Path("/internalDir"));
assertEquals(aclStatus.getOwner(), currentUser.getUserName());
assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
assertEquals(aclStatus.getEntries(),
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}

}
@@ -22,10 +22,14 @@ import static org.apache.hadoop.fs.FileContextTestHelper.checkFileStatus;
import static org.apache.hadoop.fs.FileContextTestHelper.exists;
import static org.apache.hadoop.fs.FileContextTestHelper.isDir;
import static org.apache.hadoop.fs.FileContextTestHelper.isFile;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;

@@ -39,8 +43,12 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Assert;

@@ -695,4 +703,48 @@ public class ViewFsBaseTest {
public void testInternalSetOwner() throws IOException {
fcView.setOwner(new Path("/internalDir"), "foo", "bar");
}

/**
* Verify the behavior of ACL operations on paths above the root of
* any mount table entry.
*/

@Test(expected=AccessControlException.class)
public void testInternalModifyAclEntries() throws IOException {
fcView.modifyAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}

@Test(expected=AccessControlException.class)
public void testInternalRemoveAclEntries() throws IOException {
fcView.removeAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}

@Test(expected=AccessControlException.class)
public void testInternalRemoveDefaultAcl() throws IOException {
fcView.removeDefaultAcl(new Path("/internalDir"));
}

@Test(expected=AccessControlException.class)
public void testInternalRemoveAcl() throws IOException {
fcView.removeAcl(new Path("/internalDir"));
}

@Test(expected=AccessControlException.class)
public void testInternalSetAcl() throws IOException {
fcView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
}

@Test
public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
assertEquals(aclStatus.getOwner(), currentUser.getUserName());
assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
assertEquals(aclStatus.getEntries(),
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}
}
@@ -496,6 +496,8 @@ public class TestRPC {
caught = true;
}
assertTrue(caught);
rb = getMetrics(server.rpcDetailedMetrics.name());
assertCounter("IOExceptionNumOps", 1L, rb);

proxy.testServerGet();
@@ -60,12 +60,12 @@ public class TestGangliaMetrics {
@Test
public void testTagsForPrefix() throws Exception {
ConfigBuilder cb = new ConfigBuilder()
.add("Test.sink.ganglia.tagsForPrefix.all", "*")
.add("Test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
.add("test.sink.ganglia.tagsForPrefix.all", "*")
.add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
"NumActiveSources")
.add("Test.sink.ganglia.tagsForPrefix.none", "");
.add("test.sink.ganglia.tagsForPrefix.none", "");
GangliaSink30 sink = new GangliaSink30();
sink.init(cb.subset("Test.sink.ganglia"));
sink.init(cb.subset("test.sink.ganglia"));

List<MetricsTag> tags = new ArrayList<MetricsTag>();
tags.add(new MetricsTag(MsInfo.Context, "all"));

@@ -98,8 +98,8 @@ public class TestGangliaMetrics {

@Test public void testGangliaMetrics2() throws Exception {
ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
.add("Test.sink.gsink30.context", "test") // filter out only "test"
.add("Test.sink.gsink31.context", "test") // filter out only "test"
.add("test.sink.gsink30.context", "test") // filter out only "test"
.add("test.sink.gsink31.context", "test") // filter out only "test"
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));

MetricsSystemImpl ms = new MetricsSystemImpl("Test");
@@ -88,11 +88,11 @@ public class TestMetricsSystemImpl {
DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("Test.sink.test.class", TestSink.class.getName())
.add("Test.*.source.filter.exclude", "s0")
.add("Test.source.s1.metric.filter.exclude", "X*")
.add("Test.sink.sink1.metric.filter.exclude", "Y*")
.add("Test.sink.sink2.metric.filter.exclude", "Y*")
.add("test.sink.test.class", TestSink.class.getName())
.add("test.*.source.filter.exclude", "s0")
.add("test.source.s1.metric.filter.exclude", "X*")
.add("test.sink.sink1.metric.filter.exclude", "Y*")
.add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();

@@ -130,11 +130,11 @@ public class TestMetricsSystemImpl {
DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("Test.sink.test.class", TestSink.class.getName())
.add("Test.*.source.filter.exclude", "s0")
.add("Test.source.s1.metric.filter.exclude", "X*")
.add("Test.sink.sink1.metric.filter.exclude", "Y*")
.add("Test.sink.sink2.metric.filter.exclude", "Y*")
.add("test.sink.test.class", TestSink.class.getName())
.add("test.*.source.filter.exclude", "s0")
.add("test.source.s1.metric.filter.exclude", "X*")
.add("test.sink.sink1.metric.filter.exclude", "Y*")
.add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();

@@ -169,13 +169,14 @@ public class TestMetricsSystemImpl {
@Test public void testMultiThreadedPublish() throws Exception {
final int numThreads = 10;
new ConfigBuilder().add("*.period", 80)
.add("Test.sink.Collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
.add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
numThreads)
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();

final CollectingSink sink = new CollectingSink(numThreads);
ms.registerSink("Collector",
ms.registerSink("collector",
"Collector of values from all threads.", sink);
final TestSource[] sources = new TestSource[numThreads];
final Thread[] threads = new Thread[numThreads];

@@ -280,10 +281,10 @@ public class TestMetricsSystemImpl {

@Test public void testHangingSink() {
new ConfigBuilder().add("*.period", 8)
.add("Test.sink.test.class", TestSink.class.getName())
.add("Test.sink.hanging.retry.delay", "1")
.add("Test.sink.hanging.retry.backoff", "1.01")
.add("Test.sink.hanging.retry.count", "0")
.add("test.sink.test.class", TestSink.class.getName())
.add("test.sink.hanging.retry.delay", "1")
.add("test.sink.hanging.retry.backoff", "1.01")
.add("test.sink.hanging.retry.count", "0")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();

@@ -379,6 +380,23 @@ public class TestMetricsSystemImpl {
ms.shutdown();
}

@Test public void testUnregisterSource() {
MetricsSystem ms = new MetricsSystemImpl();
TestSource ts1 = new TestSource("ts1");
TestSource ts2 = new TestSource("ts2");
ms.register("ts1", "", ts1);
ms.register("ts2", "", ts2);
MetricsSource s1 = ms.getSource("ts1");
assertNotNull(s1);
// should work when metrics system is not started
ms.unregisterSource("ts1");
s1 = ms.getSource("ts1");
assertNull(s1);
MetricsSource s2 = ms.getSource("ts2");
assertNotNull(s2);
ms.shutdown();
}

private void checkMetricsRecords(List<MetricsRecord> recs) {
LOG.debug(recs);
MetricsRecord r = recs.get(0);
@@ -127,6 +127,22 @@ public class TestCredShell {
"CredentialProviders configured."));
}

@Test
public void testPromptForCredentialWithEmptyPasswd() throws Exception {
String[] args1 = {"create", "credential1", "--provider",
"jceks://file" + tmpDir + "/credstore.jceks"};
ArrayList<String> passwords = new ArrayList<String>();
passwords.add(null);
passwords.add("p@ssw0rd");
int rc = 0;
CredentialShell shell = new CredentialShell();
shell.setConf(new Configuration());
shell.setPasswordReader(new MockPasswordReader(passwords));
rc = shell.run(args1);
assertEquals(outContent.toString(), -1, rc);
assertTrue(outContent.toString().contains("Passwords don't match"));
}

@Test
public void testPromptForCredential() throws Exception {
String[] args1 = {"create", "credential1", "--provider",

@@ -162,7 +178,7 @@ public class TestCredShell {
public char[] readPassword(String prompt) {
if (passwords.size() == 0) return null;
String pass = passwords.remove(0);
return pass.toCharArray();
return pass == null ? null : pass.toCharArray();
}

@Override
@ -28,8 +26,6 @@ import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
* Provides access to the <code>AccessControlList</code>s used by KMS,

@ -52,13 +50,11 @@ public class KMSACLs implements Runnable {

public static final int RELOADER_SLEEP_MILLIS = 1000;

Map<Type, AccessControlList> acls;
private ReadWriteLock lock;
private volatile Map<Type, AccessControlList> acls;
private ScheduledExecutorService executorService;
private long lastReload;

KMSACLs(Configuration conf) {
lock = new ReentrantReadWriteLock();
if (conf == null) {
conf = loadACLs();
}

@ -70,17 +66,13 @@ public class KMSACLs implements Runnable {
}

private void setACLs(Configuration conf) {
lock.writeLock().lock();
try {
acls = new HashMap<Type, AccessControlList>();
for (Type aclType : Type.values()) {
String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
acls.put(aclType, new AccessControlList(aclStr));
LOG.info("'{}' ACL '{}'", aclType, aclStr);
}
} finally {
lock.writeLock().unlock();
Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
for (Type aclType : Type.values()) {
String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
tempAcls.put(aclType, new AccessControlList(aclStr));
LOG.info("'{}' ACL '{}'", aclType, aclStr);
}
acls = tempAcls;
}

@Override

@ -120,14 +112,7 @@ public class KMSACLs implements Runnable {

public boolean hasAccess(Type type, String user) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
AccessControlList acl = null;
lock.readLock().lock();
try {
acl = acls.get(type);
} finally {
lock.readLock().unlock();
}
return acl.isUserAllowed(ugi);
return acls.get(type).isUserAllowed(ugi);
}

}
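The KMSACLs change (HADOOP-10824) replaces the ReadWriteLock with a volatile reference that is swapped only after the replacement map has been fully built, so readers always see either the old or the new, complete map with no locking. A generic sketch of the same copy-on-write idiom, independent of the KMS classes:

    import java.util.HashMap;
    import java.util.Map;

    class CopyOnWriteConfig {
      private volatile Map<String, String> settings = new HashMap<String, String>();

      // Reload path: build a complete replacement map, then publish it in one write.
      void reload(Map<String, String> fresh) {
        Map<String, String> temp = new HashMap<String, String>(fresh);
        settings = temp;                 // single volatile write, no reader locking needed
      }

      // Read path: one volatile read, then work against a snapshot that is never mutated.
      String get(String key) {
        return settings.get(key);
      }
    }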
@ -19,12 +19,16 @@ package org.apache.hadoop.mount;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.oncrpc.SimpleUdpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;

import static org.apache.hadoop.util.ExitUtil.terminate;

/**
* Main class for starting mountd daemon. This daemon implements the NFS
* mount protocol. When receiving a MOUNT request from an NFS client, it checks

@ -33,6 +37,7 @@ import org.apache.hadoop.util.ShutdownHookManager;
* handle for requested directory and returns it to the client.
*/
abstract public class MountdBase {
public static final Log LOG = LogFactory.getLog(MountdBase.class);
private final RpcProgram rpcProgram;
private int udpBoundPort; // Will set after server starts
private int tcpBoundPort; // Will set after server starts

@ -74,8 +79,13 @@ abstract public class MountdBase {
if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY);
rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
try {
rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
} catch (Throwable e) {
LOG.fatal("Failed to start the server. Cause:", e);
terminate(1, e);
}
}
}
@ -72,6 +72,15 @@ public class NfsExports {
private static final Pattern CIDR_FORMAT_LONG =
Pattern.compile(SLASH_FORMAT_LONG);

// Hostnames are composed of series of 'labels' concatenated with dots.
// Labels can be between 1-63 characters long, and can only take
// letters, digits & hyphens. They cannot start and end with hyphens. For
// more details, refer RFC-1123 & http://en.wikipedia.org/wiki/Hostname
private static final String LABEL_FORMAT =
"[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?";
private static final Pattern HOSTNAME_FORMAT =
Pattern.compile("^(" + LABEL_FORMAT + "\\.)*" + LABEL_FORMAT + "$");

static class AccessCacheEntry implements LightWeightCache.Entry{
private final String hostAddr;
private AccessPrivilege access;

@ -381,10 +390,14 @@ public class NfsExports {
LOG.debug("Using Regex match for '" + host + "' and " + privilege);
}
return new RegexMatch(privilege, host);
} else if (HOSTNAME_FORMAT.matcher(host).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using exact match for '" + host + "' and " + privilege);
}
return new ExactMatch(privilege, host);
} else {
throw new IllegalArgumentException("Invalid hostname provided '" + host
+ "'");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Using exact match for '" + host + "' and " + privilege);
}
return new ExactMatch(privilege, host);
}
}
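The LABEL_FORMAT/HOSTNAME_FORMAT patterns added above reject malformed export entries such as "foo#bar" up front, which the new TestNfsExports cases later in this patch exercise. A quick stand-alone check using the same expressions:

    import java.util.regex.Pattern;

    class HostnameCheck {
      private static final String LABEL_FORMAT =
          "[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?";
      private static final Pattern HOSTNAME_FORMAT =
          Pattern.compile("^(" + LABEL_FORMAT + "\\.)*" + LABEL_FORMAT + "$");

      public static void main(String[] args) {
        System.out.println(HOSTNAME_FORMAT.matcher("host1.example.com").matches()); // true
        System.out.println(HOSTNAME_FORMAT.matcher("foo#bar").matches());           // false
        System.out.println(HOSTNAME_FORMAT.matcher("-leading-hyphen").matches());   // false
      }
    }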
@ -25,6 +25,8 @@ import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;

import static org.apache.hadoop.util.ExitUtil.terminate;

/**
* Nfs server. Supports NFS v3 using {@link RpcProgram}.
* Currently Mountd program is also started inside this class.

@ -50,7 +52,12 @@ public abstract class Nfs3Base {
if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
try {
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
} catch (Throwable e) {
LOG.fatal("Failed to start the server. Cause:", e);
terminate(1, e);
}
}
}
@ -131,7 +131,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
} catch (IOException e) {
String request = set ? "Registration" : "Unregistration";
LOG.error(request + " failure with " + host + ":" + port
+ ", portmap entry: " + mapEntry, e);
+ ", portmap entry: " + mapEntry);
throw new RuntimeException(request + " failure", e);
}
}
@ -60,6 +60,7 @@ public class SimpleUdpClient {
DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length,
IPAddress, port);
socket.send(sendPacket);
socket.setSoTimeout(500);
DatagramPacket receivePacket = new DatagramPacket(receiveData,
receiveData.length);
socket.receive(receivePacket);
@ -194,4 +194,16 @@ public class TestNfsExports {
} while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
Assert.assertEquals(AccessPrivilege.NONE, ap);
}

@Test(expected=IllegalArgumentException.class)
public void testInvalidHost() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"foo#bar");
}

@Test(expected=IllegalArgumentException.class)
public void testInvalidSeparator() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"foo ro : bar rw");
}
}
@ -154,6 +154,8 @@ public class Nfs3Utils {
if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
if (type == NfsFileType.NFSREG.toValue()) {
rtn |= Nfs3Constant.ACCESS3_EXECUTE;
} else {
rtn |= Nfs3Constant.ACCESS3_LOOKUP;
}
}
return rtn;
@ -68,5 +68,12 @@ public class TestNfs3Utils {
0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr));
assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",
0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr));

Mockito.when(attr.getUid()).thenReturn(2);
Mockito.when(attr.getGid()).thenReturn(10);
Mockito.when(attr.getMode()).thenReturn(457); // 711
Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
assertEquals("Access should be allowed for dir as mode is 711 and GID matches",
2 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 11}, attr));
}
}
@ -23,6 +23,8 @@ Trunk (Unreleased)
HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs.
(wheat9)

HDFS-2538. option to disable fsck dots (Mohammad Kamrul Islam via aw)

NEW FEATURES

HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

@ -298,8 +300,13 @@ Release 2.6.0 - UNRELEASED
HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc.
(cnauroth)

HDFS-5624. Add HDFS tests for ACLs in combination with viewfs.
(Stephen Chu via cnauroth)

OPTIMIZATIONS

HDFS-6690. Deduplicate xattr names in memory. (wang)

BUG FIXES

HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin

@ -314,6 +321,25 @@ Release 2.6.0 - UNRELEASED
HADOOP-8158. Interrupting hadoop fs -put from the command line
causes a LeaseExpiredException. (daryn via harsh)

HDFS-6678. MiniDFSCluster may still be partially running after initialization
fails. (cnauroth)

HDFS-5809. BlockPoolSliceScanner and high speed hdfs appending make
datanode to drop into infinite loop (cmccabe)

HDFS-6456. NFS should throw error for invalid entry in
dfs.nfs.exports.allowed.hosts (Abhiraj Butala via brandonli)

HDFS-6689. NFS doesn't return correct lookup access for direcories (brandonli)

HDFS-6478. RemoteException can't be retried properly for non-HA scenario.
(Ming Ma via jing9)

HDFS-6693. TestDFSAdminWithHA fails on windows ( vinayakumarb )

HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails
with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9)

Release 2.5.0 - UNRELEASED

INCOMPATIBLE CHANGES

@ -836,6 +862,9 @@ Release 2.5.0 - UNRELEASED
HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted
file present in snapshot (kihwal)

HDFS-6378. NFS registration should timeout instead of hanging when
portmap/rpcbind is not available (Abhiraj Butala via brandonli)

BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS

HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@ -26,7 +26,6 @@ import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

@ -38,14 +37,13 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;

@ -259,12 +257,11 @@ public class HAUtil {
/**
* Parse the file system URI out of the provided token.
*/
public static URI getServiceUriFromToken(final String scheme,
Token<?> token) {
public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
String tokStr = token.getService().toString();

if (tokStr.startsWith(HA_DT_SERVICE_PREFIX)) {
tokStr = tokStr.replaceFirst(HA_DT_SERVICE_PREFIX, "");
final String prefix = buildTokenServicePrefixForLogicalUri(scheme);
if (tokStr.startsWith(prefix)) {
tokStr = tokStr.replaceFirst(prefix, "");
}
return URI.create(scheme + "://" + tokStr);
}

@ -273,10 +270,13 @@ public class HAUtil {
* Get the service name used in the delegation token for the given logical
* HA service.
* @param uri the logical URI of the cluster
* @param scheme the scheme of the corresponding FileSystem
* @return the service name
*/
public static Text buildTokenServiceForLogicalUri(URI uri) {
return new Text(HA_DT_SERVICE_PREFIX + uri.getHost());
public static Text buildTokenServiceForLogicalUri(final URI uri,
final String scheme) {
return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+ uri.getHost());
}

/**

@ -287,6 +287,10 @@ public class HAUtil {
return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
}

public static String buildTokenServicePrefixForLogicalUri(String scheme) {
return HA_DT_SERVICE_PREFIX + scheme + ":";
}

/**
* Locate a delegation token associated with the given HA cluster URI, and if
* one is found, clone it to also represent the underlying namenode address.

@ -298,7 +302,9 @@ public class HAUtil {
public static void cloneDelegationTokenForLogicalUri(
UserGroupInformation ugi, URI haUri,
Collection<InetSocketAddress> nnAddrs) {
Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri);
// this cloning logic is only used by hdfs
Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME);
Token<DelegationTokenIdentifier> haToken =
tokenSelector.selectToken(haService, ugi.getTokens());
if (haToken != null) {

@ -309,8 +315,9 @@ public class HAUtil {
Token<DelegationTokenIdentifier> specificToken =
new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
SecurityUtil.setTokenService(specificToken, singleNNAddr);
Text alias =
new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService());
Text alias = new Text(
buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
+ "//" + specificToken.getService());
ugi.addToken(alias, specificToken);
LOG.debug("Mapped HA service delegation token for logical URI " +
haUri + " to namenode " + singleNNAddr);
@ -163,7 +163,8 @@ public class NameNodeProxies {

Text dtService;
if (failoverProxyProvider.useLogicalURI()) {
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
HdfsConstants.HDFS_URI_SCHEME);
} else {
dtService = SecurityUtil.buildTokenService(
NameNode.getAddress(nameNodeUri));

@ -224,7 +225,8 @@ public class NameNodeProxies {
new Class[] { xface }, dummyHandler);
Text dtService;
if (failoverProxyProvider.useLogicalURI()) {
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
HdfsConstants.HDFS_URI_SCHEME);
} else {
dtService = SecurityUtil.buildTokenService(
NameNode.getAddress(nameNodeUri));

@ -333,19 +335,18 @@ public class NameNodeProxies {
address, conf, ugi, NamenodeProtocolPB.class, 0);
if (withRetries) { // create the proxy with retries
RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
exceptionToPolicyMap);
TimeUnit.MILLISECONDS);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("getBlocks", methodPolicy);
methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
proxy, methodNameToPolicyMap);
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("getBlocks", timeoutPolicy);
methodNameToPolicyMap.put("getAccessKeys", timeoutPolicy);
NamenodeProtocol translatorProxy =
new NamenodeProtocolTranslatorPB(proxy);
return (NamenodeProtocol) RetryProxy.create(
NamenodeProtocol.class, translatorProxy, methodNameToPolicyMap);
} else {
return new NamenodeProtocolTranslatorPB(proxy);
}
return new NamenodeProtocolTranslatorPB(proxy);
}

private static ClientProtocol createNNProxyWithClientProtocol(

@ -380,26 +381,24 @@ public class NameNodeProxies {
remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
createPolicy);

Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
.retryByRemoteException(defaultPolicy,
remoteExceptionToPolicyMap));
RetryPolicy methodPolicy = RetryPolicies.retryByException(
defaultPolicy, exceptionToPolicyMap);
RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
defaultPolicy, remoteExceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();

methodNameToPolicyMap.put("create", methodPolicy);

proxy = (ClientNamenodeProtocolPB) RetryProxy.create(
ClientNamenodeProtocolPB.class,
new DefaultFailoverProxyProvider<ClientNamenodeProtocolPB>(
ClientNamenodeProtocolPB.class, proxy),
ClientProtocol translatorProxy =
new ClientNamenodeProtocolTranslatorPB(proxy);
return (ClientProtocol) RetryProxy.create(
ClientProtocol.class,
new DefaultFailoverProxyProvider<ClientProtocol>(
ClientProtocol.class, translatorProxy),
methodNameToPolicyMap,
defaultPolicy);
} else {
return new ClientNamenodeProtocolTranslatorPB(proxy);
}
return new ClientNamenodeProtocolTranslatorPB(proxy);
}

private static Object createNameNodeProxy(InetSocketAddress address,
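The hunks above move the retry wrapper from the raw protobuf stub onto the translator, so callers now receive a NamenodeProtocol (or ClientProtocol) whose listed methods retry under the chosen policies. A condensed sketch of that wiring, using only calls that appear in the hunk; the protobuf stub "proxy" and its RPC setup are elided here:

    // Sketch: "proxy" is the NamenodeProtocolPB stub created earlier in the method.
    RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
        TimeUnit.MILLISECONDS);
    Map<String, RetryPolicy> methodNameToPolicyMap =
        new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("getBlocks", timeoutPolicy);
    methodNameToPolicyMap.put("getAccessKeys", timeoutPolicy);

    NamenodeProtocol translatorProxy = new NamenodeProtocolTranslatorPB(proxy);
    NamenodeProtocol withRetries = (NamenodeProtocol) RetryProxy.create(
        NamenodeProtocol.class, translatorProxy, methodNameToPolicyMap);
    // withRetries is what callers see: getBlocks()/getAccessKeys() back off and
    // retry, every other method fails straight through to the caller.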
@ -124,7 +124,7 @@ public class HdfsConstants {
* of a delgation token, indicating that the URI is a logical (HA)
* URI.
*/
public static final String HA_DT_SERVICE_PREFIX = "ha-hdfs:";
public static final String HA_DT_SERVICE_PREFIX = "ha-";

/**
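Folding the scheme into the prefix means a logical-URI delegation token is now keyed by "ha-<scheme>:<host>", e.g. ha-hdfs:mycluster for hdfs://mycluster and ha-webhdfs:mycluster for a webhdfs client, rather than a single ha-hdfs: prefix for everything. A tiny local illustration of the string the new helpers produce, using only what the hunks above show:

    import java.net.URI;

    class TokenServiceName {
      static final String HA_DT_SERVICE_PREFIX = "ha-";   // new value from HdfsConstants

      static String buildTokenServicePrefixForLogicalUri(String scheme) {
        return HA_DT_SERVICE_PREFIX + scheme + ":";
      }

      public static void main(String[] args) {
        URI uri = URI.create("hdfs://mycluster");
        // Prints "ha-hdfs:mycluster", matching the pre-change format for hdfs,
        // while a webhdfs client now gets its own "ha-webhdfs:mycluster" service.
        System.out.println(buildTokenServicePrefixForLogicalUri(uri.getScheme()) + uri.getHost());
      }
    }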
@ -97,7 +97,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
ProtobufRpcEngine.class);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
rpcProxy = createNamenode(nameNodeAddr, conf, ugi);
}

private static DatanodeProtocolPB createNamenode(

@ -109,33 +109,6 @@ public class DatanodeProtocolClientSideTranslatorPB implements
org.apache.hadoop.ipc.Client.getPingInterval(conf), null).getProxy();
}

/** Create a {@link NameNode} proxy */
static DatanodeProtocolPB createNamenodeWithRetry(
DatanodeProtocolPB rpcNamenode) {
RetryPolicy createPolicy = RetryPolicies
.retryUpToMaximumCountWithFixedSleep(5,
HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);

Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
createPolicy);

Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
.retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
remoteExceptionToPolicyMap));
RetryPolicy methodPolicy = RetryPolicies.retryByException(
RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();

methodNameToPolicyMap.put("create", methodPolicy);

return (DatanodeProtocolPB) RetryProxy.create(DatanodeProtocolPB.class,
rpcNamenode, methodNameToPolicyMap);
}

@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;

@ -61,7 +62,7 @@ import com.google.protobuf.ServiceException;
@InterfaceAudience.Private
@InterfaceStability.Stable
public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
ProtocolMetaInterface, Closeable {
ProtocolMetaInterface, Closeable, ProtocolTranslator {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;

@ -88,6 +89,11 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
RPC.stopProxy(rpcProxy);
}

@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}

@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
@ -310,17 +310,10 @@ class BlockPoolSliceScanner {
}
}

private synchronized void updateScanStatus(Block block,
private synchronized void updateScanStatus(BlockScanInfo info,
ScanType type,
boolean scanOk) {
BlockScanInfo info = blockMap.get(block);

if ( info != null ) {
delBlockInfo(info);
} else {
// It might already be removed. Thats ok, it will be caught next time.
info = new BlockScanInfo(block);
}
delBlockInfo(info);

long now = Time.monotonicNow();
info.lastScanType = type;

@ -334,8 +327,8 @@ class BlockPoolSliceScanner {
}

if (verificationLog != null) {
verificationLog.append(now, block.getGenerationStamp(),
block.getBlockId());
verificationLog.append(now, info.getGenerationStamp(),
info.getBlockId());
}
}

@ -434,11 +427,13 @@ class BlockPoolSliceScanner {
totalTransientErrors++;
}

updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, true);
updateScanStatus((BlockScanInfo)block.getLocalBlock(),
ScanType.VERIFICATION_SCAN, true);

return;
} catch (IOException e) {
updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false);
updateScanStatus((BlockScanInfo)block.getLocalBlock(),
ScanType.VERIFICATION_SCAN, false);

// If the block does not exists anymore, then its not an error
if (!dataset.contains(block)) {

@ -497,7 +492,7 @@ class BlockPoolSliceScanner {

// Picks one block and verifies it
private void verifyFirstBlock() {
Block block = null;
BlockScanInfo block = null;
synchronized (this) {
if (!blockInfoSet.isEmpty()) {
block = blockInfoSet.first();
@ -128,7 +128,8 @@ public class DatanodeWebHdfsMethods {
"://" + nnId);
boolean isLogical = HAUtil.isLogicalUri(conf, nnUri);
if (isLogical) {
token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri));
token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri,
HdfsConstants.HDFS_URI_SCHEME));
} else {
token.setService(SecurityUtil.buildTokenService(nnUri));
}
@ -126,6 +126,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
private boolean showBlocks = false;
private boolean showLocations = false;
private boolean showRacks = false;
private boolean showprogress = false;
private boolean showCorruptFileBlocks = false;

/**

@ -203,6 +204,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
else if (key.equals("blocks")) { this.showBlocks = true; }
else if (key.equals("locations")) { this.showLocations = true; }
else if (key.equals("racks")) { this.showRacks = true; }
else if (key.equals("showprogress")) { this.showprogress = true; }
else if (key.equals("openforwrite")) {this.showOpenFiles = true; }
else if (key.equals("listcorruptfileblocks")) {
this.showCorruptFileBlocks = true;

@ -381,10 +383,13 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
} else if (showFiles) {
out.print(path + " " + fileLen + " bytes, " +
blocks.locatedBlockCount() + " block(s): ");
} else {
} else if (showprogress) {
out.print('.');
}
if (res.totalFiles % 100 == 0) { out.println(); out.flush(); }
if ((showprogress) && res.totalFiles % 100 == 0) {
out.println();
out.flush();
}
int missing = 0;
int corrupt = 0;
long missize = 0;
@ -19,13 +19,14 @@
package org.apache.hadoop.hdfs.server.namenode;

import java.util.List;
import java.util.Map;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode;

import com.google.common.collect.ImmutableList;

/**
* XAttrStorage is used to read and set xattrs for an inode.

@ -33,10 +34,15 @@ import com.google.common.collect.ImmutableList;
@InterfaceAudience.Private
public class XAttrStorage {

private static final Map<String, String> internedNames = Maps.newHashMap();

/**
* Reads the existing extended attributes of an inode. If the
* inode does not have an <code>XAttr</code>, then this method
* returns an empty list.
* <p/>
* Must be called while holding the FSDirectory read lock.
*
* @param inode INode to read
* @param snapshotId
* @return List<XAttr> <code>XAttr</code> list.

@ -48,6 +54,9 @@ public class XAttrStorage {

/**
* Reads the existing extended attributes of an inode.
* <p/>
* Must be called while holding the FSDirectory read lock.
*
* @param inode INode to read.
* @return List<XAttr> <code>XAttr</code> list.
*/

@ -58,6 +67,9 @@ public class XAttrStorage {

/**
* Update xattrs of inode.
* <p/>
* Must be called while holding the FSDirectory write lock.
*
* @param inode INode to update
* @param xAttrs to update xAttrs.
* @param snapshotId id of the latest snapshot of the inode

@ -70,8 +82,24 @@ public class XAttrStorage {
}
return;
}

ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(xAttrs);
// Dedupe the xAttr name and save them into a new interned list
List<XAttr> internedXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
for (XAttr xAttr : xAttrs) {
final String name = xAttr.getName();
String internedName = internedNames.get(name);
if (internedName == null) {
internedName = name;
internedNames.put(internedName, internedName);
}
XAttr internedXAttr = new XAttr.Builder()
.setName(internedName)
.setNameSpace(xAttr.getNameSpace())
.setValue(xAttr.getValue())
.build();
internedXAttrs.add(internedXAttr);
}
// Save the list of interned xattrs
ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(internedXAttrs);
if (inode.getXAttrFeature() != null) {
inode.removeXAttrFeature(snapshotId);
}
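The interning loop above keeps a single String instance per distinct xattr name, however many inodes carry that name (HDFS-6690). The same idea in isolation, outside the namenode's lock structure, so it is synchronized here for stand-alone safety:

    import java.util.HashMap;
    import java.util.Map;

    class NameInterner {
      private static final Map<String, String> internedNames = new HashMap<String, String>();

      /** Returns a canonical instance of the given name, storing it on first use. */
      static synchronized String intern(String name) {
        String canonical = internedNames.get(name);
        if (canonical == null) {
          canonical = name;
          internedNames.put(canonical, canonical);
        }
        return canonical;
      }
    }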
@ -77,7 +77,7 @@ public class DFSck extends Configured implements Tool {
private static final String USAGE = "Usage: DFSck <path> "
+ "[-list-corruptfileblocks | "
+ "[-move | -delete | -openforwrite] "
+ "[-files [-blocks [-locations | -racks]]]]\n"
+ "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
+ "\t<path>\tstart checking from this path\n"
+ "\t-move\tmove corrupted files to /lost+found\n"
+ "\t-delete\tdelete corrupted files\n"

@ -90,7 +90,8 @@ public class DFSck extends Configured implements Tool {
+ "blocks and files they belong to\n"
+ "\t-blocks\tprint out block report\n"
+ "\t-locations\tprint out locations for every block\n"
+ "\t-racks\tprint out network topology for data-node locations\n\n"
+ "\t-racks\tprint out network topology for data-node locations\n"
+ "\t-showprogress\tshow progress in output. Default is OFF (no progress)\n\n"
+ "Please Note:\n"
+ "\t1. By default fsck ignores files opened for write, "
+ "use -openforwrite to report such files. They are usually "

@ -270,6 +271,7 @@ public class DFSck extends Configured implements Tool {
else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); }
else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
else if (args[idx].equals("-showprogress")) { url.append("&showprogress=1"); }
else if (args[idx].equals("-list-corruptfileblocks")) {
url.append("&listcorruptfileblocks=1");
doListCorruptFileBlocks = true;
@ -158,7 +158,7 @@ public class WebHdfsFileSystem extends FileSystem
// getCanonicalUri() in order to handle the case where no port is
// specified in the URI
this.tokenServiceName = isLogicalUri ?
HAUtil.buildTokenServiceForLogicalUri(uri)
HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
: SecurityUtil.buildTokenService(getCanonicalUri());

if (!isHA) {
@ -0,0 +1,190 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;

import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import java.io.IOException;
import java.util.List;

import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

/**
* Verify ACL through ViewFileSystem functionality.
*/
public class TestViewFileSystemWithAcls {

private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileSystem fHdfs;
private static FileSystem fHdfs2;
private FileSystem fsView;
private Configuration fsViewConf;
private FileSystem fsTarget, fsTarget2;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileSystemTestHelper fileSystemTestHelper =
new FileSystemTestHelper("/tmp/TestViewFileSystemWithAcls");

@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();

fHdfs = cluster.getFileSystem(0);
fHdfs2 = cluster.getFileSystem(1);
}

@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}

@Before
public void setUp() throws Exception {
fsTarget = fHdfs;
fsTarget2 = fHdfs2;
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);

fsTarget.delete(targetTestRoot, true);
fsTarget2.delete(targetTestRoot2, true);
fsTarget.mkdirs(targetTestRoot);
fsTarget2.mkdirs(targetTestRoot2);

fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
}

private void setupMountPoints() {
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}

@After
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
}

/**
* Verify a ViewFs wrapped over multiple federated NameNodes will
* dispatch the ACL operations to the correct NameNode.
*/
@Test
public void testAclOnMountEntry() throws Exception {
// Set ACLs on the first namespace and verify they are correct
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fsView.setAcl(mountOnNn1, aclSpec);

AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
// Double-check by getting ACL status using FileSystem
// instead of ViewFs
assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));

// Modify the ACL entries on the first namespace
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ));
fsView.modifyAclEntries(mountOnNn1, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "foo", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, MASK, READ),
aclEntry(DEFAULT, OTHER, NONE) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));

fsView.removeDefaultAcl(mountOnNn1);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));

// Paranoid check: verify the other namespace does not
// have ACLs set on the same path.
assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());

// Remove the ACL entries on the first namespace
fsView.removeAcl(mountOnNn1);
assertEquals(0, fsView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0, fHdfs.getAclStatus(targetTestRoot).getEntries().size());

// Now set ACLs on the second namespace
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ));
fsView.modifyAclEntries(mountOnNn2, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bar", READ),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));

// Remove the ACL entries on the second namespace
fsView.removeAclEntries(mountOnNn2, Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ)
));
expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
fsView.removeAcl(mountOnNn2);
assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
}

private AclEntry[] aclEntryArray(AclStatus aclStatus) {
return aclStatus.getEntries().toArray(new AclEntry[0]);
}

}
@ -0,0 +1,190 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;

import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.List;

import java.io.IOException;

import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.fs.permission.FsAction.NONE;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

/**
* Verify ACL through ViewFs functionality.
*/
public class TestViewFsWithAcls {

private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileContext fc, fc2;
private FileContext fcView, fcTarget, fcTarget2;
private Configuration fsViewConf;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestViewFsWithAcls");

@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();

fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}

@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}

@Before
public void setUp() throws Exception {
fcTarget = fc;
fcTarget2 = fc2;
targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);

fcTarget.delete(targetTestRoot, true);
fcTarget2.delete(targetTestRoot2, true);
fcTarget.mkdir(targetTestRoot, new FsPermission((short)0750), true);
fcTarget2.mkdir(targetTestRoot2, new FsPermission((short)0750), true);

fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
}

private void setupMountPoints() {
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}

@After
public void tearDown() throws Exception {
fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
}

/**
* Verify a ViewFs wrapped over multiple federated NameNodes will
* dispatch the ACL operations to the correct NameNode.
*/
@Test
public void testAclOnMountEntry() throws Exception {
// Set ACLs on the first namespace and verify they are correct
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fcView.setAcl(mountOnNn1, aclSpec);

AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
// Double-check by getting ACL status using FileSystem
// instead of ViewFs
assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));

// Modify the ACL entries on the first namespace
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ));
fcView.modifyAclEntries(mountOnNn1, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "foo", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, MASK, READ),
aclEntry(DEFAULT, OTHER, NONE) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));

fcView.removeDefaultAcl(mountOnNn1);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));

// Paranoid check: verify the other namespace does not
// have ACLs set on the same path.
assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());

// Remove the ACL entries on the first namespace
fcView.removeAcl(mountOnNn1);
assertEquals(0, fcView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0, fc.getAclStatus(targetTestRoot).getEntries().size());

// Now set ACLs on the second namespace
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ));
fcView.modifyAclEntries(mountOnNn2, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bar", READ),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));

// Remove the ACL entries on the second namespace
fcView.removeAclEntries(mountOnNn2, Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ)
));
expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
fcView.removeAcl(mountOnNn2);
assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());
}

private AclEntry[] aclEntryArray(AclStatus aclStatus) {
return aclStatus.getEntries().toArray(new AclEntry[0]);
}

}
@ -663,73 +663,81 @@ public class MiniDFSCluster {
boolean checkDataNodeHostConfig,
Configuration[] dnConfOverlays)
throws IOException {
ExitUtil.disableSystemExit();

synchronized (MiniDFSCluster.class) {
instanceId = instanceCount++;
}

this.conf = conf;
base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
this.waitSafeMode = waitSafeMode;
this.checkExitOnShutdown = checkExitOnShutdown;

int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
int safemodeExtension = conf.getInt(
DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);

// In an HA cluster, in order for the StandbyNode to perform checkpoints,
// it needs to know the HTTP port of the Active. So, if ephemeral ports
// are chosen, disable checkpoints for the test.
if (!nnTopology.allHttpPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
"since no HTTP ports have been specified.");
conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
}
if (!nnTopology.allIpcPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling log-roll triggering in the "
+ "Standby node since no IPC ports have been specified.");
conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
}

federation = nnTopology.isFederated();
boolean success = false;
try {
createNameNodesAndSetConf(
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
enableManagedDfsDirsRedundancy,
format, startOpt, clusterId, conf);
} catch (IOException ioe) {
LOG.error("IOE creating namenodes. Permissions dump:\n" +
createPermissionsDiagnosisString(data_dir));
throw ioe;
}
if (format) {
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
throw new IOException("Cannot remove data directory: " + data_dir +
ExitUtil.disableSystemExit();

synchronized (MiniDFSCluster.class) {
instanceId = instanceCount++;
}

this.conf = conf;
base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
this.waitSafeMode = waitSafeMode;
this.checkExitOnShutdown = checkExitOnShutdown;

int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
int safemodeExtension = conf.getInt(
DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);

// In an HA cluster, in order for the StandbyNode to perform checkpoints,
// it needs to know the HTTP port of the Active. So, if ephemeral ports
// are chosen, disable checkpoints for the test.
if (!nnTopology.allHttpPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
"since no HTTP ports have been specified.");
conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
}
if (!nnTopology.allIpcPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling log-roll triggering in the "
+ "Standby node since no IPC ports have been specified.");
conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
}

federation = nnTopology.isFederated();
try {
createNameNodesAndSetConf(
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
enableManagedDfsDirsRedundancy,
format, startOpt, clusterId, conf);
} catch (IOException ioe) {
LOG.error("IOE creating namenodes. Permissions dump:\n" +
createPermissionsDiagnosisString(data_dir));
throw ioe;
}
if (format) {
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
throw new IOException("Cannot remove data directory: " + data_dir +
createPermissionsDiagnosisString(data_dir));
}
}

if (startOpt == StartupOption.RECOVER) {
return;
}

// Start the DataNodes
startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
dnStartOpt != null ? dnStartOpt : startOpt,
racks, hosts, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
waitClusterUp();
//make sure ProxyUsers uses the latest conf
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
success = true;
} finally {
if (!success) {
shutdown();
}
}

if (startOpt == StartupOption.RECOVER) {
return;
}

// Start the DataNodes
startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
dnStartOpt != null ? dnStartOpt : startOpt,
racks, hosts, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
waitClusterUp();
//make sure ProxyUsers uses the latest conf
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}

/**
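The MiniDFSCluster change above (HDFS-6678) wraps initialization in a success flag so a half-started cluster tears itself down instead of leaking daemons when any step throws. The shape of that pattern, reduced to its essentials as a stand-alone sketch:

    class GuardedStartup {
      private boolean running;

      void start() throws Exception {
        boolean success = false;
        try {
          startNameNodes();      // any of these may throw
          startDataNodes();
          waitUntilUp();
          running = true;
          success = true;
        } finally {
          if (!success) {
            shutdown();          // undo partial startup before propagating the failure
          }
        }
      }

      void startNameNodes() throws Exception {}
      void startDataNodes() throws Exception {}
      void waitUntilUp() throws Exception {}
      void shutdown() {}
    }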
@ -30,6 +30,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHEC
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

@ -79,6 +81,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;

@ -97,6 +100,8 @@ public class TestFileCreation {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
private static final String RPC_DETAILED_METRICS =
"RpcDetailedActivityForPort";

static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;

@ -381,11 +386,15 @@ public class TestFileCreation {
}
});

String metricsName = RPC_DETAILED_METRICS + cluster.getNameNodePort();

try {
Path p = new Path("/testfile");
FSDataOutputStream stm1 = fs.create(p);
stm1.write(1);

assertCounter("CreateNumOps", 1L, getMetrics(metricsName));

// Create file again without overwrite
try {
fs2.create(p, false);

@ -394,7 +403,9 @@ public class TestFileCreation {
GenericTestUtils.assertExceptionContains("already being created by",
abce);
}

// NameNodeProxies' createNNProxyWithClientProtocol has 5 retries.
assertCounter("AlreadyBeingCreatedExceptionNumOps",
6L, getMetrics(metricsName));
FSDataOutputStream stm2 = fs2.create(p, true);
stm2.write(2);
stm2.close();
@@ -25,14 +25,16 @@ import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;

@@ -76,13 +78,19 @@ public class TestIsMethodSupported {

@Test
public void testNamenodeProtocol() throws IOException {
NamenodeProtocolTranslatorPB translator =
(NamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(conf,
NamenodeProtocol np =
NameNodeProxies.createNonHAProxy(conf,
nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
boolean exists = translator.isMethodSupported("rollEditLog");

boolean exists = RpcClientUtil.isMethodSupported(np,
NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog");

assertTrue(exists);
exists = translator.isMethodSupported("bogusMethod");
exists = RpcClientUtil.isMethodSupported(np,
NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(NamenodeProtocolPB.class), "bogusMethod");
assertFalse(exists);
}

@@ -110,11 +118,13 @@ public class TestIsMethodSupported {

@Test
public void testClientNamenodeProtocol() throws IOException {
ClientNamenodeProtocolTranslatorPB translator =
(ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
ClientProtocol cp =
NameNodeProxies.createNonHAProxy(
conf, nnAddress, ClientProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
assertTrue(translator.isMethodSupported("mkdirs"));
RpcClientUtil.isMethodSupported(cp,
ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
}

@Test
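For readers following the translator change above, a minimal sketch of the generic capability check the test switches to; the helper name and the proxy/protocol parameters are placeholders, not Hadoop API additions.

    // Ask the server whether a named RPC method exists on a protobuf protocol,
    // instead of going through a protocol-specific TranslatorPB wrapper.
    // Assumes org.apache.hadoop.ipc.RPC and org.apache.hadoop.ipc.RpcClientUtil.
    private static boolean supports(Object proxy, Class<?> pbProtocol, String method)
        throws IOException {
      return RpcClientUtil.isMethodSupported(proxy, pbProtocol,
          RPC.RpcKind.RPC_PROTOCOL_BUFFER,
          RPC.getProtocolVersion(pbProtocol), method);
    }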
@@ -116,7 +116,8 @@ public class DataNodeTestUtils {

public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b);
bpScanner.verifyBlock(b);
bpScanner.verifyBlock(new ExtendedBlock(b.getBlockPoolId(),
new BlockPoolSliceScanner.BlockScanInfo(b.getLocalBlock())));
}

private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn,
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;

@@ -299,7 +300,8 @@ public class TestDelegationTokensWithHA {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");

URI haUri = new URI("hdfs://my-ha-uri/");
token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME));
ugi.addToken(token);

Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();

@@ -355,7 +357,8 @@ public class TestDelegationTokensWithHA {
@Test
public void testDFSGetCanonicalServiceName() throws Exception {
URI hAUri = HATestUtil.getLogicalUri(cluster);
String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, dfs.getCanonicalServiceName());
final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
final Token<DelegationTokenIdentifier> token =

@@ -371,7 +374,8 @@ public class TestDelegationTokensWithHA {
Configuration conf = dfs.getConf();
URI haUri = HATestUtil.getLogicalUri(cluster);
AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService, afs.getCanonicalServiceName());
Token<?> token = afs.getDelegationTokens(
UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
@@ -21,6 +21,7 @@ import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import com.google.common.base.Charsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

@@ -46,6 +47,7 @@ public class TestDFSAdminWithHA {
private PrintStream originErr;

private static final String NSID = "ns1";
private static String newLine = System.getProperty("line.separator");

private void assertOutputMatches(String string) {
String errOutput = new String(out.toByteArray(), Charsets.UTF_8);

@@ -99,6 +101,14 @@ public class TestDFSAdminWithHA {
System.err.flush();
System.setOut(originOut);
System.setErr(originErr);
if (admin != null) {
admin.close();
}
if (cluster != null) {
cluster.shutdown();
}
out.reset();
err.reset();
}

@Test(timeout = 30000)

@@ -108,25 +118,25 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);

// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is ON in.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);

// Leave safemode
exitCode = admin.run(new String[] {"-safemode", "leave"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);

// Get safemode
exitCode = admin.run(new String[] {"-safemode", "get"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Safe mode is OFF in.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}

@Test (timeout = 30000)

@@ -136,12 +146,12 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-safemode", "enter"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Safe mode is ON in.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);

exitCode = admin.run(new String[] {"-saveNamespace"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "Save namespace successful for.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}

@Test (timeout = 30000)

@@ -151,17 +161,17 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "restoreFailedStorage is set to false for.*";
// Default is false
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);

exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to true for.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);

exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
assertEquals(err.toString().trim(), 0, exitCode);
message = "restoreFailedStorage is set to false for.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}

@Test (timeout = 30000)

@@ -170,7 +180,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshNodes"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh nodes successful for.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}

@Test (timeout = 30000)

@@ -179,7 +189,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Balancer bandwidth is set to 10 for.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}

@Test (timeout = 30000)

@@ -189,7 +199,7 @@ public class TestDFSAdminWithHA {
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Created metasave file dfs.meta in the log directory"
+ " of namenode.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}

@Test (timeout = 30000)

@@ -198,7 +208,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh service acl successful for.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}

@Test (timeout = 30000)

@@ -207,7 +217,7 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh user to groups mapping successful for.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}

@Test (timeout = 30000)

@@ -217,7 +227,7 @@ public class TestDFSAdminWithHA {
new String[] {"-refreshSuperUserGroupsConfiguration"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh super user groups configuration successful for.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}

@Test (timeout = 30000)

@@ -226,6 +236,6 @@ public class TestDFSAdminWithHA {
int exitCode = admin.run(new String[] {"-refreshCallQueue"});
assertEquals(err.toString().trim(), 0, exitCode);
String message = "Refresh call queue successful for.*";
assertOutputMatches(message + "\n" + message + "\n");
assertOutputMatches(message + newLine + message + newLine);
}
}
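A short note on the "\n"-to-newLine substitutions above: dfsadmin prints its per-NameNode status through a PrintStream, which terminates lines with the platform separator, so a regex built with a hard-coded "\n" fails on Windows. A minimal sketch of the portable matcher shape the test moves to (the method and parameter names here are illustrative):

    // Build the expected regex with the platform separator rather than "\n".
    // An HA pair prints one status line per NameNode, hence the pattern twice.
    static boolean matchesOnAnyPlatform(String actualOutput, String messagePattern) {
      String newLine = System.getProperty("line.separator");
      return actualOutput.matches(messagePattern + newLine + messagePattern + newLine);
    }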
@@ -17,6 +17,9 @@ Trunk (Unreleased)
MAPREDUCE-5232. Add a configuration to be able to log classpath and other
system properties on mapreduce JVMs startup. (Sangjin Lee via vinodkv)

MAPREDUCE-5910. Make MR AM resync with RM in case of work-preserving
RM-restart. (Rohith via jianhe)

IMPROVEMENTS

MAPREDUCE-3481. [Gridmix] Improve Gridmix STRESS mode. (amarrk)

@@ -153,6 +156,9 @@ Release 2.6.0 - UNRELEASED

IMPROVEMENTS

MAPREDUCE-5971. Move the default options for distcp -p to
DistCpOptionSwitch. (clamb via wang)

OPTIMIZATIONS

BUG FIXES

@@ -237,6 +243,9 @@ Release 2.5.0 - UNRELEASED
MAPREDUCE-5844. Add a configurable delay to reducer-preemption.
(Maysam Yabandeh via kasha)

MAPREDUCE-5790. Made it easier to enable hprof profile options by default.
(Gera Shegalov via vinodkv)

OPTIMIZATIONS

BUG FIXES

@@ -304,6 +313,9 @@ Release 2.5.0 - UNRELEASED
resource configuration for deciding uber-mode on map-only jobs. (Siqi Li via
vinodkv)

MAPREDUCE-5952. LocalContainerLauncher#renameMapOutputForReduce incorrectly
assumes a single dir for mapOutIndex. (Gera Shegalov via kasha)

Release 2.4.1 - 2014-06-23

INCOMPATIBLE CHANGES
@@ -133,6 +133,7 @@ case $startStop in
else
echo no $command to stop
fi
rm -f $pid
else
echo no $command to stop
fi
@@ -30,6 +30,7 @@ import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;

import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSError;

@@ -437,43 +438,6 @@ public class LocalContainerLauncher extends AbstractService implements
}
}

/**
* Within the _local_ filesystem (not HDFS), all activity takes place within
* a single subdir (${local.dir}/usercache/$user/appcache/$appId/$contId/),
* and all sub-MapTasks create the same filename ("file.out"). Rename that
* to something unique (e.g., "map_0.out") to avoid collisions.
*
* Longer-term, we'll modify [something] to use TaskAttemptID-based
* filenames instead of "file.out". (All of this is entirely internal,
* so there are no particular compatibility issues.)
*/
private MapOutputFile renameMapOutputForReduce(JobConf conf,
TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
// move map output to reduce input
Path mapOut = subMapOutputFile.getOutputFile();
FileStatus mStatus = localFs.getFileStatus(mapOut);
Path reduceIn = subMapOutputFile.getInputFileForWrite(
TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen());
Path mapOutIndex = new Path(mapOut.toString() + ".index");
Path reduceInIndex = new Path(reduceIn.toString() + ".index");
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming map output file for task attempt "
+ mapId.toString() + " from original location " + mapOut.toString()
+ " to destination " + reduceIn.toString());
}
if (!localFs.mkdirs(reduceIn.getParent())) {
throw new IOException("Mkdirs failed to create "
+ reduceIn.getParent().toString());
}
if (!localFs.rename(mapOut, reduceIn))
throw new IOException("Couldn't rename " + mapOut);
if (!localFs.rename(mapOutIndex, reduceInIndex))
throw new IOException("Couldn't rename " + mapOutIndex);

return new RenamedMapOutputFile(reduceIn);
}

/**
* Also within the local filesystem, we need to restore the initial state
* of the directory as much as possible. Compare current contents against

@@ -507,6 +471,45 @@ public class LocalContainerLauncher extends AbstractService implements

} // end EventHandler

/**
* Within the _local_ filesystem (not HDFS), all activity takes place within
* a subdir inside one of the LOCAL_DIRS
* (${local.dir}/usercache/$user/appcache/$appId/$contId/),
* and all sub-MapTasks create the same filename ("file.out"). Rename that
* to something unique (e.g., "map_0.out") to avoid possible collisions.
*
* Longer-term, we'll modify [something] to use TaskAttemptID-based
* filenames instead of "file.out". (All of this is entirely internal,
* so there are no particular compatibility issues.)
*/
@VisibleForTesting
protected static MapOutputFile renameMapOutputForReduce(JobConf conf,
TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
// move map output to reduce input
Path mapOut = subMapOutputFile.getOutputFile();
FileStatus mStatus = localFs.getFileStatus(mapOut);
Path reduceIn = subMapOutputFile.getInputFileForWrite(
TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen());
Path mapOutIndex = subMapOutputFile.getOutputIndexFile();
Path reduceInIndex = new Path(reduceIn.toString() + ".index");
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming map output file for task attempt "
+ mapId.toString() + " from original location " + mapOut.toString()
+ " to destination " + reduceIn.toString());
}
if (!localFs.mkdirs(reduceIn.getParent())) {
throw new IOException("Mkdirs failed to create "
+ reduceIn.getParent().toString());
}
if (!localFs.rename(mapOut, reduceIn))
throw new IOException("Couldn't rename " + mapOut);
if (!localFs.rename(mapOutIndex, reduceInIndex))
throw new IOException("Couldn't rename " + mapOutIndex);

return new RenamedMapOutputFile(reduceIn);
}

private static class RenamedMapOutputFile extends MapOutputFile {
private Path path;
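The substantive fix in the rewritten method above is where the index path comes from: with several entries in the local-dir list, file.out and file.out.index can be allocated in different directories, so deriving the index name by string concatenation from mapOut can point at a file that does not exist. A rough sketch of the contrast, under the assumption that a MapOutputFile implementation (such as MROutputFiles) is in hand; the helper is illustrative only:

    // Sketch: old vs. new way of locating the map-output index when
    // mapreduce LOCAL_DIR lists more than one directory.
    static void showIndexLookup(MapOutputFile outputFiles) throws IOException {
      Path mapOut = outputFiles.getOutputFile();
      Path guessed = new Path(mapOut.toString() + ".index"); // old code: may be in another dir
      Path actual = outputFiles.getOutputIndexFile();        // new code: the real location
      System.out.println(guessed + " vs " + actual);
    }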
@@ -64,6 +64,7 @@ public class LocalContainerAllocator extends RMCommunicator
private int nmPort;
private int nmHttpPort;
private ContainerId containerId;
protected int lastResponseID;

private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);

@@ -119,6 +120,11 @@ public class LocalContainerAllocator extends RMCommunicator
if (allocateResponse.getAMCommand() != null) {
switch(allocateResponse.getAMCommand()) {
case AM_RESYNC:
LOG.info("ApplicationMaster is out of sync with ResourceManager,"
+ " hence resyncing.");
this.lastResponseID = 0;
register();
break;
case AM_SHUTDOWN:
LOG.info("Event from RM: shutting down Application Master");
// This can happen if the RM has been restarted. If it is in that state,
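One detail worth noting about the hunk above: the uber-job allocator runs all tasks inside the AM container and never asks the RM for more containers, so on resync it only needs to reset its response id and re-register. A tiny sketch of that minimal handling (response, lastResponseID and register() are the fields/methods shown in the diff):

    // Minimal resync for an allocator with no outstanding container requests.
    if (response.getAMCommand() == AMCommand.AM_RESYNC) {
      lastResponseID = 0;   // start the allocate-response sequence over
      register();           // re-announce this AM to the (restarted) RM
    }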
@@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;

@@ -216,20 +217,27 @@ public abstract class RMCommunicator extends AbstractService
FinishApplicationMasterRequest request =
FinishApplicationMasterRequest.newInstance(finishState,
sb.toString(), historyUrl);
while (true) {
FinishApplicationMasterResponse response =
scheduler.finishApplicationMaster(request);
if (response.getIsUnregistered()) {
// When excepting ClientService, other services are already stopped,
// it is safe to let clients know the final states. ClientService
// should wait for some time so clients have enough time to know the
// final states.
RunningAppContext raContext = (RunningAppContext) context;
raContext.markSuccessfulUnregistration();
break;
try {
while (true) {
FinishApplicationMasterResponse response =
scheduler.finishApplicationMaster(request);
if (response.getIsUnregistered()) {
// When excepting ClientService, other services are already stopped,
// it is safe to let clients know the final states. ClientService
// should wait for some time so clients have enough time to know the
// final states.
RunningAppContext raContext = (RunningAppContext) context;
raContext.markSuccessfulUnregistration();
break;
}
LOG.info("Waiting for application to be successfully unregistered.");
Thread.sleep(rmPollInterval);
}
LOG.info("Waiting for application to be successfully unregistered.");
Thread.sleep(rmPollInterval);
} catch (ApplicationMasterNotRegisteredException e) {
// RM might have restarted or failed over and so lost the fact that AM had
// registered before.
register();
doUnregistration();
}
}
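The try/catch added above is a "re-register, then retry" loop for unregistration after an RM restart or failover. A compact sketch of the same idea; the field and method names (scheduler, request, rmPollInterval, register) follow the diff and are otherwise assumptions:

    // If the new RM does not know this AM, register first and retry the
    // unregistration; otherwise poll until the RM confirms it.
    void doUnregistration() throws YarnException, IOException, InterruptedException {
      try {
        while (!scheduler.finishApplicationMaster(request).getIsUnregistered()) {
          Thread.sleep(rmPollInterval);
        }
      } catch (ApplicationMasterNotRegisteredException e) {
        register();          // RM restarted/failed over and lost our registration
        doUnregistration();  // retry once re-registered
      }
    }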
@@ -389,6 +389,7 @@ public class RMContainerAllocator extends RMContainerRequestor
removed = true;
assignedRequests.remove(aId);
containersReleased++;
pendingRelease.add(containerId);
release(containerId);
}
}

@@ -641,6 +642,15 @@ public class RMContainerAllocator extends RMContainerRequestor
if (response.getAMCommand() != null) {
switch(response.getAMCommand()) {
case AM_RESYNC:
LOG.info("ApplicationMaster is out of sync with ResourceManager,"
+ " hence resyncing.");
lastResponseID = 0;

// Registering to allow RM to discover an active AM for this
// application
register();
addOutstandingRequestOnResync();
break;
case AM_SHUTDOWN:
// This can happen if the RM has been restarted. If it is in that state,
// this application must clean itself up.

@@ -700,6 +710,7 @@ public class RMContainerAllocator extends RMContainerRequestor
LOG.error("Container complete event for unknown container id "
+ cont.getContainerId());
} else {
pendingRelease.remove(cont.getContainerId());
assignedRequests.remove(attemptID);

// send the container completed event to Task attempt

@@ -991,6 +1002,7 @@ public class RMContainerAllocator extends RMContainerRequestor

private void containerNotAssigned(Container allocated) {
containersReleased++;
pendingRelease.add(allocated.getId());
release(allocated.getId());
}
@@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.AMCommand;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;

@@ -58,7 +59,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {

private static final Log LOG = LogFactory.getLog(RMContainerRequestor.class);

private int lastResponseID;
protected int lastResponseID;
private Resource availableResources;

private final RecordFactory recordFactory =

@@ -78,7 +79,10 @@ public abstract class RMContainerRequestor extends RMCommunicator {
private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>(
new org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator());
private final Set<ContainerId> release = new TreeSet<ContainerId>();

// pendingRelease holds history or release requests.request is removed only if
// RM sends completedContainer.
// How it different from release? --> release is for per allocate() request.
protected Set<ContainerId> pendingRelease = new TreeSet<ContainerId>();
private boolean nodeBlacklistingEnabled;
private int blacklistDisablePercent;
private AtomicBoolean ignoreBlacklisting = new AtomicBoolean(false);

@@ -186,6 +190,10 @@ public abstract class RMContainerRequestor extends RMCommunicator {
} catch (YarnException e) {
throw new IOException(e);
}

if (isResyncCommand(allocateResponse)) {
return allocateResponse;
}
lastResponseID = allocateResponse.getResponseId();
availableResources = allocateResponse.getAvailableResources();
lastClusterNmCount = clusterNmCount;

@@ -214,6 +222,28 @@ public abstract class RMContainerRequestor extends RMCommunicator {
return allocateResponse;
}

protected boolean isResyncCommand(AllocateResponse allocateResponse) {
return allocateResponse.getAMCommand() != null
&& allocateResponse.getAMCommand() == AMCommand.AM_RESYNC;
}

protected void addOutstandingRequestOnResync() {
for (Map<String, Map<Resource, ResourceRequest>> rr : remoteRequestsTable
.values()) {
for (Map<Resource, ResourceRequest> capabalities : rr.values()) {
for (ResourceRequest request : capabalities.values()) {
addResourceRequestToAsk(request);
}
}
}
if (!ignoreBlacklisting.get()) {
blacklistAdditions.addAll(blacklistedNodes);
}
if (!pendingRelease.isEmpty()) {
release.addAll(pendingRelease);
}
}

// May be incorrect if there's multiple NodeManagers running on a single host.
// knownNodeCount is based on node managers, not hosts. blacklisting is
// currently based on hosts.
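The pendingRelease comment above is the crux of the work-preserving restart support: the requestor remembers everything the RM has not yet acknowledged so it can be replayed. A condensed sketch of how the new pieces fit together in the allocator's heartbeat, using the methods introduced in this diff (the surrounding loop is an assumption, not the exact caller):

    // One heartbeat iteration: on resync, replay all unacknowledged state.
    AllocateResponse response = makeRemoteRequest();
    if (isResyncCommand(response)) {
      lastResponseID = 0;
      register();                        // let the restarted RM discover this AM
      addOutstandingRequestOnResync();   // re-send asks, releases, blacklist additions
    }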
@@ -18,17 +18,26 @@

package org.apache.hadoop.mapred;

import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.File;
import java.io.IOException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;

@@ -46,6 +55,9 @@ import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

@@ -53,6 +65,36 @@ import org.mockito.stubbing.Answer;
public class TestLocalContainerLauncher {
private static final Log LOG =
LogFactory.getLog(TestLocalContainerLauncher.class);
private static File testWorkDir;
private static final String[] localDirs = new String[2];

private static void delete(File dir) throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path p = fs.makeQualified(new Path(dir.getAbsolutePath()));
fs.delete(p, true);
}

@BeforeClass
public static void setupTestDirs() throws IOException {
testWorkDir = new File("target",
TestLocalContainerLauncher.class.getCanonicalName());
testWorkDir.delete();
testWorkDir.mkdirs();
testWorkDir = testWorkDir.getAbsoluteFile();
for (int i = 0; i < localDirs.length; i++) {
final File dir = new File(testWorkDir, "local-" + i);
dir.mkdirs();
localDirs[i] = dir.toString();
}
}

@AfterClass
public static void cleanupTestDirs() throws IOException {
if (testWorkDir != null) {
delete(testWorkDir);
}
}

@SuppressWarnings("rawtypes")
@Test(timeout=10000)

@@ -141,4 +183,35 @@ public class TestLocalContainerLauncher {
when(container.getNodeId()).thenReturn(nodeId);
return container;
}

@Test
public void testRenameMapOutputForReduce() throws Exception {
final JobConf conf = new JobConf();

final MROutputFiles mrOutputFiles = new MROutputFiles();
mrOutputFiles.setConf(conf);

// make sure both dirs are distinct
//
conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString());
final Path mapOut = mrOutputFiles.getOutputFileForWrite(1);
conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString());
final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1);
Assert.assertNotEquals("Paths must be different!",
mapOut.getParent(), mapOutIdx.getParent());

// make both dirs part of LOCAL_DIR
conf.setStrings(MRConfig.LOCAL_DIR, localDirs);

final FileContext lfc = FileContext.getLocalFSFileContext(conf);
lfc.create(mapOut, EnumSet.of(CREATE)).close();
lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();

final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);

LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
}
}
@@ -78,6 +78,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;

@@ -87,6 +88,7 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.Event;

@@ -95,9 +97,13 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;

@@ -618,6 +624,10 @@ public class TestRMContainerAllocator {
super(conf);
}

public MyResourceManager(Configuration conf, RMStateStore store) {
super(conf, store);
}

@Override
public void serviceStart() throws Exception {
super.serviceStart();

@@ -1426,6 +1436,13 @@ public class TestRMContainerAllocator {
rm.getMyFifoScheduler().lastBlacklistRemovals.size());
}

private static void assertAsksAndReleases(int expectedAsk,
int expectedRelease, MyResourceManager rm) {
Assert.assertEquals(expectedAsk, rm.getMyFifoScheduler().lastAsk.size());
Assert.assertEquals(expectedRelease,
rm.getMyFifoScheduler().lastRelease.size());
}

private static class MyFifoScheduler extends FifoScheduler {

public MyFifoScheduler(RMContext rmContext) {

@@ -1440,6 +1457,7 @@ public class TestRMContainerAllocator {
}

List<ResourceRequest> lastAsk = null;
List<ContainerId> lastRelease = null;
List<String> lastBlacklistAdditions;
List<String> lastBlacklistRemovals;

@@ -1458,6 +1476,7 @@ public class TestRMContainerAllocator {
askCopy.add(reqCopy);
}
lastAsk = ask;
lastRelease = release;
lastBlacklistAdditions = blacklistAdditions;
lastBlacklistRemovals = blacklistRemovals;
return super.allocate(

@@ -1505,6 +1524,20 @@ public class TestRMContainerAllocator {
return new ContainerFailedEvent(attemptId, host);
}

private ContainerAllocatorEvent createDeallocateEvent(JobId jobId,
int taskAttemptId, boolean reduce) {
TaskId taskId;
if (reduce) {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
} else {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
}
TaskAttemptId attemptId =
MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
return new ContainerAllocatorEvent(attemptId,
ContainerAllocator.EventType.CONTAINER_DEALLOCATE);
}

private void checkAssignments(ContainerRequestEvent[] requests,
List<TaskAttemptContainerAssignedEvent> assignments,
boolean checkHostMatch) {

@@ -1557,6 +1590,7 @@ public class TestRMContainerAllocator {
= new ArrayList<JobUpdatedNodesEvent>();
private MyResourceManager rm;
private boolean isUnregistered = false;
private AllocateResponse allocateResponse;
private static AppContext createAppContext(
ApplicationAttemptId appAttemptId, Job job) {
AppContext context = mock(AppContext.class);

@@ -1668,6 +1702,10 @@ public class TestRMContainerAllocator {
super.handleEvent(f);
}

public void sendDeallocate(ContainerAllocatorEvent f) {
super.handleEvent(f);
}

// API to be used by tests
public List<TaskAttemptContainerAssignedEvent> schedule()
throws Exception {

@@ -1713,6 +1751,20 @@ public class TestRMContainerAllocator {
public boolean isUnregistered() {
return isUnregistered;
}

public void updateSchedulerProxy(MyResourceManager rm) {
scheduler = rm.getApplicationMasterService();
}

@Override
protected AllocateResponse makeRemoteRequest() throws IOException {
allocateResponse = super.makeRemoteRequest();
return allocateResponse;
}

public boolean isResyncCommand() {
return super.isResyncCommand(allocateResponse);
}
}

@Test

@@ -2022,6 +2074,198 @@ public class TestRMContainerAllocator {
Assert.assertTrue(allocator.isUnregistered());
}

// Step-1 : AM send allocate request for 2 ContainerRequests and 1
// blackListeNode
// Step-2 : 2 containers are allocated by RM.
// Step-3 : AM Send 1 containerRequest(event3) and 1 releaseRequests to
// RM
// Step-4 : On RM restart, AM(does not know RM is restarted) sends
// additional containerRequest(event4) and blacklisted nodes.
// Intern RM send resync command
// Step-5 : On Resync,AM sends all outstanding
// asks,release,blacklistAaddition
// and another containerRequest(event5)
// Step-6 : RM allocates containers i.e event3,event4 and cRequest5
@Test
public void testRMContainerAllocatorResendsRequestsOnRMRestart()
throws Exception {

Configuration conf = new Configuration();
conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);

conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
conf.setInt(
MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);

MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);

MyResourceManager rm1 = new MyResourceManager(conf, memStore);
rm1.start();
DrainDispatcher dispatcher =
(DrainDispatcher) rm1.getRMContext().getDispatcher();

// Submit the application
RMApp app = rm1.submitApp(1024);
dispatcher.await();

MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();

ApplicationAttemptId appAttemptId =
app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();

JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator =
new MyContainerAllocator(rm1, conf, appAttemptId, mockJob);

// Step-1 : AM send allocate request for 2 ContainerRequests and 1
// blackListeNode
// create the container request
// send MAP request
ContainerRequestEvent event1 =
createReq(jobId, 1, 1024, new String[] { "h1" });
allocator.sendRequest(event1);

ContainerRequestEvent event2 =
createReq(jobId, 2, 2048, new String[] { "h1", "h2" });
allocator.sendRequest(event2);

// Send events to blacklist h2
ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h2", false);
allocator.sendFailure(f1);

// send allocate request and 1 blacklisted nodes
List<TaskAttemptContainerAssignedEvent> assignedContainers =
allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0,
assignedContainers.size());
// Why ask is 3, not 4? --> ask from blacklisted node h2 is removed
assertAsksAndReleases(3, 0, rm1);
assertBlacklistAdditionsAndRemovals(1, 0, rm1);

nm1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();

// Step-2 : 2 containers are allocated by RM.
assignedContainers = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 2", 2,
assignedContainers.size());
assertAsksAndReleases(0, 0, rm1);
assertBlacklistAdditionsAndRemovals(0, 0, rm1);

assignedContainers = allocator.schedule();
Assert.assertEquals("No of assignments must be 0", 0,
assignedContainers.size());
assertAsksAndReleases(3, 0, rm1);
assertBlacklistAdditionsAndRemovals(0, 0, rm1);

// Step-3 : AM Send 1 containerRequest(event3) and 1 releaseRequests to
// RM
// send container request
ContainerRequestEvent event3 =
createReq(jobId, 3, 1000, new String[] { "h1" });
allocator.sendRequest(event3);

// send deallocate request
ContainerAllocatorEvent deallocate1 =
createDeallocateEvent(jobId, 1, false);
allocator.sendDeallocate(deallocate1);

assignedContainers = allocator.schedule();
Assert.assertEquals("No of assignments must be 0", 0,
assignedContainers.size());
assertAsksAndReleases(3, 1, rm1);
assertBlacklistAdditionsAndRemovals(0, 0, rm1);

// Phase-2 start 2nd RM is up
MyResourceManager rm2 = new MyResourceManager(conf, memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
allocator.updateSchedulerProxy(rm2);
dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();

// NM should be rebooted on heartbeat, even first heartbeat for nm2
NodeHeartbeatResponse hbResponse = nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction());

// new NM to represent NM re-register
nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();

// Step-4 : On RM restart, AM(does not know RM is restarted) sends
// additional containerRequest(event4) and blacklisted nodes.
// Intern RM send resync command

// send deallocate request, release=1
ContainerAllocatorEvent deallocate2 =
createDeallocateEvent(jobId, 2, false);
allocator.sendDeallocate(deallocate2);

// Send events to blacklist nodes h3
ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h3", false);
allocator.sendFailure(f2);

ContainerRequestEvent event4 =
createReq(jobId, 4, 2000, new String[] { "h1", "h2" });
allocator.sendRequest(event4);

// send allocate request to 2nd RM and get resync command
allocator.schedule();
dispatcher.await();
Assert.assertTrue("Last allocate response is not RESYNC",
allocator.isResyncCommand());

// Step-5 : On Resync,AM sends all outstanding
// asks,release,blacklistAaddition
// and another containerRequest(event5)
ContainerRequestEvent event5 =
createReq(jobId, 5, 3000, new String[] { "h1", "h2", "h3" });
allocator.sendRequest(event5);

// send all outstanding request again.
assignedContainers = allocator.schedule();
dispatcher.await();
assertAsksAndReleases(3, 2, rm2);
assertBlacklistAdditionsAndRemovals(2, 0, rm2);

nm1.nodeHeartbeat(true);
dispatcher.await();

// Step-6 : RM allocates containers i.e event3,event4 and cRequest5
assignedContainers = allocator.schedule();
dispatcher.await();

Assert.assertEquals("Number of container should be 3", 3,
assignedContainers.size());

for (TaskAttemptContainerAssignedEvent assig : assignedContainers) {
Assert.assertTrue("Assigned count not correct",
"h1".equals(assig.getContainer().getNodeId().getHost()));
}

rm1.stop();
rm2.stop();

}

public static void main(String[] args) throws Exception {
TestRMContainerAllocator t = new TestRMContainerAllocator();
t.testSimple();
|
|||
|
||||
<property>
|
||||
<name>mapreduce.task.profile.params</name>
|
||||
<value></value>
|
||||
<value>-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s</value>
|
||||
<description>JVM profiler parameters used to profile map and reduce task
|
||||
attempts. This string may contain a single format specifier %s that will
|
||||
be replaced by the path to profile.out in the task attempt log directory.
|
||||
|
|
|
@ -29,11 +29,7 @@ public class TestJobConf {
|
|||
@Test
|
||||
public void testProfileParamsDefaults() {
|
||||
JobConf configuration = new JobConf();
|
||||
|
||||
Assert.assertNull(configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
|
||||
|
||||
String result = configuration.getProfileParams();
|
||||
|
||||
Assert.assertNotNull(result);
|
||||
Assert.assertTrue(result.contains("file=%s"));
|
||||
Assert.assertTrue(result.startsWith("-agentlib:hprof"));
|
||||
|
|
|
@@ -24,6 +24,7 @@ import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.junit.AfterClass;
import org.junit.Assert;

import org.apache.commons.logging.Log;

@@ -39,8 +40,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

public class TestMRJobsWithProfiler {

@@ -51,6 +51,8 @@ public class TestMRJobsWithProfiler {
private static final EnumSet<RMAppState> TERMINAL_RM_APP_STATES =
EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED);

private static final int PROFILED_TASK_ID = 1;

private static MiniMRYarnCluster mrCluster;

private static final Configuration CONF = new Configuration();

@@ -69,8 +71,8 @@ public class TestMRJobsWithProfiler {

private static final Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");

@Before
public void setup() throws InterruptedException, IOException {
@BeforeClass
public static void setup() throws InterruptedException, IOException {

if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR

@@ -79,7 +81,7 @@ public class TestMRJobsWithProfiler {
}

if (mrCluster == null) {
mrCluster = new MiniMRYarnCluster(getClass().getName());
mrCluster = new MiniMRYarnCluster(TestMRJobsWithProfiler.class.getName());
mrCluster.init(CONF);
mrCluster.start();
}

@@ -90,8 +92,8 @@ public class TestMRJobsWithProfiler {
localFs.setPermission(APP_JAR, new FsPermission("700"));
}

@After
public void tearDown() {
@AfterClass
public static void tearDown() {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");

@@ -103,10 +105,19 @@ public class TestMRJobsWithProfiler {
}
}

@Test (timeout = 150000)
public void testDefaultProfiler() throws Exception {
LOG.info("Starting testDefaultProfiler");
testProfilerInternal(true);
}

@Test (timeout = 150000)
public void testProfiler() throws IOException, InterruptedException,
ClassNotFoundException {
public void testDifferentProfilers() throws Exception {
LOG.info("Starting testDefaultProfiler");
testProfilerInternal(false);
}

private void testProfilerInternal(boolean useDefault) throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
+ " not found. Not running test.");

@@ -117,18 +128,19 @@ public class TestMRJobsWithProfiler {
final JobConf sleepConf = new JobConf(mrCluster.getConfig());

sleepConf.setProfileEnabled(true);
// profile map split 1
sleepConf.setProfileTaskRange(true, "1");
// profile reduce of map output partitions 1
sleepConf.setProfileTaskRange(false, "1");
sleepConf.setProfileTaskRange(true, String.valueOf(PROFILED_TASK_ID));
sleepConf.setProfileTaskRange(false, String.valueOf(PROFILED_TASK_ID));

// use hprof for map to profile.out
sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS,
"-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n,"
+ "file=%s");
if (!useDefault) {
// use hprof for map to profile.out
sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS,
"-agentlib:hprof=cpu=times,heap=sites,force=n,thread=y,verbose=n,"
+ "file=%s");

// use Xprof for reduce to stdout
sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof");
}

// use Xprof for reduce to stdout
sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof");
sleepJob.setConf(sleepConf);

// 2-map-2-reduce SleepJob

@@ -205,8 +217,8 @@ public class TestMRJobsWithProfiler {
TaskLog.LogName.PROFILE.toString());
final Path stdoutPath = new Path(dirEntry.getValue(),
TaskLog.LogName.STDOUT.toString());
if (tid.getTaskType() == TaskType.MAP) {
if (tid.getTaskID().getId() == 1) {
if (useDefault || tid.getTaskType() == TaskType.MAP) {
if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
// verify profile.out
final BufferedReader br = new BufferedReader(new InputStreamReader(
localFs.open(profilePath)));

@@ -222,7 +234,8 @@ public class TestMRJobsWithProfiler {
} else {
Assert.assertFalse("hprof file should not exist",
localFs.exists(profilePath));
if (tid.getTaskID().getId() == 1) {
if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
// reducer is profiled with Xprof
final BufferedReader br = new BufferedReader(new InputStreamReader(
localFs.open(stdoutPath)));
boolean flatProfFound = false;
|
|||
private Path workingDir;
|
||||
private long blockSize = MAX_AZURE_BLOCK_SIZE;
|
||||
private AzureFileSystemInstrumentation instrumentation;
|
||||
private String metricsSourceName;
|
||||
private boolean isClosed = false;
|
||||
private static boolean suppressRetryPolicy = false;
|
||||
// A counter to create unique (within-process) names for my metrics sources.
|
||||
private static AtomicInteger metricsSourceNameCounter = new AtomicInteger();
|
||||
|
@ -482,11 +484,10 @@ public class NativeAzureFileSystem extends FileSystem {
|
|||
|
||||
// Make sure the metrics system is available before interacting with Azure
|
||||
AzureFileSystemMetricsSystem.fileSystemStarted();
|
||||
String sourceName = newMetricsSourceName(),
|
||||
sourceDesc = "Azure Storage Volume File System metrics";
|
||||
instrumentation = DefaultMetricsSystem.instance().register(sourceName,
|
||||
sourceDesc, new AzureFileSystemInstrumentation(conf));
|
||||
AzureFileSystemMetricsSystem.registerSource(sourceName, sourceDesc,
|
||||
metricsSourceName = newMetricsSourceName();
|
||||
String sourceDesc = "Azure Storage Volume File System metrics";
|
||||
instrumentation = new AzureFileSystemInstrumentation(conf);
|
||||
AzureFileSystemMetricsSystem.registerSource(metricsSourceName, sourceDesc,
|
||||
instrumentation);
|
||||
|
||||
store.initialize(uri, conf, instrumentation);
|
||||
|
@ -502,7 +503,6 @@ public class NativeAzureFileSystem extends FileSystem {
|
|||
LOG.debug(" blockSize = "
|
||||
+ conf.getLong(AZURE_BLOCK_SIZE_PROPERTY_NAME, MAX_AZURE_BLOCK_SIZE));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private NativeFileSystemStore createDefaultStore(Configuration conf) {
|
||||
|
@ -1337,7 +1337,11 @@ public class NativeAzureFileSystem extends FileSystem {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
public synchronized void close() throws IOException {
|
||||
if (isClosed) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Call the base close() to close any resources there.
|
||||
super.close();
|
||||
// Close the store
|
||||
|
@ -1349,12 +1353,14 @@ public class NativeAzureFileSystem extends FileSystem {
|
|||
|
||||
long startTime = System.currentTimeMillis();
|
||||
|
||||
AzureFileSystemMetricsSystem.unregisterSource(metricsSourceName);
|
||||
AzureFileSystemMetricsSystem.fileSystemClosed();
|
||||
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Submitting metrics when file system closed took "
|
||||
+ (System.currentTimeMillis() - startTime) + " ms.");
|
||||
}
|
||||
isClosed = true;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1498,6 +1504,13 @@ public class NativeAzureFileSystem extends FileSystem {
|
|||
handleFilesWithDanglingTempData(root, new DanglingFileDeleter());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void finalize() throws Throwable {
|
||||
LOG.debug("finalize() called.");
|
||||
close();
|
||||
super.finalize();
|
||||
}
|
||||
|
||||
/**
|
||||
* Encode the key with a random prefix for load balancing in Azure storage.
|
||||
* Upload data to a random temporary file then do storage side renaming to
|
||||
|
|
|
@ -44,10 +44,8 @@ public final class AzureFileSystemMetricsSystem {
|
|||
}
|
||||
|
||||
public static synchronized void fileSystemClosed() {
|
||||
if (instance != null) {
|
||||
instance.publishMetricsNow();
|
||||
}
|
||||
if (numFileSystems == 1) {
|
||||
instance.publishMetricsNow();
|
||||
instance.stop();
|
||||
instance.shutdown();
|
||||
instance = null;
|
||||
|
@ -57,8 +55,15 @@ public final class AzureFileSystemMetricsSystem {
|
|||
|
||||
public static void registerSource(String name, String desc,
|
||||
MetricsSource source) {
|
||||
// Register the source with the name appended with -WasbSystem
|
||||
// so that the name is globally unique.
|
||||
instance.register(name + "-WasbSystem", desc, source);
|
||||
//caller has to use unique name to register source
|
||||
instance.register(name, desc, source);
|
||||
}
|
||||
|
||||
public static synchronized void unregisterSource(String name) {
|
||||
if (instance != null) {
|
||||
//publish metrics before unregister a metrics source
|
||||
instance.publishMetricsNow();
|
||||
instance.unregisterSource(name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -324,9 +324,7 @@ public final class AzureBlobStorageTestAccount {
|
|||
String sourceName = NativeAzureFileSystem.newMetricsSourceName();
|
||||
String sourceDesc = "Azure Storage Volume File System metrics";
|
||||
|
||||
AzureFileSystemInstrumentation instrumentation =
|
||||
DefaultMetricsSystem.instance().register(sourceName,
|
||||
sourceDesc, new AzureFileSystemInstrumentation(conf));
|
||||
AzureFileSystemInstrumentation instrumentation = new AzureFileSystemInstrumentation(conf);
|
||||
|
||||
AzureFileSystemMetricsSystem.registerSource(
|
||||
sourceName, sourceDesc, instrumentation);
|
||||
|
|
|
@@ -516,6 +516,13 @@ public abstract class NativeAzureFileSystemBaseTest {
assertNotNull(status);
}

@Test
public void testCloseFileSystemTwice() throws Exception {
//make sure close() can be called multiple times without doing any harm
fs.close();
fs.close();
}

private boolean testModifiedTime(Path testPath, long time) throws Exception {
FileStatus fileStatus = fs.getFileStatus(testPath);
final long errorMargin = modifiedTimeErrorMargin;
@@ -162,6 +162,7 @@ public enum DistCpOptionSwitch {
BANDWIDTH(DistCpConstants.CONF_LABEL_BANDWIDTH_MB,
new Option("bandwidth", true, "Specify bandwidth per map in MB"));

static final String PRESERVE_STATUS_DEFAULT = "-prbugpc";
private final String confLabel;
private final Option option;
@@ -50,7 +50,7 @@ public class OptionsParser {
protected String[] flatten(Options options, String[] arguments, boolean stopAtNonOption) {
for (int index = 0; index < arguments.length; index++) {
if (arguments[index].equals("-" + DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
arguments[index] = "-prbugpc";
arguments[index] = DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT;
}
}
return super.flatten(options, arguments, stopAtNonOption);
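The two DistCp hunks replace a repeated "-prbugpc" literal with the named PRESERVE_STATUS_DEFAULT constant. A stand-alone sketch of the same flag-expansion idea, assuming (as the hunk suggests) that a bare "-p" means "preserve the default attribute set":

    // Illustrative only: expand a bare preserve switch into the default
    // attribute string, kept in a single named constant.
    public final class PreserveFlagExpander {
      static final String PRESERVE_STATUS_DEFAULT = "-prbugpc";

      static String[] expand(String[] args) {
        String[] out = args.clone();
        for (int i = 0; i < out.length; i++) {
          if ("-p".equals(out[i])) {
            out[i] = PRESERVE_STATUS_DEFAULT; // no repeated literal
          }
        }
        return out;
      }
    }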
@@ -43,6 +43,12 @@ Release 2.6.0 - UNRELEASED
YARN-2274. FairScheduler: Add debug information about cluster capacity,
availability and reservations. (kasha)

YARN-2228. Augmented TimelineServer to load pseudo authentication filter when
authentication = simple. (Zhijie Shen via vinodkv)

YARN-1341. Recover NMTokens upon nodemanager restart. (Jason Lowe via
junping_du)

OPTIMIZATIONS

BUG FIXES

@@ -53,6 +59,16 @@ Release 2.6.0 - UNRELEASED
YARN-2088. Fixed a bug in GetApplicationsRequestPBImpl#mergeLocalToBuilder.
(Binglin Chang via jianhe)

YARN-2260. Fixed ResourceManager's RMNode to correctly remember containers
when nodes resync during work-preserving RM restart. (Jian He via vinodkv)

YARN-2264. Fixed a race condition in DrainDispatcher which may cause random
test failures. (Li Lu via jianhe)

YARN-2219. Changed ResourceManager to avoid AMs and NMs getting exceptions
after RM recovery but before scheduler learns about apps and app-attempts.
(Jian He via vinodkv)

Release 2.5.0 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -89,6 +105,9 @@ Release 2.5.0 - UNRELEASED
YARN-1713. Added get-new-app and submit-app functionality to RM web services.
(Varun Vasudev via vinodkv)

YARN-2233. Implemented ResourceManager web-services to create, renew and
cancel delegation tokens. (Varun Vasudev via vinodkv)

IMPROVEMENTS

YARN-1479. Invalid NaN values in Hadoop REST API JSON response (Chen He via

@@ -253,6 +272,9 @@ Release 2.5.0 - UNRELEASED
YARN-2241. ZKRMStateStore: On startup, show nicer messages if znodes already
exist. (Robert Kanter via kasha)

YARN-1408. Preemption caused Invalid State Event: ACQUIRED at KILLED and
caused a task timeout for 30mins. (Sunil G via mayank)

OPTIMIZATIONS

BUG FIXES
@@ -145,6 +145,7 @@ case $startStop in
else
echo no $command to stop
fi
rm -f $pid
else
echo no $command to stop
fi
@@ -72,6 +72,7 @@ public class TimelineClientImpl extends TimelineClient {
private static final Log LOG = LogFactory.getLog(TimelineClientImpl.class);
private static final String RESOURCE_URI_STR = "/ws/v1/timeline/";
private static final String URL_PARAM_USER_NAME = "user.name";
private static final Joiner JOINER = Joiner.on("");
private static Options opts;
static {

@@ -84,17 +85,18 @@ public class TimelineClientImpl extends TimelineClient {
private Client client;
private URI resURI;
private boolean isEnabled;
private TimelineAuthenticatedURLConnectionFactory urlFactory;
private KerberosAuthenticatedURLConnectionFactory urlFactory;

public TimelineClientImpl() {
super(TimelineClientImpl.class.getName());
ClientConfig cc = new DefaultClientConfig();
cc.getClasses().add(YarnJacksonJaxbJsonProvider.class);
if (UserGroupInformation.isSecurityEnabled()) {
urlFactory = new TimelineAuthenticatedURLConnectionFactory();
urlFactory = new KerberosAuthenticatedURLConnectionFactory();
client = new Client(new URLConnectionClientHandler(urlFactory), cc);
} else {
client = Client.create(cc);
client = new Client(new URLConnectionClientHandler(
new PseudoAuthenticatedURLConnectionFactory()), cc);
}
}

@@ -177,7 +179,23 @@ public class TimelineClientImpl extends TimelineClient {
.post(ClientResponse.class, entities);
}

private static class TimelineAuthenticatedURLConnectionFactory
private static class PseudoAuthenticatedURLConnectionFactory
implements HttpURLConnectionFactory {

@Override
public HttpURLConnection getHttpURLConnection(URL url) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(URL_PARAM_USER_NAME,
UserGroupInformation.getCurrentUser().getShortUserName());
url = TimelineAuthenticator.appendParams(url, params);
if (LOG.isDebugEnabled()) {
LOG.debug("URL with delegation token: " + url);
}
return (HttpURLConnection) url.openConnection();
}

}
private static class KerberosAuthenticatedURLConnectionFactory
implements HttpURLConnectionFactory {

private AuthenticatedURL.Token token;

@@ -185,7 +203,7 @@ public class TimelineClientImpl extends TimelineClient {
private Token<TimelineDelegationTokenIdentifier> dToken;
private Text service;

public TimelineAuthenticatedURLConnectionFactory() {
public KerberosAuthenticatedURLConnectionFactory() {
token = new AuthenticatedURL.Token();
authenticator = new TimelineAuthenticator();
}
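The client now always goes through a connection factory: Kerberos + delegation-token authentication in secure mode, and a pseudo-authentication factory that appends an explicit user.name query parameter otherwise. A simplified, self-contained model of the insecure path (the class and method names here are illustrative, not the Hadoop API; URL fragments are ignored):

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLEncoder;

    // Simplified sketch: every request carries user.name so the server-side
    // pseudo authentication filter can identify the caller.
    public class PseudoAuthConnections {
      private final String userName;

      public PseudoAuthConnections(String userName) {
        this.userName = userName;
      }

      public HttpURLConnection open(URL url) throws IOException {
        String sep = (url.getQuery() == null) ? "?" : "&";
        URL withUser = new URL(url.toExternalForm() + sep + "user.name="
            + URLEncoder.encode(userName, "UTF-8"));
        return (HttpURLConnection) withUser.openConnection();
      }
    }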
@@ -0,0 +1,43 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.yarn.webapp;

import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response.Status;

import org.apache.hadoop.classification.InterfaceAudience;

@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public class ForbiddenException extends WebApplicationException {

private static final long serialVersionUID = 1L;

public ForbiddenException() {
super(Status.FORBIDDEN);
}

public ForbiddenException(java.lang.Throwable cause) {
super(cause, Status.FORBIDDEN);
}

public ForbiddenException(String msg) {
super(new Exception(msg), Status.FORBIDDEN);
}
}
@@ -81,6 +81,8 @@ public class GenericExceptionHandler implements ExceptionMapper<Exception> {
s = Response.Status.NOT_FOUND;
} else if (e instanceof IOException) {
s = Response.Status.NOT_FOUND;
} else if (e instanceof ForbiddenException) {
s = Response.Status.FORBIDDEN;
} else if (e instanceof UnsupportedOperationException) {
s = Response.Status.BAD_REQUEST;
} else if (e instanceof IllegalArgumentException) {
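The new ForbiddenException is a JAX-RS WebApplicationException, so it can also be mapped directly instead of being folded into a catch-all handler. A stand-alone illustration (this mapper is hypothetical and is not the project's GenericExceptionHandler):

    import javax.ws.rs.core.Response;
    import javax.ws.rs.ext.ExceptionMapper;
    import javax.ws.rs.ext.Provider;

    import org.apache.hadoop.yarn.webapp.ForbiddenException;

    // A ForbiddenException surfaces to REST clients as HTTP 403 instead of 500.
    @Provider
    public class ForbiddenMapper implements ExceptionMapper<ForbiddenException> {
      @Override
      public Response toResponse(ForbiddenException e) {
        return Response.status(Response.Status.FORBIDDEN)
            .entity(e.getMessage()).build();
      }
    }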
@@ -1217,6 +1217,24 @@
<value>10</value>
</property>

<property>
  <name>yarn.timeline-service.http-authentication.type</name>
  <value>simple</value>
  <description>
  Defines authentication used for the timeline server HTTP endpoint.
  Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
  </description>
</property>

<property>
  <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
  <value>true</value>
  <description>
  Indicates if anonymous requests are allowed by the timeline server when using
  'simple' authentication.
  </description>
</property>

<property>
  <description>The Kerberos principal for the timeline server.</description>
  <name>yarn.timeline-service.principal</name>
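The same two properties introduced above can of course also be set programmatically. A hypothetical helper, using the exact property names from the patch (the helper class itself is not part of the change):

    import org.apache.hadoop.conf.Configuration;

    // Hypothetical convenience wrapper for a simple-auth timeline server.
    public final class TimelineAuthConfig {
      public static Configuration simpleAuth(boolean allowAnonymous) {
        Configuration conf = new Configuration();
        conf.set("yarn.timeline-service.http-authentication.type", "simple");
        conf.setBoolean(
            "yarn.timeline-service.http-authentication.simple.anonymous.allowed",
            allowAnonymous);
        return conf;
      }

      private TimelineAuthConfig() {
      }
    }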
@@ -28,6 +28,7 @@ public class DrainDispatcher extends AsyncDispatcher {
// and similar grotesqueries
private volatile boolean drained = false;
private final BlockingQueue<Event> queue;
final Object mutex;

public DrainDispatcher() {
this(new LinkedBlockingQueue<Event>());

@@ -36,6 +37,7 @@ public class DrainDispatcher extends AsyncDispatcher {
private DrainDispatcher(BlockingQueue<Event> eventQueue) {
super(eventQueue);
this.queue = eventQueue;
this.mutex = this;
}

/**

@@ -53,8 +55,10 @@ public class DrainDispatcher extends AsyncDispatcher {
@Override
public void run() {
while (!Thread.currentThread().isInterrupted()) {
// !drained if dispatch queued new events on this dispatcher
drained = queue.isEmpty();
synchronized (mutex) {
// !drained if dispatch queued new events on this dispatcher
drained = queue.isEmpty();
}
Event event;
try {
event = queue.take();

@@ -75,8 +79,10 @@ public class DrainDispatcher extends AsyncDispatcher {
return new EventHandler() {
@Override
public void handle(Event event) {
drained = false;
actual.handle(event);
synchronized (mutex) {
actual.handle(event);
drained = false;
}
}
};
}
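The race fixed here (YARN-2264) comes from the dispatcher thread checking "is the queue empty?" while a handler is concurrently enqueuing an event and clearing the drained flag; without a common lock, tests that wait for drained == true can see a stale value. A reduced, self-contained illustration of the fix pattern (this is a model, not the actual DrainDispatcher):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Both the emptiness check and the "new event added" update happen under
    // the same mutex, so isDrained() can never report true while an event is
    // still being handed off.
    public class DrainTracker {
      private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>();
      private final Object mutex = new Object();
      private volatile boolean drained = false;

      public void offer(Runnable event) {
        synchronized (mutex) {
          queue.offer(event);
          drained = false; // flipped inside the same critical section
        }
      }

      public Runnable poll() {
        synchronized (mutex) {
          Runnable next = queue.poll();
          drained = queue.isEmpty();
          return next;
        }
      }

      public boolean isDrained() {
        return drained;
      }
    }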
@@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.util.ExitUtil;

@@ -178,23 +177,20 @@ public class ApplicationHistoryServer extends CompositeService {
protected void startWebApp() {
Configuration conf = getConfig();
// Play trick to make the customized filter will only be loaded by the
// timeline server when security is enabled and Kerberos authentication
// is used.
if (UserGroupInformation.isSecurityEnabled()
&& conf
.get(TimelineAuthenticationFilterInitializer.PREFIX + "type", "")
.equals("kerberos")) {
String initializers = conf.get("hadoop.http.filter.initializers");
initializers =
initializers == null || initializers.length() == 0 ? "" : ","
+ initializers;
if (!initializers.contains(
TimelineAuthenticationFilterInitializer.class.getName())) {
conf.set("hadoop.http.filter.initializers",
TimelineAuthenticationFilterInitializer.class.getName()
+ initializers);
}
// Always load the pseudo authentication filter to parse "user.name" in a URL
// to identify an HTTP request's user in insecure mode.
// When the Kerberos authentication type is set (i.e., secure mode is turned on),
// the customized filter will be loaded by the timeline server to do Kerberos
// + DT authentication.
String initializers = conf.get("hadoop.http.filter.initializers");
initializers =
initializers == null || initializers.length() == 0 ? "" : ","
+ initializers;
if (!initializers.contains(
TimelineAuthenticationFilterInitializer.class.getName())) {
conf.set("hadoop.http.filter.initializers",
TimelineAuthenticationFilterInitializer.class.getName()
+ initializers);
}
String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(conf);
LOG.info("Instantiating AHSWebApp at " + bindAddress);
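The core of this hunk is "prepend our initializer to hadoop.http.filter.initializers unless it is already there". Factored out as a hedged sketch (the property key is the real one; the helper class is hypothetical):

    import org.apache.hadoop.conf.Configuration;

    // Idempotently put a filter initializer at the front of the list.
    public final class FilterInitializers {
      private static final String KEY = "hadoop.http.filter.initializers";

      public static void ensureFirst(Configuration conf, String initializerClass) {
        String existing = conf.get(KEY);
        existing = (existing == null || existing.isEmpty()) ? "" : "," + existing;
        if (!existing.contains(initializerClass)) {
          conf.set(KEY, initializerClass + existing);
        }
      }

      private FilterInitializers() {
      }
    }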
@@ -51,7 +51,8 @@ public class TimelineACLsManager {
public boolean checkAccess(UserGroupInformation callerUGI,
TimelineEntity entity) throws YarnException, IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Verifying the access of " + callerUGI.getShortUserName()
LOG.debug("Verifying the access of "
+ (callerUGI == null ? null : callerUGI.getShortUserName())
+ " on the timeline entity "
+ new EntityIdentifier(entity.getEntityId(), entity.getEntityType()));
}
@@ -38,7 +38,8 @@ public class TimelineAuthenticationFilter extends AuthenticationFilter {
// to replace the name here to use the customized Kerberos + DT service
// instead of the standard Kerberos handler.
Properties properties = super.getConfiguration(configPrefix, filterConfig);
if (properties.getProperty(AUTH_TYPE).equals("kerberos")) {
String authType = properties.getProperty(AUTH_TYPE);
if (authType != null && authType.equals("kerberos")) {
properties.setProperty(
AUTH_TYPE, TimelineClientAuthenticationService.class.getName());
}
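The last two hunks are both null-safety fixes: the debug log tolerates a null caller, and the filter tolerates a missing auth type. An equivalent constant-first variant of the same null-safe check, as a tiny hypothetical helper:

    import java.util.Properties;

    // A missing "type" property can no longer trigger a NullPointerException.
    public final class AuthTypeCheck {
      public static boolean isKerberos(Properties props) {
        return "kerberos".equals(props.getProperty("type"));
      }

      private AuthTypeCheck() {
      }
    }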
@@ -47,9 +47,9 @@ import org.apache.hadoop.security.SecurityUtil;
public class TimelineAuthenticationFilterInitializer extends FilterInitializer {

/**
* The configuration prefix of timeline Kerberos + DT authentication
* The configuration prefix of timeline HTTP authentication
*/
public static final String PREFIX = "yarn.timeline-service.http.authentication.";
public static final String PREFIX = "yarn.timeline-service.http-authentication.";

private static final String SIGNATURE_SECRET_FILE =
TimelineAuthenticationFilter.SIGNATURE_SECRET + ".file";
|
@ -62,11 +62,12 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
|
|||
import org.apache.hadoop.yarn.server.timeline.EntityIdentifier;
|
||||
import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
|
||||
import org.apache.hadoop.yarn.server.timeline.NameValuePair;
|
||||
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
|
||||
import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field;
|
||||
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
|
||||
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
|
||||
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
|
||||
import org.apache.hadoop.yarn.webapp.BadRequestException;
|
||||
import org.apache.hadoop.yarn.webapp.ForbiddenException;
|
||||
import org.apache.hadoop.yarn.webapp.NotFoundException;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
|
@ -336,6 +337,11 @@ public class TimelineWebServices {
|
|||
return new TimelinePutResponse();
|
||||
}
|
||||
UserGroupInformation callerUGI = getUser(req);
|
||||
if (callerUGI == null) {
|
||||
String msg = "The owner of the posted timeline entities is not set";
|
||||
LOG.error(msg);
|
||||
throw new ForbiddenException(msg);
|
||||
}
|
||||
try {
|
||||
List<EntityIdentifier> entityIDs = new ArrayList<EntityIdentifier>();
|
||||
TimelineEntities entitiesToPut = new TimelineEntities();
|
||||
|
@ -375,8 +381,7 @@ public class TimelineWebServices {
|
|||
// the timeline data.
|
||||
try {
|
||||
if (existingEntity == null) {
|
||||
injectOwnerInfo(entity,
|
||||
callerUGI == null ? "" : callerUGI.getShortUserName());
|
||||
injectOwnerInfo(entity, callerUGI.getShortUserName());
|
||||
}
|
||||
} catch (YarnException e) {
|
||||
// Skip the entity which messes up the primary filter and record the
|
||||
|
|
|
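The web service now refuses writes whose caller cannot be resolved, instead of storing entities with an empty owner. The same guard pulled out as a hypothetical helper (ForbiddenException is the class added earlier in this patch):

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.webapp.ForbiddenException;

    // Reject a write request whose caller could not be resolved with HTTP 403.
    public final class OwnerGuard {
      public static UserGroupInformation requireCaller(UserGroupInformation callerUGI) {
        if (callerUGI == null) {
          throw new ForbiddenException(
              "The owner of the posted timeline entities is not set");
        }
        return callerUGI;
      }

      private OwnerGuard() {
      }
    }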
@@ -198,7 +198,7 @@ public class TestMemoryApplicationHistoryStore extends
writeContainerFinishData(containerId);
}
long usedMemoryAfter = (runtime.totalMemory() - runtime.freeMemory()) / mb;
Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 200);
Assert.assertTrue((usedMemoryAfter - usedMemoryBefore) < 400);
}

}
@@ -19,26 +19,26 @@
package org.apache.hadoop.yarn.server.timeline.webapp;

import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.inject.Singleton;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.ws.rs.core.MediaType;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;

@@ -46,12 +46,11 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEvents;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AdminACLsManager;
import org.apache.hadoop.yarn.server.timeline.TestMemoryTimelineStore;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
import org.apache.hadoop.yarn.server.timeline.webapp.TimelineWebServices.AboutInfo;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
import org.junit.Assert;

@@ -74,11 +73,11 @@ public class TestTimelineWebServices extends JerseyTest {
private static TimelineStore store;
private static TimelineACLsManager timelineACLsManager;
private static AdminACLsManager adminACLsManager;
private static String remoteUser;
private long beforeTime;

private Injector injector = Guice.createInjector(new ServletModule() {

@SuppressWarnings("unchecked")
@Override
protected void configureServlets() {
bind(YarnJacksonJaxbJsonProvider.class);

@@ -98,7 +97,35 @@ public class TestTimelineWebServices extends JerseyTest {
adminACLsManager = new AdminACLsManager(conf);
bind(TimelineACLsManager.class).toInstance(timelineACLsManager);
serve("/*").with(GuiceContainer.class);
filter("/*").through(TestFilter.class);
TimelineAuthenticationFilter taFilter = new TimelineAuthenticationFilter();
FilterConfig filterConfig = mock(FilterConfig.class);
when(filterConfig.getInitParameter(AuthenticationFilter.CONFIG_PREFIX))
.thenReturn(null);
when(filterConfig.getInitParameter(AuthenticationFilter.AUTH_TYPE))
.thenReturn("simple");
when(filterConfig.getInitParameter(
PseudoAuthenticationHandler.ANONYMOUS_ALLOWED)).thenReturn("true");
Enumeration<Object> names = mock(Enumeration.class);
when(names.hasMoreElements()).thenReturn(true, true, false);
when(names.nextElement()).thenReturn(
AuthenticationFilter.AUTH_TYPE,
PseudoAuthenticationHandler.ANONYMOUS_ALLOWED);
when(filterConfig.getInitParameterNames()).thenReturn(names);
try {
taFilter.init(filterConfig);
} catch (ServletException e) {
Assert.fail("Unable to initialize TimelineAuthenticationFilter: " +
e.getMessage());
}

taFilter = spy(taFilter);
try {
doNothing().when(taFilter).init(any(FilterConfig.class));
} catch (ServletException e) {
Assert.fail("Unable to initialize TimelineAuthenticationFilter: " +
e.getMessage());
}
filter("/*").through(taFilter);
}

});

@@ -382,6 +409,7 @@ public class TestTimelineWebServices extends JerseyTest {
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);

@@ -401,11 +429,21 @@ public class TestTimelineWebServices extends JerseyTest {
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r = resource();
// No owner, will be rejected
ClientResponse response = r.path("ws").path("v1").path("timeline")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
assertEquals(ClientResponse.Status.FORBIDDEN,
response.getClientResponseStatus());

response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
TimelinePutResponse putResponse = response.getEntity(TimelinePutResponse.class);
Assert.assertNotNull(putResponse);
Assert.assertEquals(0, putResponse.getErrors().size());

@@ -425,7 +463,6 @@ public class TestTimelineWebServices extends JerseyTest {
public void testPostEntitiesWithYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
remoteUser = "tester";
try {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();

@@ -435,6 +472,7 @@ public class TestTimelineWebServices extends JerseyTest {
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);

@@ -444,8 +482,8 @@ public class TestTimelineWebServices extends JerseyTest {
Assert.assertEquals(0, putResponse.getErrors().size());

// override/append timeline data in the same entity with different user
remoteUser = "other";
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "other")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);

@@ -457,7 +495,6 @@ public class TestTimelineWebServices extends JerseyTest {
putResponse.getErrors().get(0).getErrorCode());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
remoteUser = null;
}
}

@@ -465,7 +502,6 @@ public class TestTimelineWebServices extends JerseyTest {
public void testGetEntityWithYarnACLsEnabled() throws Exception {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
remoteUser = "tester";
try {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();

@@ -475,6 +511,7 @@ public class TestTimelineWebServices extends JerseyTest {
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);

@@ -482,6 +519,7 @@ public class TestTimelineWebServices extends JerseyTest {
// 1. No field specification
response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());

@@ -492,6 +530,7 @@ public class TestTimelineWebServices extends JerseyTest {
response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3")
.queryParam("fields", "relatedentities")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());

@@ -502,6 +541,7 @@ public class TestTimelineWebServices extends JerseyTest {
response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3")
.queryParam("fields", "primaryfilters")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());

@@ -510,9 +550,9 @@ public class TestTimelineWebServices extends JerseyTest {
TimelineStore.SystemFilter.ENTITY_OWNER.toString()));

// get entity with other user
remoteUser = "other";
response = r.path("ws").path("v1").path("timeline")
.path("test type 3").path("test id 3")
.queryParam("user.name", "other")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());

@@ -520,7 +560,6 @@ public class TestTimelineWebServices extends JerseyTest {
response.getClientResponseStatus());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
remoteUser = null;
}
}

@@ -528,7 +567,6 @@ public class TestTimelineWebServices extends JerseyTest {
public void testGetEntitiesWithYarnACLsEnabled() {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
remoteUser = "tester";
try {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();

@@ -538,11 +576,11 @@ public class TestTimelineWebServices extends JerseyTest {
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);

remoteUser = "other";
entities = new TimelineEntities();
entity = new TimelineEntity();
entity.setEntityId("test id 5");

@@ -551,11 +589,13 @@ public class TestTimelineWebServices extends JerseyTest {
entities.addEntity(entity);
r = resource();
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "other")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);

response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "other")
.path("test type 4")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);

@@ -566,7 +606,6 @@ public class TestTimelineWebServices extends JerseyTest {
assertEquals("test id 5", entities.getEntities().get(0).getEntityId());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
remoteUser = null;
}
}

@@ -574,7 +613,6 @@ public class TestTimelineWebServices extends JerseyTest {
public void testGetEventsWithYarnACLsEnabled() {
AdminACLsManager oldAdminACLsManager =
timelineACLsManager.setAdminACLsManager(adminACLsManager);
remoteUser = "tester";
try {
TimelineEntities entities = new TimelineEntities();
TimelineEntity entity = new TimelineEntity();

@@ -588,11 +626,11 @@ public class TestTimelineWebServices extends JerseyTest {
entities.addEntity(entity);
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "tester")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);

remoteUser = "other";
entities = new TimelineEntities();
entity = new TimelineEntity();
entity.setEntityId("test id 6");

@@ -605,12 +643,14 @@ public class TestTimelineWebServices extends JerseyTest {
entities.addEntity(entity);
r = resource();
response = r.path("ws").path("v1").path("timeline")
.queryParam("user.name", "other")
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.post(ClientResponse.class, entities);

response = r.path("ws").path("v1").path("timeline")
.path("test type 5").path("events")
.queryParam("user.name", "other")
.queryParam("entityId", "test id 5,test id 6")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);

@@ -620,43 +660,7 @@ public class TestTimelineWebServices extends JerseyTest {
assertEquals("test id 6", events.getAllEvents().get(0).getEntityId());
} finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
remoteUser = null;
}
}

@Singleton
private static class TestFilter implements Filter {

@Override
public void init(FilterConfig filterConfig) throws ServletException {
}

@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException, ServletException {
if (request instanceof HttpServletRequest) {
request =
new TestHttpServletRequestWrapper((HttpServletRequest) request);
}
chain.doFilter(request, response);
}

@Override
public void destroy() {
}

}

private static class TestHttpServletRequestWrapper extends HttpServletRequestWrapper {

public TestHttpServletRequestWrapper(HttpServletRequest request) {
super(request);
}

@Override
public String getRemoteUser() {
return TestTimelineWebServices.remoteUser;
}

}
}
@@ -42,7 +42,7 @@ public class BaseNMTokenSecretManager extends
private static Log LOG = LogFactory
.getLog(BaseNMTokenSecretManager.class);

private int serialNo = new SecureRandom().nextInt();
protected int serialNo = new SecureRandom().nextInt();

protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
protected final Lock readLock = readWriteLock.readLock();
@@ -169,6 +169,15 @@ public class NodeManager extends CompositeService
}
}

private void recoverTokens(NMTokenSecretManagerInNM nmTokenSecretManager,
NMContainerTokenSecretManager containerTokenSecretManager)
throws IOException {
if (nmStore.canRecover()) {
nmTokenSecretManager.recover(nmStore.loadNMTokenState());
// TODO: recover containerTokenSecretManager
}
}

@Override
protected void serviceInit(Configuration conf) throws Exception {

@@ -184,7 +193,9 @@ public class NodeManager extends CompositeService
new NMContainerTokenSecretManager(conf);

NMTokenSecretManagerInNM nmTokenSecretManager =
new NMTokenSecretManagerInNM();
new NMTokenSecretManagerInNM(nmStore);

recoverTokens(nmTokenSecretManager, containerTokenSecretManager);

this.aclsManager = new ApplicationACLsManager(conf);
@@ -35,11 +35,15 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.fusesource.leveldbjni.JniDBFactory;

@@ -72,6 +76,14 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
private static final String LOCALIZATION_FILECACHE_SUFFIX = "filecache/";
private static final String LOCALIZATION_APPCACHE_SUFFIX = "appcache/";

private static final String CURRENT_MASTER_KEY_SUFFIX = "CurrentMasterKey";
private static final String PREV_MASTER_KEY_SUFFIX = "PreviousMasterKey";
private static final String NM_TOKENS_KEY_PREFIX = "NMTokens/";
private static final String NM_TOKENS_CURRENT_MASTER_KEY =
NM_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
private static final String NM_TOKENS_PREV_MASTER_KEY =
NM_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;

private DB db;

public NMLeveldbStateStoreService() {

@@ -367,6 +379,93 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
}

@Override
public RecoveredNMTokenState loadNMTokenState() throws IOException {
RecoveredNMTokenState state = new RecoveredNMTokenState();
state.applicationMasterKeys =
new HashMap<ApplicationAttemptId, MasterKey>();
LeveldbIterator iter = null;
try {
iter = new LeveldbIterator(db);
iter.seek(bytes(NM_TOKENS_KEY_PREFIX));
while (iter.hasNext()) {
Entry<byte[], byte[]> entry = iter.next();
String fullKey = asString(entry.getKey());
if (!fullKey.startsWith(NM_TOKENS_KEY_PREFIX)) {
break;
}
String key = fullKey.substring(NM_TOKENS_KEY_PREFIX.length());
if (key.equals(CURRENT_MASTER_KEY_SUFFIX)) {
state.currentMasterKey = parseMasterKey(entry.getValue());
} else if (key.equals(PREV_MASTER_KEY_SUFFIX)) {
state.previousMasterKey = parseMasterKey(entry.getValue());
} else if (key.startsWith(
ApplicationAttemptId.appAttemptIdStrPrefix)) {
ApplicationAttemptId attempt;
try {
attempt = ConverterUtils.toApplicationAttemptId(key);
} catch (IllegalArgumentException e) {
throw new IOException("Bad application master key state for "
+ fullKey, e);
}
state.applicationMasterKeys.put(attempt,
parseMasterKey(entry.getValue()));
}
}
} catch (DBException e) {
throw new IOException(e.getMessage(), e);
} finally {
if (iter != null) {
iter.close();
}
}
return state;
}

@Override
public void storeNMTokenCurrentMasterKey(MasterKey key)
throws IOException {
storeMasterKey(NM_TOKENS_CURRENT_MASTER_KEY, key);
}

@Override
public void storeNMTokenPreviousMasterKey(MasterKey key)
throws IOException {
storeMasterKey(NM_TOKENS_PREV_MASTER_KEY, key);
}

@Override
public void storeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt, MasterKey key) throws IOException {
storeMasterKey(NM_TOKENS_KEY_PREFIX + attempt, key);
}

@Override
public void removeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt) throws IOException {
String key = NM_TOKENS_KEY_PREFIX + attempt;
try {
db.delete(bytes(key));
} catch (DBException e) {
throw new IOException(e.getMessage(), e);
}
}

private MasterKey parseMasterKey(byte[] keyData) throws IOException {
return new MasterKeyPBImpl(MasterKeyProto.parseFrom(keyData));
}

private void storeMasterKey(String dbKey, MasterKey key)
throws IOException {
MasterKeyPBImpl pb = (MasterKeyPBImpl) key;
try {
db.put(bytes(dbKey), pb.getProto().toByteArray());
} catch (DBException e) {
throw new IOException(e.getMessage(), e);
}
}

@Override
protected void initStorage(Configuration conf)
throws IOException {
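loadNMTokenState() is a prefix scan: every key under "NMTokens/" is visited in order and routed to the current master key, the previous master key, or a per-attempt map. A reduced, self-contained model of that scan, with a TreeMap standing in for the LevelDB iterator (the key layout mirrors the constants above; the class itself is illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    // Model of the "walk all keys under one prefix" recovery loop.
    public class NMTokenKeyScan {
      static final String PREFIX = "NMTokens/";

      public static Map<String, byte[]> scan(TreeMap<String, byte[]> db) {
        Map<String, byte[]> result = new HashMap<String, byte[]>();
        for (Map.Entry<String, byte[]> e : db.tailMap(PREFIX).entrySet()) {
          String fullKey = e.getKey();
          if (!fullKey.startsWith(PREFIX)) {
            break; // walked past the "NMTokens/" key range, stop scanning
          }
          // suffix is "CurrentMasterKey", "PreviousMasterKey" or an app attempt id
          result.put(fullKey.substring(PREFIX.length()), e.getValue());
        }
        return result;
      }
    }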
@@ -22,10 +22,12 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;

// The state store to use when state isn't being stored
public class NMNullStateStoreService extends NMStateStoreService {

@@ -77,6 +79,32 @@ public class NMNullStateStoreService extends NMStateStoreService {
public void removeDeletionTask(int taskId) throws IOException {
}

@Override
public RecoveredNMTokenState loadNMTokenState() throws IOException {
throw new UnsupportedOperationException(
"Recovery not supported by this state store");
}

@Override
public void storeNMTokenCurrentMasterKey(MasterKey key)
throws IOException {
}

@Override
public void storeNMTokenPreviousMasterKey(MasterKey key)
throws IOException {
}

@Override
public void storeNMTokenApplicationMasterKey(ApplicationAttemptId attempt,
MasterKey key) throws IOException {
}

@Override
public void removeNMTokenApplicationMasterKey(ApplicationAttemptId attempt)
throws IOException {
}

@Override
protected void initStorage(Configuration conf) throws IOException {
}
@@ -29,10 +29,12 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
import org.apache.hadoop.yarn.server.api.records.MasterKey;

@Private
@Unstable

@@ -100,6 +102,24 @@ public abstract class NMStateStoreService extends AbstractService {
}
}

public static class RecoveredNMTokenState {
MasterKey currentMasterKey;
MasterKey previousMasterKey;
Map<ApplicationAttemptId, MasterKey> applicationMasterKeys;

public MasterKey getCurrentMasterKey() {
return currentMasterKey;
}

public MasterKey getPreviousMasterKey() {
return previousMasterKey;
}

public Map<ApplicationAttemptId, MasterKey> getApplicationMasterKeys() {
return applicationMasterKeys;
}
}

/** Initialize the state storage */
@Override
public void serviceInit(Configuration conf) throws IOException {

@@ -173,6 +193,21 @@ public abstract class NMStateStoreService extends AbstractService {
public abstract void removeDeletionTask(int taskId) throws IOException;

public abstract RecoveredNMTokenState loadNMTokenState() throws IOException;

public abstract void storeNMTokenCurrentMasterKey(MasterKey key)
throws IOException;

public abstract void storeNMTokenPreviousMasterKey(MasterKey key)
throws IOException;

public abstract void storeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt, MasterKey key) throws IOException;

public abstract void removeNMTokenApplicationMasterKey(
ApplicationAttemptId attempt) throws IOException;

protected abstract void initStorage(Configuration conf) throws IOException;

protected abstract void startStorage() throws IOException;
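Taken together, the three NM state-store hunks follow one design: an abstract store defines the NM token operations, the LevelDB store persists them, and the null store silently ignores writes and refuses loads because recovery is disabled. A hypothetical miniature of that split, using generic names and byte[] payloads rather than the YARN types:

    import java.io.IOException;
    import java.util.Map;

    // One interface, a persistent implementation elsewhere, and a no-op
    // variant whose load method refuses to be used.
    public interface TokenStateStore {
      void storeCurrentMasterKey(byte[] key) throws IOException;

      Map<String, byte[]> loadState() throws IOException;
    }

    class NullTokenStateStore implements TokenStateStore {
      @Override
      public void storeCurrentMasterKey(byte[] key) {
        // intentionally a no-op: recovery is disabled
      }

      @Override
      public Map<String, byte[]> loadState() throws IOException {
        throw new UnsupportedOperationException(
            "Recovery not supported by this state store");
      }
    }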
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue