commit 8a9db6782e
Author: Arpit Agarwal
Date:   2013-08-27 17:05:13 +00:00

Merge all changes from trunk to branch HDFS-2832.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1517887 13f79535-47bb-0310-9956-ffa450edef68

57 changed files with 2119 additions and 437 deletions

@@ -189,4 +189,16 @@ public class Nfs3Constant {
public final static int CREATE_UNCHECKED = 0;
public final static int CREATE_GUARDED = 1;
public final static int CREATE_EXCLUSIVE = 2;
public static final String EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
/** Allowed hosts for nfs exports */
public static final String EXPORTS_ALLOWED_HOSTS_KEY = "hdfs.nfs.exports.allowed.hosts";
public static final String EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
/** Size for nfs exports cache */
public static final String EXPORTS_CACHE_SIZE_KEY = "hdfs.nfs.exports.cache.size";
public static final int EXPORTS_CACHE_SIZE_DEFAULT = 512;
/** Expiration time for nfs exports cache entry */
public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "hdfs.nfs.exports.cache.expirytime.millis";
public static final long EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
}
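
The three new keys together define the NFS export table: a semicolon-separated list of host matchers plus cache tuning. A minimal sketch of wiring them up through a Hadoop Configuration (the host values below are illustrative, not defaults):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

public class ExportsConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // One read-write subnet and one read-only host, separated by ";"
    // (EXPORTS_ALLOWED_HOSTS_SEPARATOR).
    conf.set(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY,
        "192.168.0.0/22 rw;host1.example.com");
    // Optional cache tuning; the defaults above are 512 entries and 15 min.
    conf.setInt(Nfs3Constant.EXPORTS_CACHE_SIZE_KEY, 1024);
    conf.setLong(Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
        5 * 60 * 1000L);
    System.out.println(conf.get(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY));
  }
}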

@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.nfs.nfs3;
import java.net.InetAddress;
import org.apache.hadoop.nfs.nfs3.response.NFS3Response;
import org.apache.hadoop.oncrpc.RpcAuthSys;
import org.apache.hadoop.oncrpc.XDR;
@@ -31,53 +33,54 @@ public interface Nfs3Interface {
public NFS3Response nullProcedure();
/** GETATTR: Get file attributes */
public NFS3Response getattr(XDR xdr, RpcAuthSys authSys);
public NFS3Response getattr(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** SETATTR: Set file attributes */
public NFS3Response setattr(XDR xdr, RpcAuthSys authSys);
public NFS3Response setattr(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** LOOKUP: Lookup filename */
public NFS3Response lookup(XDR xdr, RpcAuthSys authSys);
public NFS3Response lookup(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** ACCESS: Check access permission */
public NFS3Response access(XDR xdr, RpcAuthSys authSys);
public NFS3Response access(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** READ: Read from file */
public NFS3Response read(XDR xdr, RpcAuthSys authSys);
public NFS3Response read(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** WRITE: Write to file */
public NFS3Response write(XDR xdr, Channel channel, int xid, RpcAuthSys authSys);
public NFS3Response write(XDR xdr, Channel channel, int xid,
RpcAuthSys authSys, InetAddress client);
/** CREATE: Create a file */
public NFS3Response create(XDR xdr, RpcAuthSys authSys);
public NFS3Response create(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** MKDIR: Create a directory */
public NFS3Response mkdir(XDR xdr, RpcAuthSys authSys);
public NFS3Response mkdir(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** REMOVE: Remove a file */
public NFS3Response remove(XDR xdr, RpcAuthSys authSys);
public NFS3Response remove(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** RMDIR: Remove a directory */
public NFS3Response rmdir(XDR xdr, RpcAuthSys authSys);
public NFS3Response rmdir(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** RENAME: Rename a file or directory */
public NFS3Response rename(XDR xdr, RpcAuthSys authSys);
public NFS3Response rename(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** SYMLINK: Create a symbolic link */
public NFS3Response symlink(XDR xdr, RpcAuthSys authSys);
public NFS3Response symlink(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** READDIR: Read From directory */
public NFS3Response readdir(XDR xdr, RpcAuthSys authSys);
public NFS3Response readdir(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** FSSTAT: Get dynamic file system information */
public NFS3Response fsstat(XDR xdr, RpcAuthSys authSys);
public NFS3Response fsstat(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** FSINFO: Get static file system information */
public NFS3Response fsinfo(XDR xdr, RpcAuthSys authSys);
public NFS3Response fsinfo(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** PATHCONF: Retrieve POSIX information */
public NFS3Response pathconf(XDR xdr, RpcAuthSys authSys);
public NFS3Response pathconf(XDR xdr, RpcAuthSys authSys, InetAddress client);
/** COMMIT: Commit cached data on a server to stable storage */
public NFS3Response commit(XDR xdr, RpcAuthSys authSys);
public NFS3Response commit(XDR xdr, RpcAuthSys authSys, InetAddress client);
}
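
Every procedure now receives the client's InetAddress alongside the XDR payload and AUTH_SYS credentials, so an implementation can gate each call on the export table. A self-contained sketch of that gatekeeping pattern (AccessChecker is a hypothetical stand-in, not part of this change; see RpcProgramNfs3 below for the real checks):

import java.net.InetAddress;

public class GatekeepingSketch {
  // Hypothetical stand-in for the export-table lookup.
  interface AccessChecker {
    boolean hasReadAccess(InetAddress client);
  }

  private final AccessChecker checker;

  GatekeepingSketch(AccessChecker checker) {
    this.checker = checker;
  }

  // Mirrors how a read-only procedure such as GETATTR short-circuits:
  // NFS3_OK (0) when the client is allowed, NFS3ERR_ACCES (13) otherwise.
  int getattrStatus(InetAddress client) {
    return checker.hasReadAccess(client) ? 0 : 13;
  }

  public static void main(String[] args) throws Exception {
    GatekeepingSketch sketch = new GatekeepingSketch(new AccessChecker() {
      @Override
      public boolean hasReadAccess(InetAddress client) {
        return true; // allow everything in this sketch
      }
    });
    System.out.println(sketch.getattrStatus(InetAddress.getByName("127.0.0.1")));
  }
}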

@@ -27,6 +27,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
import org.apache.hadoop.hdfs.nfs.security.NfsExports;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.mount.MountEntry;
@@ -59,6 +61,8 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
/** List that is unmodifiable */
private final List<String> exports;
private final NfsExports hostsMatcher;
public RpcProgramMountd() throws IOException {
this(new ArrayList<String>(0));
@@ -72,19 +76,29 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
throws IOException {
// Note that RPC cache is not enabled
super("mountd", "localhost", PORT, PROGRAM, VERSION_1, VERSION_3, 0);
this.hostsMatcher = NfsExports.getInstance(config);
this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
this.exports = Collections.unmodifiableList(exports);
this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
@Override
public XDR nullOp(XDR out, int xid, InetAddress client) {
if (LOG.isDebugEnabled()) {
LOG.debug("MOUNT NULLOP : " + " client: " + client);
}
return RpcAcceptedReply.voidReply(out, xid);
}
@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
if (accessPrivilege == AccessPrivilege.NONE) {
return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
null);
}
String path = xdr.readString();
if (LOG.isDebugEnabled()) {
LOG.debug("MOUNT MNT path: " + path + " client: " + client);
@@ -121,6 +135,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
return out;
}
@Override
public XDR dump(XDR out, int xid, InetAddress client) {
if (LOG.isDebugEnabled()) {
LOG.debug("MOUNT NULLOP : " + " client: " + client);
@@ -131,6 +146,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
return out;
}
@Override
public XDR umnt(XDR xdr, XDR out, int xid, InetAddress client) {
String path = xdr.readString();
if (LOG.isDebugEnabled()) {
@@ -143,6 +159,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
return out;
}
@Override
public XDR umntall(XDR out, int xid, InetAddress client) {
if (LOG.isDebugEnabled()) {
LOG.debug("MOUNT UMNTALL : " + " client: " + client);

@@ -32,12 +32,17 @@ import org.apache.hadoop.util.StringUtils;
* Only TCP server is supported and UDP is not supported.
*/
public class Nfs3 extends Nfs3Base {
static {
Configuration.addDefaultResource("hdfs-default.xml");
Configuration.addDefaultResource("hdfs-site.xml");
}
public Nfs3(List<String> exports) throws IOException {
super(new Mountd(exports), new RpcProgramNfs3(exports));
super(new Mountd(exports), new RpcProgramNfs3());
}
public Nfs3(List<String> exports, Configuration config) throws IOException {
super(new Mountd(exports, config), new RpcProgramNfs3(exports, config));
super(new Mountd(exports, config), new RpcProgramNfs3(config));
}
public static void main(String[] args) throws IOException {

@@ -88,6 +88,7 @@ public class Nfs3Utils {
return new WccAttr(attr.getSize(), attr.getMtime(), attr.getCtime());
}
// TODO: maybe not efficient
public static WccData createWccData(final WccAttr preOpAttr,
DFSClient dfsClient, final String fileIdPath, final IdUserGroup iug)
throws IOException {

@@ -22,22 +22,23 @@ import java.io.IOException;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
import org.apache.hadoop.hdfs.nfs.security.NfsExports;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -125,6 +126,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
private final IdUserGroup iug;// = new IdUserGroup();
private final DFSClientCache clientCache;
private final NfsExports exports;
/**
* superUserClient should always impersonate the HDFS file system owner to
* send requests which require supergroup privilege. This requires the same user
@@ -138,17 +141,19 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
private Statistics statistics;
private String writeDumpDir; // The directory where dump files are saved
public RpcProgramNfs3(List<String> exports) throws IOException {
this(exports, new Configuration());
public RpcProgramNfs3() throws IOException {
this(new Configuration());
}
public RpcProgramNfs3(List<String> exports, Configuration config)
public RpcProgramNfs3(Configuration config)
throws IOException {
super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
Nfs3Constant.VERSION, Nfs3Constant.VERSION, 100);
config.set(FsPermission.UMASK_LABEL, "000");
iug = new IdUserGroup();
exports = NfsExports.getInstance(config);
writeManager = new WriteManager(iug, config);
clientCache = new DFSClientCache(config);
superUserClient = new DFSClient(NameNode.getAddress(config), config);
@@ -185,7 +190,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
/******************************************************
* RPC call handlers
******************************************************/
@Override
public NFS3Response nullProcedure() {
if (LOG.isDebugEnabled()) {
LOG.debug("NFS NULL");
@@ -193,8 +199,16 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
return new VoidResponse(Nfs3Status.NFS3_OK);
}
public GETATTR3Response getattr(XDR xdr, RpcAuthSys authSys) {
@Override
public GETATTR3Response getattr(XDR xdr, RpcAuthSys authSys,
InetAddress client) {
GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
if (dfsClient == null) {
@@ -267,7 +281,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public SETATTR3Response setattr(XDR xdr, RpcAuthSys authSys) {
@Override
public SETATTR3Response setattr(XDR xdr, RpcAuthSys authSys,
InetAddress client) {
SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK);
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
@@ -298,34 +314,39 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
String fileIdPath = Nfs3Utils.getFileIdPath(handle);
WccAttr preOpAttr = null;
Nfs3FileAttributes preOpAttr = null;
try {
preOpAttr = Nfs3Utils.getWccAttr(dfsClient, fileIdPath);
preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
if (preOpAttr == null) {
LOG.info("Can't get path for fileId:" + handle.getFileId());
response.setStatus(Nfs3Status.NFS3ERR_STALE);
return response;
}
WccAttr preOpWcc = Nfs3Utils.getWccAttr(preOpAttr);
if (request.isCheck()) {
if (!preOpAttr.getCtime().equals(request.getCtime())) {
WccData wccData = Nfs3Utils.createWccData(preOpAttr, dfsClient,
fileIdPath, iug);
WccData wccData = new WccData(preOpWcc, preOpAttr);
return new SETATTR3Response(Nfs3Status.NFS3ERR_NOT_SYNC, wccData);
}
}
// check the write access privilege
if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
return new SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(
preOpWcc, preOpAttr));
}
setattrInternal(dfsClient, fileIdPath, request.getAttr(), true);
Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient,
fileIdPath, iug);
WccData wccData = new WccData(preOpAttr, postOpAttr);
WccData wccData = new WccData(preOpWcc, postOpAttr);
return new SETATTR3Response(Nfs3Status.NFS3_OK, wccData);
} catch (IOException e) {
LOG.warn("Exception ", e);
WccData wccData = null;
try {
wccData = Nfs3Utils
.createWccData(preOpAttr, dfsClient, fileIdPath, iug);
wccData = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpAttr),
dfsClient, fileIdPath, iug);
} catch (IOException e1) {
LOG.info("Can't get postOpAttr for fileIdPath: " + fileIdPath);
}
@@ -337,8 +358,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public LOOKUP3Response lookup(XDR xdr, RpcAuthSys authSys) {
@Override
public LOOKUP3Response lookup(XDR xdr, RpcAuthSys authSys, InetAddress client) {
LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
if (dfsClient == null) {
@@ -392,8 +420,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public ACCESS3Response access(XDR xdr, RpcAuthSys authSys) {
@Override
public ACCESS3Response access(XDR xdr, RpcAuthSys authSys, InetAddress client) {
ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
if (dfsClient == null) {
@@ -434,12 +469,20 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public READLINK3Response readlink(XDR xdr, RpcAuthSys authSys) {
public READLINK3Response readlink(XDR xdr, RpcAuthSys authSys,
InetAddress client) {
return new READLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP);
}
public READ3Response read(XDR xdr, RpcAuthSys authSys) {
@Override
public READ3Response read(XDR xdr, RpcAuthSys authSys, InetAddress client) {
READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
if (dfsClient == null) {
@@ -528,8 +571,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
@Override
public WRITE3Response write(XDR xdr, Channel channel, int xid,
RpcAuthSys authSys) {
RpcAuthSys authSys, InetAddress client) {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK);
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
@@ -570,6 +614,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
LOG.error("Can't get path for fileId:" + handle.getFileId());
return new WRITE3Response(Nfs3Status.NFS3ERR_STALE);
}
if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
return new WRITE3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(
Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), 0, stableHow,
Nfs3Constant.WRITE_COMMIT_VERF);
}
if (LOG.isDebugEnabled()) {
LOG.debug("requesed offset=" + offset + " and current filesize="
+ preOpAttr.getSize());
@@ -596,7 +647,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
return null;
}
public CREATE3Response create(XDR xdr, RpcAuthSys authSys) {
@Override
public CREATE3Response create(XDR xdr, RpcAuthSys authSys, InetAddress client) {
CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK);
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
@@ -631,16 +683,22 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
HdfsDataOutputStream fos = null;
String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
WccAttr preOpDirAttr = null;
Nfs3FileAttributes preOpDirAttr = null;
Nfs3FileAttributes postOpObjAttr = null;
FileHandle fileHandle = null;
WccData dirWcc = null;
try {
preOpDirAttr = Nfs3Utils.getWccAttr(dfsClient, dirFileIdPath);
preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (preOpDirAttr == null) {
LOG.error("Can't get path for dirHandle:" + dirHandle);
return new CREATE3Response(Nfs3Status.NFS3ERR_STALE);
}
if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, null,
preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
preOpDirAttr));
}
String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName;
SetAttr3 setAttr3 = request.getObjAttr();
@@ -649,9 +707,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
SetAttrField.MODE) ? new FsPermission((short) setAttr3.getMode())
: FsPermission.getDefault().applyUMask(umask);
EnumSet<CreateFlag> flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ? EnumSet
.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet
.of(CreateFlag.CREATE);
EnumSet<CreateFlag> flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ?
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) :
EnumSet.of(CreateFlag.CREATE);
fos = new HdfsDataOutputStream(dfsClient.create(fileIdPath, permission,
flag, false, replication, blockSize, null, bufferSize, null),
@@ -668,8 +726,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, dirFileIdPath,
iug);
dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
dfsClient, dirFileIdPath, iug);
} catch (IOException e) {
LOG.error("Exception", e);
if (fos != null) {
@@ -682,8 +740,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
if (dirWcc == null) {
try {
dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
dfsClient, dirFileIdPath, iug);
} catch (IOException e1) {
LOG.error("Can't get postOpDirAttr for dirFileId:"
+ dirHandle.getFileId());
@@ -712,7 +770,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
dirWcc);
}
public MKDIR3Response mkdir(XDR xdr, RpcAuthSys authSys) {
@Override
public MKDIR3Response mkdir(XDR xdr, RpcAuthSys authSys, InetAddress client) {
MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK);
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
@@ -739,17 +798,22 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
WccAttr preOpDirAttr = null;
Nfs3FileAttributes preOpDirAttr = null;
Nfs3FileAttributes postOpDirAttr = null;
Nfs3FileAttributes postOpObjAttr = null;
FileHandle objFileHandle = null;
try {
preOpDirAttr = Nfs3Utils.getWccAttr(dfsClient, dirFileIdPath);
preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (preOpDirAttr == null) {
LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE);
}
if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
return new MKDIR3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr,
new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr));
}
final String fileIdPath = dirFileIdPath + "/" + fileName;
SetAttr3 setAttr3 = request.getObjAttr();
FsPermission permission = setAttr3.getUpdateFields().contains(
@@ -757,8 +821,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
: FsPermission.getDefault().applyUMask(umask);
if (!dfsClient.mkdirs(fileIdPath, permission, false)) {
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
WccData dirWcc = Nfs3Utils.createWccData(
Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
return new MKDIR3Response(Nfs3Status.NFS3ERR_IO, null, null, dirWcc);
}
@@ -771,8 +835,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
objFileHandle = new FileHandle(postOpObjAttr.getFileId());
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
WccData dirWcc = Nfs3Utils.createWccData(
Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
return new MKDIR3Response(Nfs3Status.NFS3_OK, new FileHandle(
postOpObjAttr.getFileId()), postOpObjAttr, dirWcc);
} catch (IOException e) {
@@ -785,7 +849,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
LOG.info("Can't get postOpDirAttr for " + dirFileIdPath);
}
}
WccData dirWcc = new WccData(preOpDirAttr, postOpDirAttr);
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
postOpDirAttr);
if (e instanceof AccessControlException) {
return new MKDIR3Response(Nfs3Status.NFS3ERR_PERM, objFileHandle,
postOpObjAttr, dirWcc);
@@ -796,12 +861,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public READDIR3Response mknod(XDR xdr, RpcAuthSys authSys) {
public READDIR3Response mknod(XDR xdr, RpcAuthSys authSys, InetAddress client) {
return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP);
}
public REMOVE3Response remove(XDR xdr, RpcAuthSys authSys) {
@Override
public REMOVE3Response remove(XDR xdr, RpcAuthSys authSys, InetAddress client) {
REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
@@ -825,10 +890,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
WccAttr preOpDirAttr = null;
Nfs3FileAttributes preOpDirAttr = null;
Nfs3FileAttributes postOpDirAttr = null;
try {
preOpDirAttr = Nfs3Utils.getWccAttr(dfsClient, dirFileIdPath);
preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (preOpDirAttr == null) {
LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
@@ -838,24 +903,23 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient,
fileIdPath);
if (fstat == null) {
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
preOpDirAttr);
return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, dirWcc);
}
if (fstat.isDir()) {
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
preOpDirAttr);
return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, dirWcc);
}
if (dfsClient.delete(fileIdPath, false) == false) {
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
boolean result = dfsClient.delete(fileIdPath, false);
WccData dirWcc = Nfs3Utils.createWccData(
Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
if (!result) {
return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
}
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
return new REMOVE3Response(Nfs3Status.NFS3_OK, dirWcc);
} catch (IOException e) {
LOG.warn("Exception ", e);
@@ -867,7 +931,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
LOG.info("Can't get postOpDirAttr for " + dirFileIdPath);
}
}
WccData dirWcc = new WccData(preOpDirAttr, postOpDirAttr);
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
postOpDirAttr);
if (e instanceof AccessControlException) {
return new REMOVE3Response(Nfs3Status.NFS3ERR_PERM, dirWcc);
} else {
@@ -876,7 +941,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public RMDIR3Response rmdir(XDR xdr, RpcAuthSys authSys) {
@Override
public RMDIR3Response rmdir(XDR xdr, RpcAuthSys authSys, InetAddress client) {
RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK);
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
@@ -901,45 +967,43 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
WccAttr preOpDirAttr = null;
Nfs3FileAttributes preOpDirAttr = null;
Nfs3FileAttributes postOpDirAttr = null;
try {
preOpDirAttr = Nfs3Utils.getWccAttr(dfsClient, dirFileIdPath);
preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (preOpDirAttr == null) {
LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId());
return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE);
}
WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
preOpDirAttr);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
}
String fileIdPath = dirFileIdPath + "/" + fileName;
HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient,
fileIdPath);
if (fstat == null) {
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, dirWcc);
return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
}
if (!fstat.isDir()) {
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, dirWcc);
return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc);
}
if (fstat.getChildrenNum() > 0) {
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, dirWcc);
return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, errWcc);
}
if (dfsClient.delete(fileIdPath, false) == false) {
WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient,
dirFileIdPath, iug);
boolean result = dfsClient.delete(fileIdPath, false);
WccData dirWcc = Nfs3Utils.createWccData(
Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
if (!result) {
return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
}
postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
WccData wccData = new WccData(preOpDirAttr, postOpDirAttr);
return new RMDIR3Response(Nfs3Status.NFS3_OK, wccData);
return new RMDIR3Response(Nfs3Status.NFS3_OK, dirWcc);
} catch (IOException e) {
LOG.warn("Exception ", e);
// Try to return correct WccData
@@ -950,7 +1014,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
LOG.info("Can't get postOpDirAttr for " + dirFileIdPath);
}
}
WccData dirWcc = new WccData(preOpDirAttr, postOpDirAttr);
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
postOpDirAttr);
if (e instanceof AccessControlException) {
return new RMDIR3Response(Nfs3Status.NFS3ERR_PERM, dirWcc);
} else {
@@ -959,7 +1024,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public RENAME3Response rename(XDR xdr, RpcAuthSys authSys) {
@Override
public RENAME3Response rename(XDR xdr, RpcAuthSys authSys, InetAddress client) {
RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK);
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
@@ -987,23 +1053,31 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
String fromDirFileIdPath = Nfs3Utils.getFileIdPath(fromHandle);
String toDirFileIdPath = Nfs3Utils.getFileIdPath(toHandle);
WccAttr fromPreOpAttr = null;
WccAttr toPreOpAttr = null;
Nfs3FileAttributes fromPreOpAttr = null;
Nfs3FileAttributes toPreOpAttr = null;
WccData fromDirWcc = null;
WccData toDirWcc = null;
try {
fromPreOpAttr = Nfs3Utils.getWccAttr(dfsClient, fromDirFileIdPath);
fromPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, fromDirFileIdPath, iug);
if (fromPreOpAttr == null) {
LOG.info("Can't get path for fromHandle fileId:"
+ fromHandle.getFileId());
return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
}
toPreOpAttr = Nfs3Utils.getWccAttr(dfsClient, toDirFileIdPath);
toPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, toDirFileIdPath, iug);
if (toPreOpAttr == null) {
LOG.info("Can't get path for toHandle fileId:" + toHandle.getFileId());
return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
}
if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
WccData fromWcc = new WccData(Nfs3Utils.getWccAttr(fromPreOpAttr),
fromPreOpAttr);
WccData toWcc = new WccData(Nfs3Utils.getWccAttr(toPreOpAttr),
toPreOpAttr);
return new RENAME3Response(Nfs3Status.NFS3ERR_ACCES, fromWcc, toWcc);
}
String src = fromDirFileIdPath + "/" + fromName;
String dst = toDirFileIdPath + "/" + toName;
@@ -1011,20 +1085,20 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
dfsClient.rename(src, dst, Options.Rename.NONE);
// Assemble the reply
fromDirWcc = Nfs3Utils.createWccData(fromPreOpAttr, dfsClient,
fromDirFileIdPath, iug);
toDirWcc = Nfs3Utils.createWccData(toPreOpAttr, dfsClient,
toDirFileIdPath, iug);
fromDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(fromPreOpAttr),
dfsClient, fromDirFileIdPath, iug);
toDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(toPreOpAttr),
dfsClient, toDirFileIdPath, iug);
return new RENAME3Response(Nfs3Status.NFS3_OK, fromDirWcc, toDirWcc);
} catch (IOException e) {
LOG.warn("Exception ", e);
// Try to return correct WccData
try {
fromDirWcc = Nfs3Utils.createWccData(fromPreOpAttr, dfsClient,
fromDirFileIdPath, iug);
toDirWcc = Nfs3Utils.createWccData(toPreOpAttr, dfsClient,
toDirFileIdPath, iug);
fromDirWcc = Nfs3Utils.createWccData(
Nfs3Utils.getWccAttr(fromPreOpAttr), dfsClient, fromDirFileIdPath,
iug);
toDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(toPreOpAttr),
dfsClient, toDirFileIdPath, iug);
} catch (IOException e1) {
LOG.info("Can't get postOpDirAttr for " + fromDirFileIdPath + " or"
+ toDirFileIdPath);
@@ -1038,16 +1112,25 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public SYMLINK3Response symlink(XDR xdr, RpcAuthSys authSys) {
public SYMLINK3Response symlink(XDR xdr, RpcAuthSys authSys,
InetAddress client) {
return new SYMLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP);
}
public READDIR3Response link(XDR xdr, RpcAuthSys authSys) {
public READDIR3Response link(XDR xdr, RpcAuthSys authSys, InetAddress client) {
return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP);
}
public READDIR3Response readdir(XDR xdr, RpcAuthSys authSys) {
@Override
public READDIR3Response readdir(XDR xdr, RpcAuthSys authSys,
InetAddress client) {
READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
if (dfsClient == null) {
@@ -1180,7 +1263,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
dirStatus.getModificationTime(), dirList);
}
public READDIRPLUS3Response readdirplus(XDR xdr, RpcAuthSys authSys) {
public READDIRPLUS3Response readdirplus(XDR xdr, RpcAuthSys authSys,
InetAddress client) {
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES);
}
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
if (dfsClient == null) {
@@ -1325,8 +1413,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
dirStatus.getModificationTime(), dirListPlus);
}
public FSSTAT3Response fsstat(XDR xdr, RpcAuthSys authSys) {
@Override
public FSSTAT3Response fsstat(XDR xdr, RpcAuthSys authSys, InetAddress client) {
FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
if (dfsClient == null) {
@@ -1376,8 +1471,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public FSINFO3Response fsinfo(XDR xdr, RpcAuthSys authSys) {
@Override
public FSINFO3Response fsinfo(XDR xdr, RpcAuthSys authSys, InetAddress client) {
FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
if (dfsClient == null) {
@@ -1421,8 +1523,16 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public PATHCONF3Response pathconf(XDR xdr, RpcAuthSys authSys) {
@Override
public PATHCONF3Response pathconf(XDR xdr, RpcAuthSys authSys,
InetAddress client) {
PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
if (dfsClient == null) {
@@ -1461,7 +1571,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
}
public COMMIT3Response commit(XDR xdr, RpcAuthSys authSys) {
@Override
public COMMIT3Response commit(XDR xdr, RpcAuthSys authSys, InetAddress client) {
COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
String uname = authSysCheck(authSys);
DFSClient dfsClient = clientCache.get(uname);
@@ -1486,13 +1597,20 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
String fileIdPath = Nfs3Utils.getFileIdPath(handle);
WccAttr preOpAttr = null;
Nfs3FileAttributes preOpAttr = null;
try {
preOpAttr = Nfs3Utils.getWccAttr(dfsClient, fileIdPath);
preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
if (preOpAttr == null) {
LOG.info("Can't get path for fileId:" + handle.getFileId());
return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
}
if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
return new COMMIT3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(
Nfs3Utils.getWccAttr(preOpAttr), preOpAttr),
Nfs3Constant.WRITE_COMMIT_VERF);
}
long commitOffset = (request.getCount() == 0) ? 0
: (request.getOffset() + request.getCount());
@@ -1504,7 +1622,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
}
Nfs3FileAttributes postOpAttr = writeManager.getFileAttr(dfsClient,
handle, iug);
WccData fileWcc = new WccData(preOpAttr, postOpAttr);
WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
return new COMMIT3Response(status, fileWcc,
Nfs3Constant.WRITE_COMMIT_VERF);
@@ -1516,7 +1634,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
} catch (IOException e1) {
LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId());
}
WccData fileWcc = new WccData(preOpAttr, postOpAttr);
WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
return new COMMIT3Response(Nfs3Status.NFS3ERR_IO, fileWcc,
Nfs3Constant.WRITE_COMMIT_VERF);
}
@@ -1554,47 +1672,47 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
if (nfsproc3 == NFSPROC3.NULL) {
response = nullProcedure();
} else if (nfsproc3 == NFSPROC3.GETATTR) {
response = getattr(xdr, authSys);
response = getattr(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.SETATTR) {
response = setattr(xdr, authSys);
response = setattr(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.LOOKUP) {
response = lookup(xdr, authSys);
response = lookup(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.ACCESS) {
response = access(xdr, authSys);
response = access(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.READLINK) {
response = readlink(xdr, authSys);
response = readlink(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.READ) {
response = read(xdr, authSys);
response = read(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.WRITE) {
response = write(xdr, channel, xid, authSys);
response = write(xdr, channel, xid, authSys, client);
} else if (nfsproc3 == NFSPROC3.CREATE) {
response = create(xdr, authSys);
response = create(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.MKDIR) {
response = mkdir(xdr, authSys);
response = mkdir(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.SYMLINK) {
response = symlink(xdr, authSys);
response = symlink(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.MKNOD) {
response = mknod(xdr, authSys);
response = mknod(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.REMOVE) {
response = remove(xdr, authSys);
response = remove(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.RMDIR) {
response = rmdir(xdr, authSys);
response = rmdir(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.RENAME) {
response = rename(xdr, authSys);
response = rename(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.LINK) {
response = link(xdr, authSys);
response = link(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.READDIR) {
response = readdir(xdr, authSys);
response = readdir(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.READDIRPLUS) {
response = readdirplus(xdr, authSys);
response = readdirplus(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.FSSTAT) {
response = fsstat(xdr, authSys);
response = fsstat(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.FSINFO) {
response = fsinfo(xdr, authSys);
response = fsinfo(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.PATHCONF) {
response = pathconf(xdr, authSys);
response = pathconf(xdr, authSys, client);
} else if (nfsproc3 == NFSPROC3.COMMIT) {
response = commit(xdr, authSys);
response = commit(xdr, authSys, client);
} else {
// Invalid procedure
RpcAcceptedReply.voidReply(out, xid,
@@ -1611,4 +1729,17 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(call.getProcedure());
return nfsproc3 == null || nfsproc3.isIdempotent();
}
private boolean checkAccessPrivilege(final InetAddress client,
final AccessPrivilege expected) {
AccessPrivilege access = exports.getAccessPrivilege(client);
if (access == AccessPrivilege.NONE) {
return false;
}
if (access == AccessPrivilege.READ_ONLY
&& expected == AccessPrivilege.READ_WRITE) {
return false;
}
return true;
}
}
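
The new checkAccessPrivilege helper reduces to a two-rule decision: NONE always fails, and READ_ONLY fails only when READ_WRITE is required. A standalone model of the same decision table (a sketch; the real method first resolves the privilege for the client address via NfsExports):

public class AccessDecisionSketch {
  enum AccessPrivilege { READ_ONLY, READ_WRITE, NONE }

  static boolean allowed(AccessPrivilege granted, AccessPrivilege required) {
    if (granted == AccessPrivilege.NONE) {
      return false;
    }
    // A read-only grant is insufficient only for read-write procedures.
    return !(granted == AccessPrivilege.READ_ONLY
        && required == AccessPrivilege.READ_WRITE);
  }

  public static void main(String[] args) {
    // false: WRITE/SETATTR-style calls fail for a read-only client
    System.out.println(allowed(AccessPrivilege.READ_ONLY,
        AccessPrivilege.READ_WRITE));
    // true: GETATTR-style calls succeed for a read-only client
    System.out.println(allowed(AccessPrivilege.READ_ONLY,
        AccessPrivilege.READ_ONLY));
  }
}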

@@ -0,0 +1,24 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.nfs.security;
public enum AccessPrivilege {
READ_ONLY,
READ_WRITE,
NONE;
}

@@ -0,0 +1,354 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.nfs.security;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.util.SubnetUtils;
import org.apache.commons.net.util.SubnetUtils.SubnetInfo;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.util.LightWeightCache;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
import com.google.common.base.Preconditions;
/**
* This class provides functionality for loading and checking the mapping
* between client hosts and their access privileges.
*/
public class NfsExports {
private static NfsExports exports = null;
public static synchronized NfsExports getInstance(Configuration conf) {
if (exports == null) {
String matchHosts = conf.get(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY,
Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
int cacheSize = conf.getInt(Nfs3Constant.EXPORTS_CACHE_SIZE_KEY,
Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT);
long expirationPeriodNano = conf.getLong(
Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
}
return exports;
}
public static final Log LOG = LogFactory.getLog(NfsExports.class);
// only support IPv4 now
private static final String IP_ADDRESS =
"(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})";
private static final String SLASH_FORMAT_SHORT = IP_ADDRESS + "/(\\d{1,3})";
private static final String SLASH_FORMAT_LONG = IP_ADDRESS + "/" + IP_ADDRESS;
private static final Pattern CIDR_FORMAT_SHORT =
Pattern.compile(SLASH_FORMAT_SHORT);
private static final Pattern CIDR_FORMAT_LONG =
Pattern.compile(SLASH_FORMAT_LONG);
static class AccessCacheEntry implements LightWeightCache.Entry {
private final String hostAddr;
private AccessPrivilege access;
private final long expirationTime;
private LightWeightGSet.LinkedElement next;
AccessCacheEntry(String hostAddr, AccessPrivilege access,
long expirationTime) {
Preconditions.checkArgument(hostAddr != null);
this.hostAddr = hostAddr;
this.access = access;
this.expirationTime = expirationTime;
}
@Override
public int hashCode() {
return hostAddr.hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof AccessCacheEntry) {
AccessCacheEntry entry = (AccessCacheEntry) obj;
return this.hostAddr.equals(entry.hostAddr);
}
return false;
}
@Override
public void setNext(LinkedElement next) {
this.next = next;
}
@Override
public LinkedElement getNext() {
return this.next;
}
@Override
public void setExpirationTime(long timeNano) {
// we set expiration time in the constructor, and the expiration time
// does not change
}
@Override
public long getExpirationTime() {
return this.expirationTime;
}
}
private final List<Match> mMatches;
private final LightWeightCache<AccessCacheEntry, AccessCacheEntry> accessCache;
private final long cacheExpirationPeriod;
/**
* Constructor.
* @param cacheSize The size of the access privilege cache.
* @param expirationPeriodNano The expiration period of cache entries, in
* nanoseconds.
* @param matchHosts A string specifying one or multiple matchers.
*/
NfsExports(int cacheSize, long expirationPeriodNano, String matchHosts) {
this.cacheExpirationPeriod = expirationPeriodNano;
accessCache = new LightWeightCache<AccessCacheEntry, AccessCacheEntry>(
cacheSize, cacheSize, expirationPeriodNano, 0);
String[] matchStrings = matchHosts.split(
Nfs3Constant.EXPORTS_ALLOWED_HOSTS_SEPARATOR);
mMatches = new ArrayList<Match>(matchStrings.length);
for(String mStr : matchStrings) {
if (LOG.isDebugEnabled()) {
LOG.debug("Processing match string '" + mStr + "'");
}
mStr = mStr.trim();
if(!mStr.isEmpty()) {
mMatches.add(getMatch(mStr));
}
}
}
public AccessPrivilege getAccessPrivilege(InetAddress addr) {
return getAccessPrivilege(addr.getHostAddress(),
addr.getCanonicalHostName());
}
AccessPrivilege getAccessPrivilege(String address, String hostname) {
long now = System.nanoTime();
AccessCacheEntry newEntry = new AccessCacheEntry(address,
AccessPrivilege.NONE, now + this.cacheExpirationPeriod);
// check if there is a cache entry for the given address
AccessCacheEntry cachedEntry = accessCache.get(newEntry);
if (cachedEntry != null && now < cachedEntry.expirationTime) {
// get a non-expired cache entry, use it
return cachedEntry.access;
} else {
for(Match match : mMatches) {
if(match.isIncluded(address, hostname)) {
if (match.accessPrivilege == AccessPrivilege.READ_ONLY) {
newEntry.access = AccessPrivilege.READ_ONLY;
break;
} else if (match.accessPrivilege == AccessPrivilege.READ_WRITE) {
newEntry.access = AccessPrivilege.READ_WRITE;
}
}
}
accessCache.put(newEntry);
return newEntry.access;
}
}
private static abstract class Match {
private final AccessPrivilege accessPrivilege;
private Match(AccessPrivilege accessPrivilege) {
this.accessPrivilege = accessPrivilege;
}
public abstract boolean isIncluded(String address, String hostname);
}
/**
* Matcher covering all client hosts (specified by "*")
*/
private static class AnonymousMatch extends Match {
private AnonymousMatch(AccessPrivilege accessPrivilege) {
super(accessPrivilege);
}
@Override
public boolean isIncluded(String ip, String hostname) {
return true;
}
}
/**
* Matcher using CIDR for client host matching
*/
private static class CIDRMatch extends Match {
private final SubnetInfo subnetInfo;
private CIDRMatch(AccessPrivilege accessPrivilege, SubnetInfo subnetInfo) {
super(accessPrivilege);
this.subnetInfo = subnetInfo;
}
@Override
public boolean isIncluded(String address, String hostname) {
if(subnetInfo.isInRange(address)) {
if(LOG.isDebugEnabled()) {
LOG.debug("CIDRNMatcher low = " + subnetInfo.getLowAddress() +
", high = " + subnetInfo.getHighAddress() +
", allowing client '" + address + "', '" + hostname + "'");
}
return true;
}
if(LOG.isDebugEnabled()) {
LOG.debug("CIDRNMatcher low = " + subnetInfo.getLowAddress() +
", high = " + subnetInfo.getHighAddress() +
", denying client '" + address + "', '" + hostname + "'");
}
return false;
}
}
/**
* Matcher requiring exact string match for client host
*/
private static class ExactMatch extends Match {
private final String ipOrHost;
private ExactMatch(AccessPrivilege accessPrivilege, String ipOrHost) {
super(accessPrivilege);
this.ipOrHost = ipOrHost;
}
@Override
public boolean isIncluded(String address, String hostname) {
if(ipOrHost.equalsIgnoreCase(address) ||
ipOrHost.equalsIgnoreCase(hostname)) {
if(LOG.isDebugEnabled()) {
LOG.debug("ExactMatcher '" + ipOrHost + "', allowing client " +
"'" + address + "', '" + hostname + "'");
}
return true;
}
if(LOG.isDebugEnabled()) {
LOG.debug("ExactMatcher '" + ipOrHost + "', denying client " +
"'" + address + "', '" + hostname + "'");
}
return false;
}
}
/**
* Matcher where client hosts are specified by regular expression
*/
private static class RegexMatch extends Match {
private final Pattern pattern;
private RegexMatch(AccessPrivilege accessPrivilege, String wildcard) {
super(accessPrivilege);
this.pattern = Pattern.compile(wildcard, Pattern.CASE_INSENSITIVE);
}
@Override
public boolean isIncluded(String address, String hostname) {
if (pattern.matcher(address).matches()
|| pattern.matcher(hostname).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("RegexMatcher '" + pattern.pattern()
+ "', allowing client '" + address + "', '" + hostname + "'");
}
return true;
}
if (LOG.isDebugEnabled()) {
LOG.debug("RegexMatcher '" + pattern.pattern()
+ "', denying client '" + address + "', '" + hostname + "'");
}
return false;
}
}
/**
* Loads a matcher from a string. The default access privilege is read-only.
* The string contains 1 or 2 parts, separated by whitespace characters, where
* the first part specifies the client hosts, and the second part (if
* present) specifies the access privilege of the client hosts, i.e.,
*
* "client-hosts [access-privilege]"
*/
private static Match getMatch(String line) {
String[] parts = line.split("\\s+");
final String host;
AccessPrivilege privilege = AccessPrivilege.READ_ONLY;
switch (parts.length) {
case 1:
host = parts[0].toLowerCase().trim();
break;
case 2:
host = parts[0].toLowerCase().trim();
String option = parts[1].trim();
if ("rw".equalsIgnoreCase(option)) {
privilege = AccessPrivilege.READ_WRITE;
}
break;
default:
throw new IllegalArgumentException("Incorrectly formatted line '" + line
+ "'");
}
if (host.equals("*")) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using match all for '" + host + "' and " + privilege);
}
return new AnonymousMatch(privilege);
} else if (CIDR_FORMAT_SHORT.matcher(host).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using CIDR match for '" + host + "' and " + privilege);
}
return new CIDRMatch(privilege, new SubnetUtils(host).getInfo());
} else if (CIDR_FORMAT_LONG.matcher(host).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using CIDR match for '" + host + "' and " + privilege);
}
String[] pair = host.split("/");
return new CIDRMatch(privilege,
new SubnetUtils(pair[0], pair[1]).getInfo());
} else if (host.contains("*") || host.contains("?") || host.contains("[")
|| host.contains("]")) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using Regex match for '" + host + "' and " + privilege);
}
return new RegexMatch(privilege, host);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Using exact match for '" + host + "' and " + privilege);
}
return new ExactMatch(privilege, host);
}
}
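
End to end, the matcher can be exercised with a short usage sketch, assuming these classes on the classpath (the match string and address are borrowed from the tests below, not defaults):

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
import org.apache.hadoop.hdfs.nfs.security.NfsExports;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

public class NfsExportsUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Subnet clients get read-write; regex-matched hosts fall back to the
    // default read-only privilege.
    conf.set(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY,
        "192.168.0.0/22 rw;[a-z]+.b.com");
    NfsExports exports = NfsExports.getInstance(conf);
    AccessPrivilege p =
        exports.getAccessPrivilege(InetAddress.getByName("192.168.0.1"));
    System.out.println(p); // READ_WRITE for an in-subnet client
  }
}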

@@ -0,0 +1,191 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.nfs.security;
import junit.framework.Assert;
import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
import org.apache.hadoop.hdfs.nfs.security.NfsExports;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.junit.Test;
public class TestNfsExports {
private final String address1 = "192.168.0.1";
private final String address2 = "10.0.0.1";
private final String hostname1 = "a.b.com";
private final String hostname2 = "a.b.org";
private static final long ExpirationPeriod =
Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000;
private static final int CacheSize = Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT;
@Test
public void testWildcardRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, "* rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
}
@Test
public void testWildcardRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, "* ro");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
}
@Test
public void testExactAddressRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, address1
+ " rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertFalse(AccessPrivilege.READ_WRITE == matcher
.getAccessPrivilege(address2, hostname1));
}
@Test
public void testExactAddressRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, address1);
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testExactHostRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, hostname1
+ " rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
}
@Test
public void testExactHostRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, hostname1);
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
}
@Test
public void testCidrShortRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.0/22 rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testCidrShortRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.0/22");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testCidrLongRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.0/255.255.252.0 rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testCidrLongRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.0/255.255.252.0");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testRegexIPRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.[0-9]+ rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testRegexIPRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.[0-9]+");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testRegexHostRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"[a-z]+.b.com rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
// address1 will hit the cache
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname2));
}
@Test
public void testRegexHostRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"[a-z]+.b.com");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
// address1 will hit the cache
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname2));
}
@Test
public void testMultiMatchers() throws Exception {
long shortExpirationPeriod = 1 * 1000 * 1000 * 1000; // 1s
NfsExports matcher = new NfsExports(CacheSize, shortExpirationPeriod,
"192.168.0.[0-9]+;[a-z]+.b.com rw");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname2));
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, address1));
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address2, hostname1));
// address2 will hit the cache
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address2, hostname2));
Thread.sleep(1000);
// no cache for address2 now
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, address2));
}
}
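For reference, a minimal usage sketch of the NfsExports API exercised by the tests above; the import paths are assumed from the hadoop-nfs module, and the export spec, cache size, expiration (in nanoseconds), and host values are all illustrative:

import org.apache.hadoop.nfs.AccessPrivilege;
import org.apache.hadoop.nfs.NfsExports;

// Entries are ';'-separated; each may be an exact host, a CIDR range
// (short or long form), or a regex, optionally followed by "rw".
NfsExports exports = new NfsExports(512, 15L * 60 * 1000 * 1000 * 1000,
    "192.168.0.0/22 rw;[a-z]+.b.com");
// An address inside 192.168.0.0/22 gets READ_WRITE; a hostname matching the
// regex (outside that range) gets READ_ONLY; anything else gets NONE.
AccessPrivilege p = exports.getAccessPrivilege("192.168.0.1", "a.b.com");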

View File

@ -304,6 +304,9 @@ Release 2.1.1-beta - UNRELEASED
HDFS-5069 Include hadoop-nfs and hadoop-hdfs-nfs into hadoop dist for
NFS deployment (brandonli)
HDFS-4947 Add NFS server export table to control export by hostname or
IP range (Jing Zhao via brandonli)
IMPROVEMENTS
HDFS-4513. Clarify in the WebHDFS REST API that all JSON responses may

View File

@ -181,6 +181,9 @@ Release 2.1.1-beta - UNRELEASED
IMPROVEMENTS
MAPREDUCE-5478. TeraInputFormat unnecessarily defines its own FileSplit
subclass (Sandy Ryza)
OPTIMIZATIONS
MAPREDUCE-5446. TestJobHistoryEvents and TestJobHistoryParsing have race

View File

@ -129,6 +129,15 @@ public class JHAdminConfig {
public static final int DEFAULT_MR_HISTORY_WEBAPP_PORT = 19888;
public static final String DEFAULT_MR_HISTORY_WEBAPP_ADDRESS =
"0.0.0.0:" + DEFAULT_MR_HISTORY_WEBAPP_PORT;
/** The Kerberos principal to be used for the SPNEGO filter for the history server. */
public static final String MR_WEBAPP_SPNEGO_USER_NAME_KEY =
MR_HISTORY_PREFIX + "webapp.spnego-principal";
/** The Kerberos keytab to be used for the SPNEGO filter for the history server. */
public static final String MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY =
MR_HISTORY_PREFIX + "webapp.spnego-keytab-file";
/*
* HS Service Authorization
*/

View File

@ -80,6 +80,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
@ -148,8 +149,14 @@ public class HistoryClientService extends AbstractService {
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT);
// NOTE: there should be a .at(InetSocketAddress)
WebApps.$for("jobhistory", HistoryClientService.class, this, "ws")
.with(conf).at(NetUtils.getHostPortString(bindAddress)).start(webApp);
WebApps
.$for("jobhistory", HistoryClientService.class, this, "ws")
.with(conf)
.withHttpSpnegoKeytabKey(
JHAdminConfig.MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
.withHttpSpnegoPrincipalKey(
JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY)
.at(NetUtils.getHostPortString(bindAddress)).start(webApp);
conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
webApp.getListenerAddress());
}
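A hedged configuration sketch for the SPNEGO keys wired in above; the property names come from JHAdminConfig as defined in this change, while the principal and keytab values are placeholders (HTTP/_HOST is the conventional Kerberos pattern):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;

Configuration conf = new Configuration();
// Kerberos principal for the history server's SPNEGO filter.
conf.set(JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY,
    "HTTP/_HOST@EXAMPLE.COM");
// Keytab file holding that principal's credentials.
conf.set(JHAdminConfig.MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
    "/etc/security/keytabs/spnego.service.keytab");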

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.mapred;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -49,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
@ -118,8 +120,10 @@ public class ResourceMgrDelegate extends YarnClient {
try {
Set<String> appTypes = new HashSet<String>(1);
appTypes.add(MRJobConfig.MR_APPLICATION_TYPE);
EnumSet<YarnApplicationState> appStates =
EnumSet.noneOf(YarnApplicationState.class);
return TypeConverter.fromYarnApps(
client.getApplications(appTypes), this.conf);
client.getApplications(appTypes, appStates), this.conf);
} catch (YarnException e) {
throw new IOException(e);
}
@ -299,11 +303,27 @@ public class ResourceMgrDelegate extends YarnClient {
}
@Override
public List<ApplicationReport> getApplications(
Set<String> applicationTypes) throws YarnException, IOException {
public List<ApplicationReport> getApplications(Set<String> applicationTypes)
throws YarnException,
IOException {
return client.getApplications(applicationTypes);
}
@Override
public List<ApplicationReport> getApplications(
EnumSet<YarnApplicationState> applicationStates) throws YarnException,
IOException {
return client.getApplications(applicationStates);
}
@Override
public List<ApplicationReport> getApplications(
Set<String> applicationTypes,
EnumSet<YarnApplicationState> applicationStates)
throws YarnException, IOException {
return client.getApplications(applicationTypes, applicationStates);
}
@Override
public YarnClusterMetrics getYarnClusterMetrics() throws YarnException,
IOException {

View File

@ -60,48 +60,6 @@ public class TeraInputFormat extends FileInputFormat<Text,Text> {
private static MRJobConfig lastContext = null;
private static List<InputSplit> lastResult = null;
static class TeraFileSplit extends FileSplit {
static private String[] ZERO_LOCATIONS = new String[0];
private String[] locations;
public TeraFileSplit() {
locations = ZERO_LOCATIONS;
}
public TeraFileSplit(Path file, long start, long length, String[] hosts) {
super(file, start, length, hosts);
try {
locations = super.getLocations();
} catch (IOException e) {
locations = ZERO_LOCATIONS;
}
}
// XXXXXX should this also be null-protected?
protected void setLocations(String[] hosts) {
locations = hosts;
}
@Override
public String[] getLocations() {
return locations;
}
public String toString() {
StringBuffer result = new StringBuffer();
result.append(getPath());
result.append(" from ");
result.append(getStart());
result.append(" length ");
result.append(getLength());
for(String host: getLocations()) {
result.append(" ");
result.append(host);
}
return result.toString();
}
}
static class TextSampler implements IndexedSortable {
private ArrayList<Text> records = new ArrayList<Text>();
@ -325,11 +283,6 @@ public class TeraInputFormat extends FileInputFormat<Text,Text> {
return new TeraRecordReader();
}
protected FileSplit makeSplit(Path file, long start, long length,
String[] hosts) {
return new TeraFileSplit(file, start, length, hosts);
}
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
if (job == lastContext) {
@ -343,7 +296,7 @@ public class TeraInputFormat extends FileInputFormat<Text,Text> {
System.out.println("Spent " + (t2 - t1) + "ms computing base-splits.");
if (job.getConfiguration().getBoolean(TeraScheduler.USE, true)) {
TeraScheduler scheduler = new TeraScheduler(
lastResult.toArray(new TeraFileSplit[0]), job.getConfiguration());
lastResult.toArray(new FileSplit[0]), job.getConfiguration());
lastResult = scheduler.getNewFileSplits();
t3 = System.currentTimeMillis();
System.out.println("Spent " + (t3 - t2) + "ms computing TeraScheduler splits.");

View File

@ -24,7 +24,6 @@ import java.util.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.examples.terasort.TeraInputFormat.TeraFileSplit;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
@ -214,8 +213,9 @@ class TeraScheduler {
for(int i=0; i < splits.length; ++i) {
if (splits[i].isAssigned) {
// copy the split and fix up the locations
((TeraFileSplit) realSplits[i]).setLocations
(new String[]{splits[i].locations.get(0).hostname});
String[] newLocations = {splits[i].locations.get(0).hostname};
realSplits[i] = new FileSplit(realSplits[i].getPath(),
realSplits[i].getStart(), realSplits[i].getLength(), newLocations);
result[left++] = realSplits[i];
} else {
result[right--] = realSplits[i];

View File

@ -23,6 +23,8 @@ Release 2.3.0 - UNRELEASED
IMPROVEMENTS
YARN-905. Add state filters to nodes CLI (Wei Yan via Sandy Ryza)
OPTIMIZATIONS
BUG FIXES
@ -42,7 +44,16 @@ Release 2.1.1-beta - UNRELEASED
IMPROVEMENTS
YARN-589. Expose a REST API for monitoring the fair scheduler (Sandy Ryza).
YARN-1074. Cleaned up YARN CLI application list to only display running
applications by default. (Xuan Gong via vinodkv)
YARN-1093. Corrections to Fair Scheduler documentation (Wing Yew Poon via
Sandy Ryza)
YARN-942. In Fair Scheduler documentation, inconsistency on which
properties have prefix (Akira Ajisaka via Sandy Ryza)
OPTIMIZATIONS
BUG FIXES
@ -85,6 +96,15 @@ Release 2.1.1-beta - UNRELEASED
YARN-1082. Create base directories on HDFS after RM login to ensure RM
recovery doesn't fail in secure mode. (vinodkv via acmurthy)
YARN-1085. Modified YARN and MR2 web-apps to do HTTP authentication in
secure setup with kerberos. (Omkar Vinit Joshi via vinodkv)
YARN-1094. Fixed a blocker with RM restart code because of which RM crashes
when try to recover an existing app. (vinodkv)
YARN-1008. MiniYARNCluster with multiple nodemanagers, all nodes have same
key for allocations. (tucu)
Release 2.1.0-beta - 2013-08-22
INCOMPATIBLE CHANGES

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.api.protocolrecords;
import java.util.EnumSet;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@ -25,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.util.Records;
/**
@ -45,16 +47,68 @@ public abstract class GetApplicationsRequest {
return request;
}
/**
* <p>
* The request from clients to get a report of Applications matching the
* given application types in the cluster from the
* <code>ResourceManager</code>.
* </p>
*
*
* @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
*/
@Public
@Stable
public static GetApplicationsRequest newInstance(
Set<String> applicationTypes) {
public static GetApplicationsRequest
newInstance(Set<String> applicationTypes) {
GetApplicationsRequest request =
Records.newRecord(GetApplicationsRequest.class);
request.setApplicationTypes(applicationTypes);
return request;
}
/**
* <p>
* The request from clients to get a report of Applications matching the
* given application states in the cluster from the
* <code>ResourceManager</code>.
* </p>
*
*
* @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
*/
@Public
@Stable
public static GetApplicationsRequest newInstance(
EnumSet<YarnApplicationState> applicationStates) {
GetApplicationsRequest request =
Records.newRecord(GetApplicationsRequest.class);
request.setApplicationStates(applicationStates);
return request;
}
/**
* <p>
* The request from clients to get a report of Applications matching the
* given application types and application states in the cluster from the
* <code>ResourceManager</code>.
* </p>
*
*
* @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
*/
@Public
@Stable
public static GetApplicationsRequest newInstance(
Set<String> applicationTypes,
EnumSet<YarnApplicationState> applicationStates) {
GetApplicationsRequest request =
Records.newRecord(GetApplicationsRequest.class);
request.setApplicationTypes(applicationTypes);
request.setApplicationStates(applicationStates);
return request;
}
/**
* Get the application types to filter applications on
*
@ -75,4 +129,25 @@ public abstract class GetApplicationsRequest {
@Unstable
public abstract void
setApplicationTypes(Set<String> applicationTypes);
/**
* Get the application states to filter applications on
*
* @return Set of Application states to filter on
*/
@Public
@Stable
public abstract EnumSet<YarnApplicationState> getApplicationStates();
/**
* Set the application states to filter applications on
*
* @param applicationStates
* A Set of Application states to filter on.
* If not defined, match all running applications
*/
@Private
@Unstable
public abstract void
setApplicationStates(EnumSet<YarnApplicationState> applicationStates);
}
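A brief usage sketch of the new factory overloads; the type and state filters below are illustrative (YarnApplicationState is already imported by this file):

import java.util.Collections;
import java.util.EnumSet;

// Ask the RM only for MAPREDUCE applications that are RUNNING or FINISHED.
GetApplicationsRequest request = GetApplicationsRequest.newInstance(
    Collections.singleton("MAPREDUCE"),
    EnumSet.of(YarnApplicationState.RUNNING, YarnApplicationState.FINISHED));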

View File

@ -33,7 +33,7 @@ public abstract class GetDelegationTokenRequest {
@Public
@Stable
public GetDelegationTokenRequest newInstance(String renewer) {
public static GetDelegationTokenRequest newInstance(String renewer) {
GetDelegationTokenRequest request =
Records.newRecord(GetDelegationTokenRequest.class);
request.setRenewer(renewer);
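With the static modifier restored, the factory can now be called without an instance; a one-line sketch with a placeholder renewer:

GetDelegationTokenRequest request = GetDelegationTokenRequest.newInstance("yarn");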

View File

@ -132,6 +132,15 @@ public class YarnConfiguration extends Configuration {
RM_PREFIX + "scheduler.client.thread-count";
public static final int DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT = 50;
/** Whether the port should be included in the node name. The node name
* is used by the scheduler for matching resource requests to allocation
* locations. Typically this is just the hostname; including the port is
* needed when using a minicluster and specific NMs must be addressed. */
public static final String RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME =
YARN_PREFIX + "scheduler.include-port-in-node-name";
public static final boolean DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME =
false;
/**
* Enable periodic monitor threads.
* @see #RM_SCHEDULER_MONITOR_POLICIES
@ -205,6 +214,14 @@ public class YarnConfiguration extends Configuration {
public static final String RM_KEYTAB =
RM_PREFIX + "keytab";
/** The Kerberos principal to be used for the SPNEGO filter for the RM. */
public static final String RM_WEBAPP_SPNEGO_USER_NAME_KEY =
RM_PREFIX + "webapp.spnego-principal";
/** The Kerberos keytab to be used for the SPNEGO filter for the RM. */
public static final String RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY =
RM_PREFIX + "webapp.spnego-keytab-file";
/** How long to wait until a container is considered dead.*/
public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS =
RM_PREFIX + "rm.container-allocation.expiry-interval-ms";
@ -599,7 +616,15 @@ public class YarnConfiguration extends Configuration {
public static final String NM_USER_HOME_DIR =
NM_PREFIX + "user-home-dir";
/** The Kerberos principal to be used for the SPNEGO filter for the NM. */
public static final String NM_WEBAPP_SPNEGO_USER_NAME_KEY =
NM_PREFIX + "webapp.spnego-principal";
/** The Kerberos keytab to be used for the SPNEGO filter for the NM. */
public static final String NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY =
NM_PREFIX + "webapp.spnego-keytab-file";
public static final String DEFAULT_NM_USER_HOME_DIR= "/home/";
////////////////////////////////
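A hedged sketch of setting the new keys programmatically; the principal, keytab path, and flag value are placeholders, and the NM_* keys are set analogously:

YarnConfiguration conf = new YarnConfiguration();
// SPNEGO identity for the RM web UI.
conf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY,
    "HTTP/_HOST@EXAMPLE.COM");
conf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
    "/etc/security/keytabs/spnego.service.keytab");
// Include the NM port in scheduler node names, e.g. in a MiniYARNCluster
// where several NMs share one hostname.
conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);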

View File

@ -122,6 +122,7 @@ message GetClusterMetricsResponseProto {
message GetApplicationsRequestProto {
repeated string application_types = 1;
repeated YarnApplicationStateProto application_states = 2;
}
message GetApplicationsResponseProto {

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.client.api;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
@ -36,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
@ -171,13 +173,13 @@ public abstract class YarnClient extends AbstractService {
* <p>
* Get a report (ApplicationReport) of all Applications in the cluster.
* </p>
*
*
* <p>
* If the user does not have <code>VIEW_APP</code> access for an application
* then the corresponding report will be filtered as described in
* {@link #getApplicationReport(ApplicationId)}.
* </p>
*
*
* @return a list of reports of all running applications
* @throws YarnException
* @throws IOException
@ -205,6 +207,50 @@ public abstract class YarnClient extends AbstractService {
public abstract List<ApplicationReport> getApplications(
Set<String> applicationTypes) throws YarnException, IOException;
/**
* <p>
* Get a report (ApplicationReport) of Applications matching the given
* application states in the cluster.
* </p>
*
* <p>
* If the user does not have <code>VIEW_APP</code> access for an application
* then the corresponding report will be filtered as described in
* {@link #getApplicationReport(ApplicationId)}.
* </p>
*
* @param applicationStates
* @return a list of reports of applications
* @throws YarnException
* @throws IOException
*/
public abstract List<ApplicationReport>
getApplications(EnumSet<YarnApplicationState> applicationStates)
throws YarnException, IOException;
/**
* <p>
* Get a report (ApplicationReport) of Applications matching the given
* application types and application states in the cluster.
* </p>
*
* <p>
* If the user does not have <code>VIEW_APP</code> access for an application
* then the corresponding report will be filtered as described in
* {@link #getApplicationReport(ApplicationId)}.
* </p>
*
* @param applicationTypes
* @param applicationStates
* @return a list of reports of applications
* @throws YarnException
* @throws IOException
*/
public abstract List<ApplicationReport> getApplications(
Set<String> applicationTypes,
EnumSet<YarnApplicationState> applicationStates) throws YarnException,
IOException;
/**
* <p>
* Get metrics ({@link YarnClusterMetrics}) about the cluster.

View File

@ -211,15 +211,29 @@ public class YarnClientImpl extends YarnClient {
@Override
public List<ApplicationReport> getApplications() throws YarnException,
IOException {
return getApplications(null);
return getApplications(null, null);
}
@Override
public List<ApplicationReport> getApplications(Set<String> applicationTypes)
throws YarnException,
IOException {
return getApplications(applicationTypes, null);
}
@Override
public List<ApplicationReport> getApplications(
Set<String> applicationTypes) throws YarnException, IOException {
EnumSet<YarnApplicationState> applicationStates)
throws YarnException, IOException {
return getApplications(null, applicationStates);
}
@Override
public List<ApplicationReport> getApplications(Set<String> applicationTypes,
EnumSet<YarnApplicationState> applicationStates) throws YarnException,
IOException {
GetApplicationsRequest request =
applicationTypes == null ? GetApplicationsRequest.newInstance()
: GetApplicationsRequest.newInstance(applicationTypes);
GetApplicationsRequest.newInstance(applicationTypes, applicationStates);
GetApplicationsResponse response = rmClient.getApplications(request);
return response.getApplicationList();
}
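On the caller side the overloads compose as below; a sketch assuming a started client and the usual imports, with illustrative filters (passing null for either argument leaves that dimension unfiltered, per the delegation above):

YarnClient client = YarnClient.createYarnClient();
client.init(new YarnConfiguration());
client.start();
// One RPC that filters by both application type and state.
List<ApplicationReport> reports = client.getApplications(
    Collections.singleton("MAPREDUCE"),
    EnumSet.of(YarnApplicationState.FINISHED));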

View File

@ -21,6 +21,7 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.text.DecimalFormat;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -49,6 +50,10 @@ public class ApplicationCLI extends YarnCLI {
System.getProperty("line.separator");
private static final String APP_TYPE_CMD = "appTypes";
private static final String APP_STATE_CMD ="appStates";
private static final String ALLSTATES_OPTION = "ALL";
private boolean allAppStates;
public static void main(String[] args) throws Exception {
ApplicationCLI cli = new ApplicationCLI();
@ -66,7 +71,8 @@ public class ApplicationCLI extends YarnCLI {
opts.addOption(STATUS_CMD, true, "Prints the status of the application.");
opts.addOption(LIST_CMD, false, "List applications from the RM. " +
"Supports optional use of --appTypes to filter applications " +
"based on application type.");
"based on application type, " +
"and --appStates to filter applications based on application state");
opts.addOption(KILL_CMD, true, "Kills the application.");
opts.addOption(HELP_CMD, false, "Displays help for all commands.");
Option appTypeOpt = new Option(APP_TYPE_CMD, true,
@ -75,6 +81,16 @@ public class ApplicationCLI extends YarnCLI {
appTypeOpt.setArgs(Option.UNLIMITED_VALUES);
appTypeOpt.setArgName("Comma-separated list of application types");
opts.addOption(appTypeOpt);
Option appStateOpt =
new Option(
APP_STATE_CMD,
true,
"Works with --list to filter applications based on their state. "
+ getAllValidApplicationStates());
appStateOpt.setValueSeparator(',');
appStateOpt.setArgs(Option.UNLIMITED_VALUES);
appStateOpt.setArgName("Comma-separated list of application states");
opts.addOption(appStateOpt);
opts.getOption(KILL_CMD).setArgName("Application ID");
opts.getOption(STATUS_CMD).setArgName("Application ID");
CommandLine cliParser = new GnuParser().parse(opts, args);
@ -87,18 +103,44 @@ public class ApplicationCLI extends YarnCLI {
}
printApplicationReport(cliParser.getOptionValue(STATUS_CMD));
} else if (cliParser.hasOption(LIST_CMD)) {
allAppStates = false;
Set<String> appTypes = new HashSet<String>();
if(cliParser.hasOption(APP_TYPE_CMD)) {
String[] types = cliParser.getOptionValues(APP_TYPE_CMD);
if (types != null) {
for (String type : types) {
if (!type.trim().isEmpty()) {
appTypes.add(type.trim());
appTypes.add(type.toUpperCase().trim());
}
}
}
}
listApplications(appTypes);
EnumSet<YarnApplicationState> appStates =
EnumSet.noneOf(YarnApplicationState.class);
if (cliParser.hasOption(APP_STATE_CMD)) {
String[] states = cliParser.getOptionValues(APP_STATE_CMD);
if (states != null) {
for (String state : states) {
if (!state.trim().isEmpty()) {
if (state.trim().equalsIgnoreCase(ALLSTATES_OPTION)) {
allAppStates = true;
break;
}
try {
appStates.add(YarnApplicationState.valueOf(state.toUpperCase()
.trim()));
} catch (IllegalArgumentException ex) {
sysout.println("The application state " + state
+ " is invalid.");
sysout.println(getAllValidApplicationStates());
return exitCode;
}
}
}
}
}
listApplications(appTypes, appStates);
} else if (cliParser.hasOption(KILL_CMD)) {
if (args.length != 2) {
printUsage(opts);
@ -127,19 +169,33 @@ public class ApplicationCLI extends YarnCLI {
/**
* Lists the applications matching the given application types and
* application states, as reported by the Resource Manager.
*
* @param appTypes
* @param appStates
* @throws YarnException
* @throws IOException
*/
private void listApplications(Set<String> appTypes)
throws YarnException, IOException {
private void listApplications(Set<String> appTypes,
EnumSet<YarnApplicationState> appStates) throws YarnException,
IOException {
PrintWriter writer = new PrintWriter(sysout);
List<ApplicationReport> appsReport =
client.getApplications(appTypes);
if (allAppStates) {
for(YarnApplicationState appState : YarnApplicationState.values()) {
appStates.add(appState);
}
} else {
if (appStates.isEmpty()) {
appStates.add(YarnApplicationState.RUNNING);
}
}
writer.println("Total Applications:" + appsReport.size());
List<ApplicationReport> appsReport =
client.getApplications(appTypes, appStates);
writer
.println("Total number of applications (application-types: " + appTypes
+ " and states: " + appStates + ")" + ":" + appsReport.size());
writer.printf(APPLICATIONS_PATTERN, "Application-Id",
"Application-Name","Application-Type", "User", "Queue",
"State", "Final-State","Progress", "Tracking-URL");
@ -229,4 +285,16 @@ public class ApplicationCLI extends YarnCLI {
sysout.println(baos.toString("UTF-8"));
}
private String getAllValidApplicationStates() {
StringBuilder sb = new StringBuilder();
sb.append("The valid application state can be"
+ " one of the following: ");
sb.append(ALLSTATES_OPTION + ",");
for (YarnApplicationState appState : YarnApplicationState
.values()) {
sb.append(appState+",");
}
String output = sb.toString();
return output.substring(0, output.length()-1);
}
}
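Illustrative invocations of the extended list command (state names are parsed case-insensitively; both filters take comma-separated lists):

yarn application -list                                      # RUNNING apps only (the default)
yarn application -list -appTypes MAPREDUCE -appStates FINISHED,FAILED
yarn application -list -appStates ALL                       # every state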

View File

@ -21,11 +21,14 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.lang.time.DateFormatUtils;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@ -40,9 +43,12 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
@Private
@Unstable
public class NodeCLI extends YarnCLI {
private static final String NODES_PATTERN = "%16s\t%10s\t%17s\t%18s" +
private static final String NODES_PATTERN = "%16s\t%15s\t%17s\t%18s" +
System.getProperty("line.separator");
private static final String NODE_STATE_CMD = "states";
private static final String NODE_ALL = "all";
public static void main(String[] args) throws Exception {
NodeCLI cli = new NodeCLI();
cli.setSysOutPrintStream(System.out);
@ -57,7 +63,18 @@ public class NodeCLI extends YarnCLI {
Options opts = new Options();
opts.addOption(STATUS_CMD, true, "Prints the status report of the node.");
opts.addOption(LIST_CMD, false, "Lists all the nodes in the RUNNING state.");
opts.addOption(LIST_CMD, false, "List all running nodes. " +
"Supports optional use of --states to filter nodes " +
"based on node state, all --all to list all nodes.");
Option nodeStateOpt = new Option(NODE_STATE_CMD, true,
"Works with -list to filter nodes based on their states.");
nodeStateOpt.setValueSeparator(',');
nodeStateOpt.setArgs(Option.UNLIMITED_VALUES);
nodeStateOpt.setArgName("Comma-separated list of node states");
opts.addOption(nodeStateOpt);
Option allOpt = new Option(NODE_ALL, false,
"Works with -list to list all nodes.");
opts.addOption(allOpt);
CommandLine cliParser = new GnuParser().parse(opts, args);
int exitCode = -1;
@ -68,7 +85,24 @@ public class NodeCLI extends YarnCLI {
}
printNodeStatus(cliParser.getOptionValue("status"));
} else if (cliParser.hasOption("list")) {
listClusterNodes();
Set<NodeState> nodeStates = new HashSet<NodeState>();
if (cliParser.hasOption(NODE_ALL)) {
for (NodeState state : NodeState.values()) {
nodeStates.add(state);
}
} else if (cliParser.hasOption(NODE_STATE_CMD)) {
String[] types = cliParser.getOptionValues(NODE_STATE_CMD);
if (types != null) {
for (String type : types) {
if (!type.trim().isEmpty()) {
nodeStates.add(NodeState.valueOf(type.trim().toUpperCase()));
}
}
}
} else {
nodeStates.add(NodeState.RUNNING);
}
listClusterNodes(nodeStates);
} else {
syserr.println("Invalid Command Usage : ");
printUsage(opts);
@ -86,14 +120,17 @@ public class NodeCLI extends YarnCLI {
}
/**
* Lists all the nodes present in the cluster
* Lists the nodes matching the given node states
*
* @param nodeStates
* @throws YarnException
* @throws IOException
*/
private void listClusterNodes() throws YarnException, IOException {
private void listClusterNodes(Set<NodeState> nodeStates)
throws YarnException, IOException {
PrintWriter writer = new PrintWriter(sysout);
List<NodeReport> nodesReport = client.getNodeReports(NodeState.RUNNING);
List<NodeReport> nodesReport = client.getNodeReports(
nodeStates.toArray(new NodeState[0]));
writer.println("Total Nodes:" + nodesReport.size());
writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
"Running-Containers");

View File

@ -27,6 +27,7 @@ import static org.mockito.Mockito.when;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@ -55,10 +56,12 @@ import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
@ -163,11 +166,15 @@ public class TestYarnClient {
List<ApplicationReport> expectedReports = ((MockYarnClient)client).getReports();
List<ApplicationReport> reports = client.getApplications();
Assert.assertEquals(reports, expectedReports);
Set<String> appTypes = new HashSet<String>();
appTypes.add("YARN");
appTypes.add("NON-YARN");
List<ApplicationReport> reports = client.getApplications(appTypes);
reports =
client.getApplications(appTypes, null);
Assert.assertEquals(reports.size(), 2);
Assert
.assertTrue((reports.get(0).getApplicationType().equals("YARN") && reports
@ -178,8 +185,28 @@ public class TestYarnClient {
Assert.assertTrue(expectedReports.contains(report));
}
reports = client.getApplications();
Assert.assertEquals(reports, expectedReports);
EnumSet<YarnApplicationState> appStates =
EnumSet.noneOf(YarnApplicationState.class);
appStates.add(YarnApplicationState.FINISHED);
appStates.add(YarnApplicationState.FAILED);
reports = client.getApplications(null, appStates);
Assert.assertEquals(reports.size(), 2);
Assert
.assertTrue((reports.get(0).getApplicationType().equals("NON-YARN") && reports
.get(1).getApplicationType().equals("NON-MAPREDUCE"))
|| (reports.get(1).getApplicationType().equals("NON-YARN") && reports
.get(0).getApplicationType().equals("NON-MAPREDUCE")));
for (ApplicationReport report : reports) {
Assert.assertTrue(expectedReports.contains(report));
}
reports = client.getApplications(appTypes, appStates);
Assert.assertEquals(reports.size(), 1);
Assert
.assertTrue((reports.get(0).getApplicationType().equals("NON-YARN")));
for (ApplicationReport report : reports) {
Assert.assertTrue(expectedReports.contains(report));
}
client.stop();
}
@ -187,6 +214,8 @@ public class TestYarnClient {
private static class MockYarnClient extends YarnClientImpl {
private ApplicationReport mockReport;
private List<ApplicationReport> reports;
GetApplicationsResponse mockAppResponse =
mock(GetApplicationsResponse.class);
public MockYarnClient() {
super();
@ -202,6 +231,8 @@ public class TestYarnClient {
try{
when(rmClient.getApplicationReport(any(
GetApplicationReportRequest.class))).thenReturn(mockResponse);
when(rmClient.getApplications(any(GetApplicationsRequest.class)))
.thenReturn(mockAppResponse);
} catch (YarnException e) {
Assert.fail("Exception is not expected.");
} catch (IOException e) {
@ -212,16 +243,11 @@ public class TestYarnClient {
@Override
public List<ApplicationReport> getApplications(
Set<String> applicationTypes) throws YarnException, IOException {
GetApplicationsRequest request =
applicationTypes == null ? GetApplicationsRequest.newInstance()
: GetApplicationsRequest.newInstance(applicationTypes);
when(rmClient.getApplications(request))
.thenReturn(
getApplicationReports(reports,
request));
GetApplicationsResponse response = rmClient.getApplications(request);
return response.getApplicationList();
Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates)
throws YarnException, IOException {
when(mockAppResponse.getApplicationList()).thenReturn(
getApplicationReports(reports, applicationTypes, applicationStates));
return super.getApplications(applicationTypes, applicationStates);
}
@Override
@ -243,7 +269,7 @@ public class TestYarnClient {
ApplicationReport newApplicationReport = ApplicationReport.newInstance(
applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
"user", "queue", "appname", "host", 124, null,
YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
List<ApplicationReport> applicationReports =
new ArrayList<ApplicationReport>();
@ -262,31 +288,44 @@ public class TestYarnClient {
ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(
applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3),
"user3", "queue3", "appname3", "host3", 126, null,
YarnApplicationState.FINISHED, "diagnostics3", "url3", 3, 3,
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE",
YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE",
null);
applicationReports.add(newApplicationReport3);
ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8);
ApplicationReport newApplicationReport4 =
ApplicationReport.newInstance(
applicationId4,
ApplicationAttemptId.newInstance(applicationId4, 4),
"user4", "queue4", "appname4", "host4", 127, null,
YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
"NON-MAPREDUCE", null);
applicationReports.add(newApplicationReport4);
return applicationReports;
}
private GetApplicationsResponse getApplicationReports(
private List<ApplicationReport> getApplicationReports(
List<ApplicationReport> applicationReports,
GetApplicationsRequest request) {
Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates) {
List<ApplicationReport> appReports = new ArrayList<ApplicationReport>();
Set<String> appTypes = request.getApplicationTypes();
boolean bypassFilter = appTypes.isEmpty();
for (ApplicationReport appReport : applicationReports) {
if (!(bypassFilter || appTypes.contains(
appReport.getApplicationType()))) {
continue;
if (applicationTypes != null && !applicationTypes.isEmpty()) {
if (!applicationTypes.contains(appReport.getApplicationType())) {
continue;
}
}
if (applicationStates != null && !applicationStates.isEmpty()) {
if (!applicationStates.contains(appReport.getYarnApplicationState())) {
continue;
}
}
appReports.add(appReport);
}
GetApplicationsResponse response =
GetApplicationsResponse.newInstance(appReports);
return response;
return appReports;
}
}

View File

@ -33,6 +33,7 @@ import java.io.PrintStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Date;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -134,7 +135,7 @@ public class TestYarnCLI {
ApplicationReport newApplicationReport = ApplicationReport.newInstance(
applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
"user", "queue", "appname", "host", 124, null,
YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
List<ApplicationReport> applicationReports = new ArrayList<ApplicationReport>();
applicationReports.add(newApplicationReport);
@ -152,23 +153,39 @@ public class TestYarnCLI {
ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(
applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3),
"user3", "queue3", "appname3", "host3", 126, null,
YarnApplicationState.FINISHED, "diagnostics3", "url3", 3, 3,
YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE",
null);
applicationReports.add(newApplicationReport3);
Set<String> appType1 = new HashSet<String>();
appType1.add("YARN");
ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8);
ApplicationReport newApplicationReport4 = ApplicationReport.newInstance(
applicationId4, ApplicationAttemptId.newInstance(applicationId4, 4),
"user4", "queue4", "appname4", "host4", 127, null,
YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f, "NON-MAPREDUCE",
null);
applicationReports.add(newApplicationReport4);
when(client.getApplications(appType1)).thenReturn(
getApplicationReports(applicationReports, appType1));
int result = cli.run(new String[] { "-list", "-appTypes", "YARN" });
// Test command yarn application -list
// if the set appStates is empty, RUNNING state will be automatically added
// to the appStates list
// the output of yarn application -list should be the same as
// that of yarn application -list --appStates RUNNING
Set<String> appType1 = new HashSet<String>();
EnumSet<YarnApplicationState> appState1 =
EnumSet.noneOf(YarnApplicationState.class);
appState1.add(YarnApplicationState.RUNNING);
when(client.getApplications(appType1, appState1)).thenReturn(
getApplicationReports(applicationReports, appType1, appState1, false));
int result = cli.run(new String[] { "-list" });
assertEquals(0, result);
verify(client).getApplications(appType1);
verify(client).getApplications(appType1, appState1);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintWriter pw = new PrintWriter(baos);
pw.println("Total Applications:1");
pw.println("Total number of applications (application-types: " + appType1
+ " and states: " + appState1 + ")" + ":" + 2);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
@ -176,27 +193,41 @@ public class TestYarnCLI {
pw.println("\t Tracking-URL");
pw.print(" application_1234_0005\t ");
pw.print("appname\t YARN\t user\t ");
pw.print("queue\t FINISHED\t ");
pw.print("queue\t RUNNING\t ");
pw.print("SUCCEEDED\t 53.79%");
pw.println("\t N/A");
pw.print(" application_1234_0007\t ");
pw.print("appname3\t MAPREDUCE\t user3\t ");
pw.print("queue3\t RUNNING\t ");
pw.print("SUCCEEDED\t 73.79%");
pw.println("\t N/A");
pw.close();
String appsReportStr = baos.toString("UTF-8");
Assert.assertEquals(appsReportStr, sysOutStream.toString());
verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt());
//Test command yarn application -list --appTypes apptype1,apptype2
//the output should be the same as
//yarn application -list --appTypes apptype1,apptype2 --appStates RUNNING
sysOutStream.reset();
Set<String> appType2 = new HashSet<String>();
appType2.add("YARN");
appType2.add("FOO-YARN");
when(client.getApplications(appType2)).thenReturn(
getApplicationReports(applicationReports, appType2));
cli.run(new String[] { "-list", "-appTypes", "YARN , ,, ,FOO-YARN",
",,,,, YARN,," });
appType2.add("NON-YARN");
EnumSet<YarnApplicationState> appState2 =
EnumSet.noneOf(YarnApplicationState.class);
appState2.add(YarnApplicationState.RUNNING);
when(client.getApplications(appType2, appState2)).thenReturn(
getApplicationReports(applicationReports, appType2, appState2, false));
result =
cli.run(new String[] { "-list", "-appTypes", "YARN, ,, NON-YARN",
" ,, ,," });
assertEquals(0, result);
verify(client).getApplications(appType2);
verify(client).getApplications(appType2, appState2);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Applications:1");
pw.println("Total number of applications (application-types: " + appType2
+ " and states: " + appState2 + ")" + ":" + 1);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
@ -204,7 +235,7 @@ public class TestYarnCLI {
pw.println("\t Tracking-URL");
pw.print(" application_1234_0005\t ");
pw.print("appname\t YARN\t user\t ");
pw.print("queue\t FINISHED\t ");
pw.print("queue\t RUNNING\t ");
pw.print("SUCCEEDED\t 53.79%");
pw.println("\t N/A");
pw.close();
@ -212,29 +243,74 @@ public class TestYarnCLI {
Assert.assertEquals(appsReportStr, sysOutStream.toString());
verify(sysOut, times(2)).write(any(byte[].class), anyInt(), anyInt());
//Test command yarn application -list --appStates appState1,appState2
sysOutStream.reset();
Set<String> appType3 = new HashSet<String>();
appType3.add("YARN");
appType3.add("NON-YARN");
when(client.getApplications(appType3)).thenReturn(
getApplicationReports(applicationReports, appType3));
result = cli.run(new String[] { "-list", "-appTypes", "YARN,NON-YARN" });
EnumSet<YarnApplicationState> appState3 =
EnumSet.noneOf(YarnApplicationState.class);
appState3.add(YarnApplicationState.FINISHED);
appState3.add(YarnApplicationState.FAILED);
when(client.getApplications(appType3, appState3)).thenReturn(
getApplicationReports(applicationReports, appType3, appState3, false));
result =
cli.run(new String[] { "-list", "--appStates", "FINISHED ,, , FAILED",
",,FINISHED" });
assertEquals(0, result);
verify(client).getApplications(appType3);
verify(client).getApplications(appType3, appState3);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Applications:2");
pw.println("Total number of applications (application-types: " + appType3
+ " and states: " + appState3 + ")" + ":" + 2);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
pw.print("Final-State\t Progress");
pw.println("\t Tracking-URL");
pw.print(" application_1234_0005\t ");
pw.print("appname\t YARN\t user\t ");
pw.print("queue\t FINISHED\t ");
pw.print("SUCCEEDED\t 53.79%");
pw.print(" application_1234_0006\t ");
pw.print("appname2\t NON-YARN\t user2\t ");
pw.print("queue2\t FINISHED\t ");
pw.print("SUCCEEDED\t 63.79%");
pw.println("\t N/A");
pw.print(" application_1234_0008\t ");
pw.print("appname4\t NON-MAPREDUCE\t user4\t ");
pw.print("queue4\t FAILED\t ");
pw.print("SUCCEEDED\t 83.79%");
pw.println("\t N/A");
pw.close();
appsReportStr = baos.toString("UTF-8");
Assert.assertEquals(appsReportStr, sysOutStream.toString());
verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt());
// Test command yarn application -list --appTypes apptype1,apptype2
// --appStates appstate1,appstate2
sysOutStream.reset();
Set<String> appType4 = new HashSet<String>();
appType4.add("YARN");
appType4.add("NON-YARN");
EnumSet<YarnApplicationState> appState4 =
EnumSet.noneOf(YarnApplicationState.class);
appState4.add(YarnApplicationState.FINISHED);
appState4.add(YarnApplicationState.FAILED);
when(client.getApplications(appType4, appState4)).thenReturn(
getApplicationReports(applicationReports, appType4, appState4, false));
result =
cli.run(new String[] { "-list", "--appTypes", "YARN,NON-YARN",
"--appStates", "FINISHED ,, , FAILED" });
assertEquals(0, result);
verify(client).getApplications(appType2, appState2);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType4
+ " and states: " + appState4 + ")" + ":" + 1);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
pw.print("Final-State\t Progress");
pw.println("\t Tracking-URL");
pw.print(" application_1234_0006\t ");
pw.print("appname2\t NON-YARN\t user2\t ");
pw.print("queue2\t FINISHED\t ");
@ -243,19 +319,46 @@ public class TestYarnCLI {
pw.close();
appsReportStr = baos.toString("UTF-8");
Assert.assertEquals(appsReportStr, sysOutStream.toString());
verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt());
verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());
//Test command yarn application -list --appStates with invalid appStates
sysOutStream.reset();
Set<String> appType4 = new HashSet<String>();
when(client.getApplications(appType4)).thenReturn(
getApplicationReports(applicationReports, appType4));
result = cli.run(new String[] { "-list" });
assertEquals(0, result);
verify(client).getApplications(appType4);
result =
cli.run(new String[] { "-list", "--appStates", "FINISHED ,, , INVALID" });
assertEquals(-1, result);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Applications:3");
pw.println("The application state INVALID is invalid.");
pw.print("The valid application state can be one of the following: ");
StringBuilder sb = new StringBuilder();
sb.append("ALL,");
for(YarnApplicationState state : YarnApplicationState.values()) {
sb.append(state+",");
}
String output = sb.toString();
pw.println(output.substring(0, output.length()-1));
pw.close();
appsReportStr = baos.toString("UTF-8");
Assert.assertEquals(appsReportStr, sysOutStream.toString());
verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());
//Test command yarn application -list --appStates all
sysOutStream.reset();
Set<String> appType5 = new HashSet<String>();
EnumSet<YarnApplicationState> appState5 =
EnumSet.noneOf(YarnApplicationState.class);
appState5.add(YarnApplicationState.FINISHED);
when(client.getApplications(appType5, appState5)).thenReturn(
getApplicationReports(applicationReports, appType5, appState5, true));
result =
cli.run(new String[] { "-list", "--appStates", "FINISHED ,, , ALL" });
assertEquals(0, result);
verify(client).getApplications(appType5, appState5);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType5
+ " and states: " + appState5 + ")" + ":" + 4);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
@ -263,7 +366,7 @@ public class TestYarnCLI {
pw.println("\t Tracking-URL");
pw.print(" application_1234_0005\t ");
pw.print("appname\t YARN\t user\t ");
pw.print("queue\t FINISHED\t ");
pw.print("queue\t RUNNING\t ");
pw.print("SUCCEEDED\t 53.79%");
pw.println("\t N/A");
pw.print(" application_1234_0006\t ");
@ -273,27 +376,80 @@ public class TestYarnCLI {
pw.println("\t N/A");
pw.print(" application_1234_0007\t ");
pw.print("appname3\t MAPREDUCE\t user3\t ");
pw.print("queue3\t FINISHED\t ");
pw.print("queue3\t RUNNING\t ");
pw.print("SUCCEEDED\t 73.79%");
pw.println("\t N/A");
pw.print(" application_1234_0008\t ");
pw.print("appname4\t NON-MAPREDUCE\t user4\t ");
pw.print("queue4\t FAILED\t ");
pw.print("SUCCEEDED\t 83.79%");
pw.println("\t N/A");
pw.close();
appsReportStr = baos.toString("UTF-8");
Assert.assertEquals(appsReportStr, sysOutStream.toString());
verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());
verify(sysOut, times(5)).write(any(byte[].class), anyInt(), anyInt());
// Test command yarn application user case insensitive
sysOutStream.reset();
Set<String> appType6 = new HashSet<String>();
appType6.add("YARN");
appType6.add("NON-YARN");
EnumSet<YarnApplicationState> appState6 =
EnumSet.noneOf(YarnApplicationState.class);
appState6.add(YarnApplicationState.FINISHED);
when(client.getApplications(appType6, appState6)).thenReturn(
getApplicationReports(applicationReports, appType6, appState6, false));
result =
cli.run(new String[] { "-list", "-appTypes", "YARN, ,, NON-YARN",
"--appStates", "finished" });
assertEquals(0, result);
verify(client).getApplications(appType6, appState6);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType6
+ " and states: " + appState6 + ")" + ":" + 1);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
pw.print("Final-State\t Progress");
pw.println("\t Tracking-URL");
pw.print(" application_1234_0006\t ");
pw.print("appname2\t NON-YARN\t user2\t ");
pw.print("queue2\t FINISHED\t ");
pw.print("SUCCEEDED\t 63.79%");
pw.println("\t N/A");
pw.close();
appsReportStr = baos.toString("UTF-8");
Assert.assertEquals(appsReportStr, sysOutStream.toString());
verify(sysOut, times(6)).write(any(byte[].class), anyInt(), anyInt());
}
private List<ApplicationReport> getApplicationReports(
List<ApplicationReport> applicationReports,
Set<String> appTypes) {
Set<String> appTypes, EnumSet<YarnApplicationState> appStates,
boolean allStates) {
List<ApplicationReport> appReports = new ArrayList<ApplicationReport>();
boolean bypassFilter = appTypes.isEmpty();
for (ApplicationReport appReport : applicationReports) {
if (!(bypassFilter || appTypes.contains(
appReport.getApplicationType()))) {
continue;
if (allStates) {
for(YarnApplicationState state : YarnApplicationState.values()) {
appStates.add(state);
}
}
for (ApplicationReport appReport : applicationReports) {
if (appTypes != null && !appTypes.isEmpty()) {
if (!appTypes.contains(appReport.getApplicationType())) {
continue;
}
}
if (appStates != null && !appStates.isEmpty()) {
if (!appStates.contains(appReport.getYarnApplicationState())) {
continue;
}
}
appReports.add(appReport);
}
return appReports;
@ -363,36 +519,239 @@ public class TestYarnCLI {
@Test
public void testListClusterNodes() throws Exception {
List<NodeReport> nodeReports = new ArrayList<NodeReport>();
nodeReports.addAll(getNodeReports(1, NodeState.NEW));
nodeReports.addAll(getNodeReports(2, NodeState.RUNNING));
nodeReports.addAll(getNodeReports(1, NodeState.UNHEALTHY));
nodeReports.addAll(getNodeReports(1, NodeState.DECOMMISSIONED));
nodeReports.addAll(getNodeReports(1, NodeState.REBOOTED));
nodeReports.addAll(getNodeReports(1, NodeState.LOST));
NodeCLI cli = new NodeCLI();
when(client.getNodeReports(NodeState.RUNNING)).thenReturn(
getNodeReports(3));
cli.setClient(client);
cli.setSysOutPrintStream(sysOut);
int result = cli.run(new String[] { "-list" });
Set<NodeState> nodeStates = new HashSet<NodeState>();
nodeStates.add(NodeState.NEW);
NodeState[] states = nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states))
.thenReturn(getNodeReports(nodeReports, nodeStates));
int result = cli.run(new String[] { "-list", "--states", "NEW" });
assertEquals(0, result);
verify(client).getNodeReports(NodeState.RUNNING);
verify(client).getNodeReports(states);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintWriter pw = new PrintWriter(baos);
pw.println("Total Nodes:3");
pw.print(" Node-Id\tNode-State\tNode-Http-Address\t");
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Running-Containers");
pw.print(" host0:0\t RUNNING\t host1:8888");
pw.println("\t 0");
pw.print(" host1:0\t RUNNING\t host1:8888");
pw.println("\t 0");
pw.print(" host2:0\t RUNNING\t host1:8888");
pw.print(" host0:0\t NEW\t host1:8888");
pw.println("\t 0");
pw.close();
String nodesReportStr = baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr, sysOutStream.toString());
verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt());
sysOutStream.reset();
nodeStates.clear();
nodeStates.add(NodeState.RUNNING);
states = nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states))
.thenReturn(getNodeReports(nodeReports, nodeStates));
result = cli.run(new String[] { "-list", "--states", "RUNNING" });
assertEquals(0, result);
verify(client).getNodeReports(states);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Nodes:2");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Running-Containers");
pw.print(" host0:0\t RUNNING\t host1:8888");
pw.println("\t 0");
pw.print(" host1:0\t RUNNING\t host1:8888");
pw.println("\t 0");
pw.close();
nodesReportStr = baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr, sysOutStream.toString());
verify(sysOut, times(2)).write(any(byte[].class), anyInt(), anyInt());
sysOutStream.reset();
result = cli.run(new String[] { "-list" });
assertEquals(0, result);
Assert.assertEquals(nodesReportStr, sysOutStream.toString());
verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt());
sysOutStream.reset();
nodeStates.clear();
nodeStates.add(NodeState.UNHEALTHY);
states = nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states))
.thenReturn(getNodeReports(nodeReports, nodeStates));
result = cli.run(new String[] { "-list", "--states", "UNHEALTHY" });
assertEquals(0, result);
verify(client).getNodeReports(states);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Running-Containers");
pw.print(" host0:0\t UNHEALTHY\t host1:8888");
pw.println("\t 0");
pw.close();
nodesReportStr = baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr, sysOutStream.toString());
verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());
sysOutStream.reset();
nodeStates.clear();
nodeStates.add(NodeState.DECOMMISSIONED);
states = nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states))
.thenReturn(getNodeReports(nodeReports, nodeStates));
result = cli.run(new String[] { "-list", "--states", "DECOMMISSIONED" });
assertEquals(0, result);
verify(client).getNodeReports(states);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Running-Containers");
pw.print(" host0:0\t DECOMMISSIONED\t host1:8888");
pw.println("\t 0");
pw.close();
nodesReportStr = baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr, sysOutStream.toString());
verify(sysOut, times(5)).write(any(byte[].class), anyInt(), anyInt());
sysOutStream.reset();
nodeStates.clear();
nodeStates.add(NodeState.REBOOTED);
states = nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states))
.thenReturn(getNodeReports(nodeReports, nodeStates));
result = cli.run(new String[] { "-list", "--states", "REBOOTED" });
assertEquals(0, result);
verify(client).getNodeReports(states);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Running-Containers");
pw.print(" host0:0\t REBOOTED\t host1:8888");
pw.println("\t 0");
pw.close();
nodesReportStr = baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr, sysOutStream.toString());
verify(sysOut, times(6)).write(any(byte[].class), anyInt(), anyInt());
sysOutStream.reset();
nodeStates.clear();
nodeStates.add(NodeState.LOST);
states = nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states))
.thenReturn(getNodeReports(nodeReports, nodeStates));
result = cli.run(new String[] { "-list", "--states", "LOST" });
assertEquals(0, result);
verify(client).getNodeReports(states);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Running-Containers");
pw.print(" host0:0\t LOST\t host1:8888");
pw.println("\t 0");
pw.close();
nodesReportStr = baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr, sysOutStream.toString());
verify(sysOut, times(7)).write(any(byte[].class), anyInt(), anyInt());
sysOutStream.reset();
nodeStates.clear();
nodeStates.add(NodeState.NEW);
nodeStates.add(NodeState.RUNNING);
nodeStates.add(NodeState.LOST);
nodeStates.add(NodeState.REBOOTED);
states = nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states))
.thenReturn(getNodeReports(nodeReports, nodeStates));
result = cli.run(new String[] { "-list", "--states",
"NEW,RUNNING,LOST,REBOOTED" });
assertEquals(0, result);
verify(client).getNodeReports(states);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Nodes:5");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Running-Containers");
pw.print(" host0:0\t NEW\t host1:8888");
pw.println("\t 0");
pw.print(" host0:0\t RUNNING\t host1:8888");
pw.println("\t 0");
pw.print(" host1:0\t RUNNING\t host1:8888");
pw.println("\t 0");
pw.print(" host0:0\t REBOOTED\t host1:8888");
pw.println("\t 0");
pw.print(" host0:0\t LOST\t host1:8888");
pw.println("\t 0");
pw.close();
nodesReportStr = baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr, sysOutStream.toString());
verify(sysOut, times(8)).write(any(byte[].class), anyInt(), anyInt());
sysOutStream.reset();
nodeStates.clear();
for (NodeState s : NodeState.values()) {
nodeStates.add(s);
}
states = nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states))
.thenReturn(getNodeReports(nodeReports, nodeStates));
result = cli.run(new String[] { "-list", "--all" });
assertEquals(0, result);
verify(client).getNodeReports(states);
baos = new ByteArrayOutputStream();
pw = new PrintWriter(baos);
pw.println("Total Nodes:7");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Running-Containers");
pw.print(" host0:0\t NEW\t host1:8888");
pw.println("\t 0");
pw.print(" host0:0\t RUNNING\t host1:8888");
pw.println("\t 0");
pw.print(" host1:0\t RUNNING\t host1:8888");
pw.println("\t 0");
pw.print(" host0:0\t UNHEALTHY\t host1:8888");
pw.println("\t 0");
pw.print(" host0:0\t DECOMMISSIONED\t host1:8888");
pw.println("\t 0");
pw.print(" host0:0\t REBOOTED\t host1:8888");
pw.println("\t 0");
pw.print(" host0:0\t LOST\t host1:8888");
pw.println("\t 0");
pw.close();
nodesReportStr = baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr, sysOutStream.toString());
verify(sysOut, times(9)).write(any(byte[].class), anyInt(), anyInt());
}
private List<NodeReport> getNodeReports(
List<NodeReport> nodeReports,
Set<NodeState> nodeStates) {
List<NodeReport> reports = new ArrayList<NodeReport>();
for (NodeReport nodeReport : nodeReports) {
if (nodeStates.contains(nodeReport.getNodeState())) {
reports.add(nodeReport);
}
}
return reports;
}
@Test
public void testNodeStatus() throws Exception {
NodeId nodeId = NodeId.newInstance("host0", 0);
NodeCLI cli = new NodeCLI();
when(client.getNodeReports()).thenReturn(getNodeReports(3));
when(client.getNodeReports()).thenReturn(
getNodeReports(3, NodeState.RUNNING));
cli.setClient(client);
cli.setSysOutPrintStream(sysOut);
cli.setSysErrPrintStream(sysErr);
@ -424,7 +783,8 @@ public class TestYarnCLI {
public void testAbsentNodeStatus() throws Exception {
NodeId nodeId = NodeId.newInstance("Absenthost0", 0);
NodeCLI cli = new NodeCLI();
when(client.getNodeReports()).thenReturn(getNodeReports(0));
when(client.getNodeReports()).thenReturn(
getNodeReports(0, NodeState.RUNNING));
cli.setClient(client);
cli.setSysOutPrintStream(sysOut);
cli.setSysErrPrintStream(sysErr);
@ -452,12 +812,12 @@ public class TestYarnCLI {
verify(sysErr).println("Invalid Command Usage : ");
}
private List<NodeReport> getNodeReports(int noOfNodes) {
private List<NodeReport> getNodeReports(int noOfNodes, NodeState state) {
List<NodeReport> nodeReports = new ArrayList<NodeReport>();
for (int i = 0; i < noOfNodes; i++) {
NodeReport nodeReport = NodeReport.newInstance(NodeId
.newInstance("host" + i, 0), NodeState.RUNNING, "host" + 1 + ":8888",
.newInstance("host" + i, 0), state, "host" + 1 + ":8888",
"rack1", Records.newRecord(Resource.class), Records
.newRecord(Resource.class), 0, "", 0);
nodeReports.add(nodeReport);


@ -18,13 +18,18 @@
package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProtoOrBuilder;
@ -38,6 +43,7 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
boolean viaProto = false;
Set<String> applicationTypes = null;
EnumSet<YarnApplicationState> applicationStates = null;
public GetApplicationsRequestPBImpl() {
builder = GetApplicationsRequestProto.newBuilder();
@ -67,6 +73,40 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
if (this.applicationTypes != null) {
addLocalApplicationTypesToProto();
}
if (this.applicationStates != null) {
maybeInitBuilder();
builder.clearApplicationStates();
Iterable<YarnApplicationStateProto> iterable =
new Iterable<YarnApplicationStateProto>() {
@Override
public Iterator<YarnApplicationStateProto> iterator() {
return new Iterator<YarnApplicationStateProto>() {
Iterator<YarnApplicationState> iter = applicationStates
.iterator();
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public YarnApplicationStateProto next() {
return ProtoUtils.convertToProtoFormat(iter.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
};
builder.addAllApplicationStates(iterable);
}
}
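Editor's note: the mergeLocalToBuilder change above hands protobuf a lazily converting Iterable instead of materializing an intermediate list of YarnApplicationStateProto values. A minimal, self-contained sketch of that idiom (class and method names here are illustrative, not part of the patch):

import java.util.Arrays;
import java.util.Iterator;

final class LazyMapping {
  interface Fn<F, T> { T apply(F from); }

  // Wraps a source Iterable and converts each element on demand, so no
  // intermediate converted list is ever built.
  static <F, T> Iterable<T> map(final Iterable<F> source, final Fn<F, T> fn) {
    return new Iterable<T>() {
      @Override
      public Iterator<T> iterator() {
        final Iterator<F> iter = source.iterator();
        return new Iterator<T>() {
          @Override public boolean hasNext() { return iter.hasNext(); }
          @Override public T next() { return fn.apply(iter.next()); }
          @Override public void remove() { throw new UnsupportedOperationException(); }
        };
      }
    };
  }

  public static void main(String[] args) {
    Iterable<String> protos = map(Arrays.asList(1, 2, 3),
        new Fn<Integer, String>() {
          @Override public String apply(Integer i) { return "proto-" + i; }
        });
    for (String s : protos) {
      System.out.println(s);  // proto-1, proto-2, proto-3
    }
  }
}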
private void addLocalApplicationTypesToProto() {
@ -94,6 +134,20 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
this.applicationTypes.addAll(appTypeList);
}
private void initApplicationStates() {
if (this.applicationStates != null) {
return;
}
GetApplicationsRequestProtoOrBuilder p = viaProto ? proto : builder;
List<YarnApplicationStateProto> appStatesList =
p.getApplicationStatesList();
this.applicationStates = EnumSet.noneOf(YarnApplicationState.class);
for (YarnApplicationStateProto c : appStatesList) {
this.applicationStates.add(ProtoUtils.convertFromProtoFormat(c));
}
}
@Override
public Set<String> getApplicationTypes() {
initApplicationTypes();
@ -108,6 +162,21 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
this.applicationTypes = applicationTypes;
}
@Override
public EnumSet<YarnApplicationState> getApplicationStates() {
initApplicationStates();
return this.applicationStates;
}
@Override
public void setApplicationStates(EnumSet<YarnApplicationState> applicationStates) {
maybeInitBuilder();
if (applicationStates == null) {
builder.clearApplicationStates();
}
this.applicationStates = applicationStates;
}
@Override
public int hashCode() {
return getProto().hashCode();
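Editor's note: with the new accessor pair, callers can express state filtering directly on the request. A hedged usage sketch, assuming the no-argument GetApplicationsRequest.newInstance() factory (the factory itself is not shown in this diff):

import java.util.EnumSet;

import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;

final class StateFilterExample {
  static GetApplicationsRequest runningOrAccepted() {
    // Assumed factory; see GetApplicationsRequest for the actual creation API.
    GetApplicationsRequest req = GetApplicationsRequest.newInstance();
    // Only applications currently in one of these states are reported.
    req.setApplicationStates(EnumSet.of(YarnApplicationState.RUNNING,
        YarnApplicationState.ACCEPTED));
    return req;
  }
}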


@ -33,6 +33,8 @@ import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.AdminACLsManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -65,7 +67,6 @@ import com.google.inject.servlet.GuiceFilter;
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public class WebApps {
static final Logger LOG = LoggerFactory.getLogger(WebApps.class);
public static class Builder<T> {
static class ServletStruct {
public Class<? extends HttpServlet> clazz;
@ -82,6 +83,8 @@ public class WebApps {
boolean findPort = false;
Configuration conf;
boolean devMode = false;
private String spnegoPrincipalKey;
private String spnegoKeytabKey;
private final HashSet<ServletStruct> servlets = new HashSet<ServletStruct>();
private final HashMap<String, Object> attributes = new HashMap<String, Object>();
@ -135,6 +138,16 @@ public class WebApps {
this.conf = conf;
return this;
}
public Builder<T> withHttpSpnegoPrincipalKey(String spnegoPrincipalKey) {
this.spnegoPrincipalKey = spnegoPrincipalKey;
return this;
}
public Builder<T> withHttpSpnegoKeytabKey(String spnegoKeytabKey) {
this.spnegoKeytabKey = spnegoKeytabKey;
return this;
}
public Builder<T> inDevMode() {
devMode = true;
@ -197,8 +210,32 @@ public class WebApps {
}
}
HttpServer server =
new HttpServer(name, bindAddress, port, findPort, conf,
new AdminACLsManager(conf).getAdminAcl(), null, webapp.getServePathSpecs());
new HttpServer(name, bindAddress, port, findPort, conf,
new AdminACLsManager(conf).getAdminAcl(), null,
webapp.getServePathSpecs()) {
{
if (UserGroupInformation.isSecurityEnabled()) {
boolean initSpnego = true;
if (spnegoPrincipalKey == null
|| conf.get(spnegoPrincipalKey, "").isEmpty()) {
LOG.warn("Principal for spnego filter is not set");
initSpnego = false;
}
if (spnegoKeytabKey == null
|| conf.get(spnegoKeytabKey, "").isEmpty()) {
LOG.warn("Keytab for spnego filter is not set");
initSpnego = false;
}
if (initSpnego) {
LOG.info("Initializing spnego filter with principal key : "
+ spnegoPrincipalKey + " keytab key : "
+ spnegoKeytabKey);
initSpnego(conf, spnegoPrincipalKey, spnegoKeytabKey);
}
}
}
};
for(ServletStruct struct: servlets) {
server.addServlet(struct.name, struct.spec, struct.clazz);
}
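Editor's note: the anonymous HttpServer subclass above uses an instance initializer to enable SPNEGO only when both keys resolve to non-empty config values. The guard reduces to a small predicate; a hedged restatement (the helper class and method names are illustrative):

import org.apache.hadoop.conf.Configuration;

final class SpnegoGuard {
  // True only when both keys are set and resolve to non-empty values;
  // otherwise the web app starts without the SPNEGO filter (as warned above).
  static boolean shouldInitSpnego(Configuration conf,
      String principalKey, String keytabKey) {
    return principalKey != null && !conf.get(principalKey, "").isEmpty()
        && keytabKey != null && !conf.get(keytabKey, "").isEmpty();
  }
}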


@ -59,8 +59,15 @@ public class WebServer extends AbstractService {
LOG.info("Instantiating NMWebApp at " + bindAddress);
try {
this.webApp =
WebApps.$for("node", Context.class, this.nmContext, "ws")
.at(bindAddress).with(getConfig()).start(this.nmWebApp);
WebApps
.$for("node", Context.class, this.nmContext, "ws")
.at(bindAddress)
.with(getConfig())
.withHttpSpnegoPrincipalKey(
YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY)
.withHttpSpnegoKeytabKey(
YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
.start(this.nmWebApp);
this.port = this.webApp.httpServer().getPort();
} catch (Exception e) {
String msg = "NMWebapps failed to start.";


@ -73,10 +73,12 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.RPCUtil;
@ -86,6 +88,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstant
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
@ -394,7 +397,6 @@ public class ClientRMService extends AbstractService implements
@Override
public GetApplicationsResponse getApplications(
GetApplicationsRequest request) throws YarnException {
UserGroupInformation callerUGI;
try {
callerUGI = UserGroupInformation.getCurrentUser();
@ -404,12 +406,22 @@ public class ClientRMService extends AbstractService implements
}
Set<String> applicationTypes = request.getApplicationTypes();
boolean bypassFilter = applicationTypes.isEmpty();
EnumSet<YarnApplicationState> applicationStates =
request.getApplicationStates();
List<ApplicationReport> reports = new ArrayList<ApplicationReport>();
for (RMApp application : this.rmContext.getRMApps().values()) {
if (!(bypassFilter || applicationTypes.contains(application
.getApplicationType()))) {
continue;
if (applicationTypes != null && !applicationTypes.isEmpty()) {
if (!applicationTypes.contains(application.getApplicationType())) {
continue;
}
}
if (applicationStates != null && !applicationStates.isEmpty()) {
if (!applicationStates.contains(RMServerUtils
.createApplicationState(application.getState()))) {
continue;
}
}
boolean allowAccess = checkAccess(callerUGI, application.getUser(),
ApplicationAccessType.VIEW_APP, application.getApplicationId());
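Editor's note: read as a predicate, the reworked loop reports an application only when it passes both optional filters, where a null or empty filter set matches everything. A hedged restatement (class and method names are illustrative):

import java.util.EnumSet;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;

final class AppFilterExample {
  // A null or empty filter set means "match everything".
  static boolean matches(RMApp app, Set<String> types,
      EnumSet<YarnApplicationState> states) {
    boolean typeOk = types == null || types.isEmpty()
        || types.contains(app.getApplicationType());
    boolean stateOk = states == null || states.isEmpty()
        || states.contains(
            RMServerUtils.createApplicationState(app.getState()));
    return typeOk && stateOk;
  }
}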


@ -56,7 +56,7 @@ public class RMContextImpl implements RMContext {
private AMLivelinessMonitor amFinishingMonitor;
private RMStateStore stateStore = null;
private ContainerAllocationExpirer containerAllocationExpirer;
private final DelegationTokenRenewer tokenRenewer;
private final DelegationTokenRenewer delegationTokenRenewer;
private final AMRMTokenSecretManager amRMTokenSecretManager;
private final RMContainerTokenSecretManager containerTokenSecretManager;
private final NMTokenSecretManagerInRM nmTokenSecretManager;
@ -67,7 +67,7 @@ public class RMContextImpl implements RMContext {
ContainerAllocationExpirer containerAllocationExpirer,
AMLivelinessMonitor amLivelinessMonitor,
AMLivelinessMonitor amFinishingMonitor,
DelegationTokenRenewer tokenRenewer,
DelegationTokenRenewer delegationTokenRenewer,
AMRMTokenSecretManager amRMTokenSecretManager,
RMContainerTokenSecretManager containerTokenSecretManager,
NMTokenSecretManagerInRM nmTokenSecretManager,
@ -77,7 +77,7 @@ public class RMContextImpl implements RMContext {
this.containerAllocationExpirer = containerAllocationExpirer;
this.amLivelinessMonitor = amLivelinessMonitor;
this.amFinishingMonitor = amFinishingMonitor;
this.tokenRenewer = tokenRenewer;
this.delegationTokenRenewer = delegationTokenRenewer;
this.amRMTokenSecretManager = amRMTokenSecretManager;
this.containerTokenSecretManager = containerTokenSecretManager;
this.nmTokenSecretManager = nmTokenSecretManager;
@ -90,13 +90,13 @@ public class RMContextImpl implements RMContext {
ContainerAllocationExpirer containerAllocationExpirer,
AMLivelinessMonitor amLivelinessMonitor,
AMLivelinessMonitor amFinishingMonitor,
DelegationTokenRenewer tokenRenewer,
DelegationTokenRenewer delegationTokenRenewer,
AMRMTokenSecretManager appTokenSecretManager,
RMContainerTokenSecretManager containerTokenSecretManager,
NMTokenSecretManagerInRM nmTokenSecretManager,
ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager) {
this(rmDispatcher, null, containerAllocationExpirer, amLivelinessMonitor,
amFinishingMonitor, tokenRenewer, appTokenSecretManager,
amFinishingMonitor, delegationTokenRenewer, appTokenSecretManager,
containerTokenSecretManager, nmTokenSecretManager,
clientToAMTokenSecretManager);
RMStateStore nullStore = new NullRMStateStore();
@ -151,7 +151,7 @@ public class RMContextImpl implements RMContext {
@Override
public DelegationTokenRenewer getDelegationTokenRenewer() {
return tokenRenewer;
return delegationTokenRenewer;
}
@Override


@ -28,9 +28,12 @@ import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
@ -112,4 +115,27 @@ public class RMServerUtils {
}
}
}
public static YarnApplicationState createApplicationState(RMAppState rmAppState) {
switch(rmAppState) {
case NEW:
return YarnApplicationState.NEW;
case NEW_SAVING:
return YarnApplicationState.NEW_SAVING;
case SUBMITTED:
return YarnApplicationState.SUBMITTED;
case ACCEPTED:
return YarnApplicationState.ACCEPTED;
case RUNNING:
return YarnApplicationState.RUNNING;
case FINISHING:
case FINISHED:
return YarnApplicationState.FINISHED;
case KILLED:
return YarnApplicationState.KILLED;
case FAILED:
return YarnApplicationState.FAILED;
}
throw new YarnRuntimeException("Unknown state passed!");
}
}
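Editor's note: the mapping is intentionally lossy: the internal FINISHING and FINISHED states both surface as the public FINISHED state. A tiny illustration (class name is illustrative):

import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;

final class StateMappingExample {
  public static void main(String[] args) {
    // Both print FINISHED: clients never observe the FINISHING substate.
    System.out.println(RMServerUtils.createApplicationState(RMAppState.FINISHING));
    System.out.println(RMServerUtils.createApplicationState(RMAppState.FINISHED));
  }
}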


@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;
@ -129,6 +130,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
protected RMAppManager rmAppManager;
protected ApplicationACLsManager applicationACLsManager;
protected RMDelegationTokenSecretManager rmDTSecretManager;
private DelegationTokenRenewer delegationTokenRenewer;
private WebApp webApp;
protected RMContext rmContext;
protected ResourceTrackerService resourceTracker;
@ -168,8 +170,10 @@ public class ResourceManager extends CompositeService implements Recoverable {
AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor();
addService(amFinishingMonitor);
DelegationTokenRenewer tokenRenewer = createDelegationTokenRenewer();
addService(tokenRenewer);
if (UserGroupInformation.isSecurityEnabled()) {
this.delegationTokenRenewer = createDelegationTokenRenewer();
addService(delegationTokenRenewer);
}
this.containerTokenSecretManager = createContainerTokenSecretManager(conf);
this.nmTokenSecretManager = createNMTokenSecretManager(conf);
@ -200,7 +204,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
this.rmContext =
new RMContextImpl(this.rmDispatcher, rmStore,
this.containerAllocationExpirer, amLivelinessMonitor,
amFinishingMonitor, tokenRenewer, this.amRmTokenSecretManager,
amFinishingMonitor, delegationTokenRenewer, this.amRmTokenSecretManager,
this.containerTokenSecretManager, this.nmTokenSecretManager,
this.clientToAMSecretManager);
@ -573,9 +577,16 @@ public class ResourceManager extends CompositeService implements Recoverable {
protected void startWepApp() {
Builder<ApplicationMasterService> builder =
WebApps.$for("cluster", ApplicationMasterService.class, masterService, "ws").at(
this.conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS,
YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS));
WebApps
.$for("cluster", ApplicationMasterService.class, masterService,
"ws")
.with(conf)
.withHttpSpnegoPrincipalKey(
YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY)
.withHttpSpnegoKeytabKey(
YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
.at(this.conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS,
YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS));
String proxyHostAndPort = YarnConfiguration.getProxyHostAndPort(conf);
if(YarnConfiguration.getRMWebAppHostAndPort(conf).
equals(proxyHostAndPort)) {
@ -602,6 +613,13 @@ public class ResourceManager extends CompositeService implements Recoverable {
this.containerTokenSecretManager.start();
this.nmTokenSecretManager.start();
// Explicitly start DTRenewer too in secure mode before kicking recovery as
// tokens will start getting added for renewal as part of the recovery
// process itself.
if (UserGroupInformation.isSecurityEnabled()) {
this.delegationTokenRenewer.start();
}
RMStateStore rmStore = rmContext.getStateStore();
// The state store needs to start irrespective of recoveryEnabled as apps
// need events to move to further states.
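Editor's note: the DelegationTokenRenewer is now created, registered, and started only in secure mode. A minimal sketch of this conditional-composition pattern (class name is illustrative):

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.service.Service;

final class SecureOnlyComposition extends CompositeService {
  SecureOnlyComposition(Service secureOnlyChild) {
    super("SecureOnlyComposition");
    // Composed only in secure mode, so init/start/stop are skipped entirely
    // when security is off and the child service is never touched.
    if (UserGroupInformation.isSecurityEnabled()) {
      addService(secureOnlyChild);
    }
  }
}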


@ -44,7 +44,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
@ -55,6 +54,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
@ -378,29 +378,6 @@ public class RMAppImpl implements RMApp, Recoverable {
}
}
private YarnApplicationState createApplicationState(RMAppState rmAppState) {
switch(rmAppState) {
case NEW:
return YarnApplicationState.NEW;
case NEW_SAVING:
return YarnApplicationState.NEW_SAVING;
case SUBMITTED:
return YarnApplicationState.SUBMITTED;
case ACCEPTED:
return YarnApplicationState.ACCEPTED;
case RUNNING:
return YarnApplicationState.RUNNING;
case FINISHING:
case FINISHED:
return YarnApplicationState.FINISHED;
case KILLED:
return YarnApplicationState.KILLED;
case FAILED:
return YarnApplicationState.FAILED;
}
throw new YarnRuntimeException("Unknown state passed!");
}
private FinalApplicationStatus createFinalApplicationStatus(RMAppState state) {
switch(state) {
case NEW:
@ -500,7 +477,7 @@ public class RMAppImpl implements RMApp, Recoverable {
return BuilderUtils.newApplicationReport(this.applicationId,
currentApplicationAttemptId, this.user, this.queue,
this.name, host, rpcPort, clientToAMToken,
createApplicationState(this.stateMachine.getCurrentState()), diags,
RMServerUtils.createApplicationState(this.stateMachine.getCurrentState()), diags,
trackingUrl, this.startTime, this.finishTime, finishState,
appUsageReport, origTrackingUrl, progress, this.applicationType,
amrmToken);


@ -35,6 +35,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
@ -601,9 +602,13 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
rmNode.context.getDispatcher().getEventHandler().handle(
new NodeUpdateSchedulerEvent(rmNode));
}
rmNode.context.getDelegationTokenRenewer().updateKeepAliveApplications(
// Update DTRenewer in secure mode to keep these apps alive. Today this is
// needed for log-aggregation to finish long after the apps are gone.
if (UserGroupInformation.isSecurityEnabled()) {
rmNode.context.getDelegationTokenRenewer().updateKeepAliveApplications(
statusEvent.getKeepAliveAppIds());
}
return NodeState.RUNNING;
}


@ -281,7 +281,7 @@ public class AppSchedulingInfo {
// Update future requirements
nodeLocalRequest.setNumContainers(nodeLocalRequest.getNumContainers() - 1);
if (nodeLocalRequest.getNumContainers() == 0) {
this.requests.get(priority).remove(node.getHostName());
this.requests.get(priority).remove(node.getNodeName());
}
ResourceRequest rackLocalRequest = requests.get(priority).get(


@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
/**
* Represents a YARN Cluster Node from the viewpoint of the scheduler.
@ -30,10 +31,17 @@ import org.apache.hadoop.yarn.api.records.Resource;
public abstract class SchedulerNode {
/**
* Get hostname.
* @return hostname
* Get the name of the node for scheduling matching decisions.
* <p/>
* Typically this is the 'hostname' reported by the node, but it could be
* configured to be 'hostname:port' reported by the node via the
* {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} constant.
   * The main use case of this is the YARN minicluster, which needs to
   * differentiate node manager instances by their port number.
*
* @return name of the node for scheduling matching decisions.
*/
public abstract String getHostName();
public abstract String getNodeName();
/**
* Get rackname.


@ -185,7 +185,8 @@ public class CapacityScheduler
private boolean initialized = false;
private ResourceCalculator calculator;
private boolean usePortForNodeName;
public CapacityScheduler() {}
@Override
@ -256,6 +257,7 @@ public class CapacityScheduler
this.minimumAllocation = this.conf.getMinimumAllocation();
this.maximumAllocation = this.conf.getMaximumAllocation();
this.calculator = this.conf.getResourceCalculator();
this.usePortForNodeName = this.conf.getUsePortForNodeName();
this.rmContext = rmContext;
@ -759,7 +761,8 @@ public class CapacityScheduler
}
private synchronized void addNode(RMNode nodeManager) {
this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager));
this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager,
usePortForNodeName));
Resources.addTo(clusterResource, nodeManager.getTotalCapability());
root.updateClusterResource(clusterResource);
++numNodeManagers;


@ -338,6 +338,11 @@ public class CapacitySchedulerConfiguration extends Configuration {
this);
}
public boolean getUsePortForNodeName() {
return getBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
}
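Editor's note: the capacity, fair, and FIFO schedulers all read this same flag, so port-qualified node names are a single configuration switch. A hedged sketch of flipping it programmatically (class and method names are illustrative):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

final class PortInNodeNameExample {
  static YarnConfiguration confWithPortQualifiedNames() {
    YarnConfiguration conf = new YarnConfiguration();
    // Schedulers will match resource requests on "hostname:port" instead of
    // the bare hostname; the default leaves this off.
    conf.setBoolean(
        YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
    return conf;
  }
}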
public void setResourceComparator(
Class<? extends ResourceCalculator> resourceCalculatorClass) {
setClass(


@ -801,7 +801,7 @@ public class LeafQueue implements CSQueue {
assignContainers(Resource clusterResource, FiCaSchedulerNode node) {
if(LOG.isDebugEnabled()) {
LOG.debug("assignContainers: node=" + node.getHostName()
LOG.debug("assignContainers: node=" + node.getNodeName()
+ " #applications=" + activeApplications.size());
}
@ -1130,7 +1130,7 @@ public class LeafQueue implements CSQueue {
// Data-local
ResourceRequest nodeLocalResourceRequest =
application.getResourceRequest(priority, node.getHostName());
application.getResourceRequest(priority, node.getNodeName());
if (nodeLocalResourceRequest != null) {
assigned =
assignNodeLocalContainers(clusterResource, nodeLocalResourceRequest,
@ -1257,7 +1257,7 @@ public class LeafQueue implements CSQueue {
if (type == NodeType.NODE_LOCAL) {
// Now check if we need containers on this host...
ResourceRequest nodeLocalRequest =
application.getResourceRequest(priority, node.getHostName());
application.getResourceRequest(priority, node.getNodeName());
if (nodeLocalRequest != null) {
return nodeLocalRequest.getNumContainers() > 0;
}
@ -1302,7 +1302,7 @@ public class LeafQueue implements CSQueue {
FiCaSchedulerApp application, Priority priority,
ResourceRequest request, NodeType type, RMContainer rmContainer) {
if (LOG.isDebugEnabled()) {
LOG.debug("assignContainers: node=" + node.getHostName()
LOG.debug("assignContainers: node=" + node.getNodeName()
+ " application=" + application.getApplicationId().getId()
+ " priority=" + priority.getPriority()
+ " request=" + request + " type=" + type);


@ -59,11 +59,17 @@ public class FiCaSchedulerNode extends SchedulerNode {
new HashMap<ContainerId, RMContainer>();
private final RMNode rmNode;
private final String nodeName;
public FiCaSchedulerNode(RMNode node) {
public FiCaSchedulerNode(RMNode node, boolean usePortForNodeName) {
this.rmNode = node;
this.availableResource.setMemory(node.getTotalCapability().getMemory());
this.availableResource.setVirtualCores(node.getTotalCapability().getVirtualCores());
if (usePortForNodeName) {
nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort();
} else {
nodeName = rmNode.getHostName();
}
}
public RMNode getRMNode() {
@ -79,8 +85,8 @@ public class FiCaSchedulerNode extends SchedulerNode {
}
@Override
public String getHostName() {
return this.rmNode.getHostName();
public String getNodeName() {
return nodeName;
}
@Override


@ -24,9 +24,9 @@ public class FiCaSchedulerUtils {
public static boolean isBlacklisted(FiCaSchedulerApp application,
FiCaSchedulerNode node, Log LOG) {
if (application.isBlacklisted(node.getHostName())) {
if (application.isBlacklisted(node.getNodeName())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping 'host' " + node.getHostName() +
LOG.debug("Skipping 'host' " + node.getNodeName() +
" for " + application.getApplicationId() +
" since it has been blacklisted");
}


@ -185,7 +185,7 @@ public class AppSchedulable extends Schedulable {
*/
private void reserve(Priority priority, FSSchedulerNode node,
Container container, boolean alreadyReserved) {
LOG.info("Making reservation: node=" + node.getHostName() +
LOG.info("Making reservation: node=" + node.getNodeName() +
" app_id=" + app.getApplicationId());
if (!alreadyReserved) {
getMetrics().reserveResource(app.getUser(), container.getResource());
@ -309,7 +309,7 @@ public class AppSchedulable extends Schedulable {
ResourceRequest rackLocalRequest = app.getResourceRequest(priority,
node.getRackName());
ResourceRequest localRequest = app.getResourceRequest(priority,
node.getHostName());
node.getNodeName());
if (localRequest != null && !localRequest.getRelaxLocality()) {
LOG.warn("Relax locality off is not supported on local request: "
@ -369,7 +369,7 @@ public class AppSchedulable extends Schedulable {
public boolean hasContainerForNode(Priority prio, FSSchedulerNode node) {
ResourceRequest anyRequest = app.getResourceRequest(prio, ResourceRequest.ANY);
ResourceRequest rackRequest = app.getResourceRequest(prio, node.getRackName());
ResourceRequest nodeRequest = app.getResourceRequest(prio, node.getHostName());
ResourceRequest nodeRequest = app.getResourceRequest(prio, node.getNodeName());
return
// There must be outstanding requests at the given priority:


@ -63,10 +63,16 @@ public class FSSchedulerNode extends SchedulerNode {
new HashMap<ContainerId, RMContainer>();
private final RMNode rmNode;
private final String nodeName;
public FSSchedulerNode(RMNode node) {
public FSSchedulerNode(RMNode node, boolean usePortForNodeName) {
this.rmNode = node;
this.availableResource = Resources.clone(node.getTotalCapability());
if (usePortForNodeName) {
nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort();
} else {
nodeName = rmNode.getHostName();
}
}
public RMNode getRMNode() {
@ -82,8 +88,8 @@ public class FSSchedulerNode extends SchedulerNode {
}
@Override
public String getHostName() {
return rmNode.getHostName();
public String getNodeName() {
return nodeName;
}
@Override


@ -35,7 +35,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@ -122,6 +121,7 @@ public class FairScheduler implements ResourceScheduler {
private Resource incrAllocation;
private QueueManager queueMgr;
private Clock clock;
private boolean usePortForNodeName;
private static final Log LOG = LogFactory.getLog(FairScheduler.class);
@ -751,7 +751,7 @@ public class FairScheduler implements ResourceScheduler {
}
private synchronized void addNode(RMNode node) {
nodes.put(node.getNodeID(), new FSSchedulerNode(node));
nodes.put(node.getNodeID(), new FSSchedulerNode(node, usePortForNodeName));
Resources.addTo(clusterCapacity, node.getTotalCapability());
updateRootQueueMetrics();
@ -1065,7 +1065,8 @@ public class FairScheduler implements ResourceScheduler {
sizeBasedWeight = this.conf.getSizeBasedWeight();
preemptionInterval = this.conf.getPreemptionInterval();
waitTimeBeforeKill = this.conf.getWaitTimeBeforeKill();
usePortForNodeName = this.conf.getUsePortForNodeName();
if (!initialized) {
rootMetrics = FSQueueMetrics.forQueue("root", null, true, conf);
this.rmContext = rmContext;


@ -166,7 +166,12 @@ public class FairSchedulerConfiguration extends Configuration {
public int getWaitTimeBeforeKill() {
return getInt(WAIT_TIME_BEFORE_KILL, DEFAULT_WAIT_TIME_BEFORE_KILL);
}
public boolean getUsePortForNodeName() {
return getBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
}
/**
* Parses a resource config value of a form like "1024", "1024 mb",
* or "1024 mb, 3 vcores". If no units are given, megabytes are assumed.


@ -111,6 +111,7 @@ public class FifoScheduler implements ResourceScheduler, Configurable {
private boolean initialized;
private Resource minimumAllocation;
private Resource maximumAllocation;
private boolean usePortForNodeName;
private Map<ApplicationAttemptId, FiCaSchedulerApp> applications
= new TreeMap<ApplicationAttemptId, FiCaSchedulerApp>();
@ -233,6 +234,9 @@ public class FifoScheduler implements ResourceScheduler, Configurable {
Resources.createResource(conf.getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB));
this.usePortForNodeName = conf.getBoolean(
YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
this.metrics = QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false,
conf);
this.activeUsersManager = new ActiveUsersManager(metrics);
@ -490,7 +494,7 @@ public class FifoScheduler implements ResourceScheduler, Configurable {
FiCaSchedulerApp application, Priority priority) {
int assignedContainers = 0;
ResourceRequest request =
application.getResourceRequest(priority, node.getHostName());
application.getResourceRequest(priority, node.getNodeName());
if (request != null) {
// Don't allocate on this node if we don't need containers on this rack
ResourceRequest rackRequest =
@ -801,7 +805,8 @@ public class FifoScheduler implements ResourceScheduler, Configurable {
}
private synchronized void addNode(RMNode nodeManager) {
this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager));
this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager,
usePortForNodeName));
Resources.addTo(clusterResource, nodeManager.getTotalCapability());
}


@ -200,15 +200,14 @@ public class MockNodes {
};
private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr) {
return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++, null);
return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++, null, 123);
}
private static RMNode buildRMNode(int rack, final Resource perNode,
NodeState state, String httpAddr, int hostnum, String hostName) {
NodeState state, String httpAddr, int hostnum, String hostName, int port) {
final String rackName = "rack"+ rack;
final int nid = hostnum;
final String nodeAddr = hostName + ":" + nid;
final int port = 123;
if (hostName == null) {
hostName = "host"+ nid;
}
@ -230,12 +229,17 @@ public class MockNodes {
}
public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum) {
return buildRMNode(rack, perNode, null, "localhost:0", hostnum, null);
return buildRMNode(rack, perNode, null, "localhost:0", hostnum, null, 123);
}
public static RMNode newNodeInfo(int rack, final Resource perNode,
int hostnum, String hostName) {
return buildRMNode(rack, perNode, null, "localhost:0", hostnum, hostName);
return buildRMNode(rack, perNode, null, "localhost:0", hostnum, hostName, 123);
}
public static RMNode newNodeInfo(int rack, final Resource perNode,
int hostnum, String hostName, int port) {
return buildRMNode(rack, perNode, null, "localhost:0", hostnum, hostName, port);
}
}


@ -101,7 +101,7 @@ public class NodeManager implements ContainerManagementProtocol {
request.setNodeId(this.nodeId);
resourceTrackerService.registerNodeManager(request);
this.schedulerNode = new FiCaSchedulerNode(rmContext.getRMNodes().get(
this.nodeId));
this.nodeId), false);
// Sanity check
Assert.assertEquals(capability.getMemory(),


@ -18,10 +18,10 @@
package org.apache.hadoop.yarn.server.resourcemanager;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
@ -35,6 +35,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.DelegationKey;
@ -63,7 +64,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.log4j.Level;
@ -77,8 +77,11 @@ public class TestRMRestart {
private YarnConfiguration conf;
// Fake rmAddr for token-renewal
private static InetSocketAddress rmAddr;
@Before
public void setup() {
public void setup() throws UnknownHostException {
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
ExitUtil.disableSystemExit();
@ -86,6 +89,8 @@ public class TestRMRestart {
UserGroupInformation.setConfiguration(conf);
conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
rmAddr = new InetSocketAddress(InetAddress.getLocalHost(), 123);
}
@Test (timeout=180000)
@ -446,6 +451,7 @@ public class TestRMRestart {
Token<RMDelegationTokenIdentifier> token1 =
new Token<RMDelegationTokenIdentifier>(dtId1,
rm1.getRMDTSecretManager());
SecurityUtil.setTokenService(token1, rmAddr);
ts.addToken(userText1, token1);
tokenSet.add(token1);
@ -456,6 +462,7 @@ public class TestRMRestart {
Token<RMDelegationTokenIdentifier> token2 =
new Token<RMDelegationTokenIdentifier>(dtId2,
rm1.getRMDTSecretManager());
SecurityUtil.setTokenService(token2, rmAddr);
ts.addToken(userText2, token2);
tokenSet.add(token2);
@ -575,6 +582,7 @@ public class TestRMRestart {
@Test
public void testRMDelegationTokenRestoredOnRMRestart() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
RMState rmState = memStore.getState();
@ -587,20 +595,21 @@ public class TestRMRestart {
rmState.getRMDTSecretManagerState().getMasterKeyState();
MockRM rm1 = new TestSecurityMockRM(conf, memStore);
rm1.start();
// create an empty credential
Credentials ts = new Credentials();
// request a token and add into credential
GetDelegationTokenRequest request1 = mock(GetDelegationTokenRequest.class);
when(request1.getRenewer()).thenReturn("renewer1");
GetDelegationTokenRequest request1 =
GetDelegationTokenRequest.newInstance("renewer1");
GetDelegationTokenResponse response1 =
rm1.getClientRMService().getDelegationToken(request1);
org.apache.hadoop.yarn.api.records.Token delegationToken1 =
response1.getRMDelegationToken();
Token<RMDelegationTokenIdentifier> token1 =
ConverterUtils.convertFromYarn(delegationToken1, null);
ConverterUtils.convertFromYarn(delegationToken1, rmAddr);
RMDelegationTokenIdentifier dtId1 = token1.decodeIdentifier();
HashSet<RMDelegationTokenIdentifier> tokenIdentSet =
@ -632,14 +641,14 @@ public class TestRMRestart {
rmState.getRMDTSecretManagerState().getDTSequenceNumber());
// request one more token
GetDelegationTokenRequest request2 = mock(GetDelegationTokenRequest.class);
when(request2.getRenewer()).thenReturn("renewer2");
GetDelegationTokenRequest request2 =
GetDelegationTokenRequest.newInstance("renewer2");
GetDelegationTokenResponse response2 =
rm1.getClientRMService().getDelegationToken(request2);
org.apache.hadoop.yarn.api.records.Token delegationToken2 =
response2.getRMDelegationToken();
Token<RMDelegationTokenIdentifier> token2 =
ConverterUtils.convertFromYarn(delegationToken2, null);
ConverterUtils.convertFromYarn(delegationToken2, rmAddr);
RMDelegationTokenIdentifier dtId2 = token2.decodeIdentifier();
// cancel token2
@ -721,20 +730,10 @@ public class TestRMRestart {
}
@Override
protected DelegationTokenRenewer createDelegationTokenRenewer() {
return new DelegationTokenRenewer() {
@Override
protected void renewToken(final DelegationTokenToRenew dttr)
throws IOException {
// Do nothing
}
@Override
protected void setTimerForTokenRenewal(DelegationTokenToRenew token)
throws IOException {
// Do nothing
}
};
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
RMDelegationTokenIdentifier.Renewer.setSecretManager(
this.getRMDTSecretManager(), rmAddr);
}
}
}
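Editor's note: the recurring SecurityUtil.setTokenService(token, rmAddr) calls above stamp each delegation token's service field with the RM address, which renew and cancel RPCs later use to locate the issuing RM. A hedged sketch of the idiom (class and method names are illustrative):

import java.net.InetSocketAddress;

import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

final class TokenServiceExample {
  // Stamps the token with "host:port" so later renew/cancel RPCs know
  // which RM issued it.
  static void pinToRM(Token<? extends TokenIdentifier> token,
      InetSocketAddress rmAddr) {
    SecurityUtil.setTokenService(token, rmAddr);
  }
}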


@ -26,7 +26,6 @@ import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.util.HashMap;
@ -126,7 +125,7 @@ public class TestChildQueueOrder {
throw new Exception();
} catch (Exception e) {
LOG.info("FOOBAR q.assignContainers q=" + queue.getQueueName() +
" alloc=" + allocation + " node=" + node.getHostName());
" alloc=" + allocation + " node=" + node.getNodeName());
}
final Resource allocatedResource = Resources.createResource(allocation);
if (queue instanceof ParentQueue) {


@ -138,7 +138,7 @@ public class TestParentQueue {
throw new Exception();
} catch (Exception e) {
LOG.info("FOOBAR q.assignContainers q=" + queue.getQueueName() +
" alloc=" + allocation + " node=" + node.getHostName());
" alloc=" + allocation + " node=" + node.getNodeName());
}
final Resource allocatedResource = Resources.createResource(allocation);
if (queue instanceof ParentQueue) {


@ -160,7 +160,7 @@ public class TestUtils {
when(rmNode.getHostName()).thenReturn(host);
when(rmNode.getRackName()).thenReturn(rack);
FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode));
FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode, false));
LOG.info("node = " + host + " avail=" + node.getAvailableResource());
return node;
}


@ -2146,4 +2146,54 @@ public class TestFairScheduler {
Assert.assertEquals(2, app3.getLiveContainers().size());
Assert.assertEquals(2, app4.getLiveContainers().size());
}
@Test(timeout = 30000)
public void testHostPortNodeName() throws Exception {
scheduler.getConf().setBoolean(YarnConfiguration
.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
scheduler.reinitialize(scheduler.getConf(),
resourceManager.getRMContext());
RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024),
1, "127.0.0.1", 1);
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024),
2, "127.0.0.1", 2);
NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1",
"user1", 0);
ResourceRequest nodeRequest = createResourceRequest(1024,
node1.getNodeID().getHost() + ":" + node1.getNodeID().getPort(), 1,
1, true);
ResourceRequest rackRequest = createResourceRequest(1024,
node1.getRackName(), 1, 1, false);
ResourceRequest anyRequest = createResourceRequest(1024,
ResourceRequest.ANY, 1, 1, false);
createSchedulingRequestExistingApplication(nodeRequest, attId1);
createSchedulingRequestExistingApplication(rackRequest, attId1);
createSchedulingRequestExistingApplication(anyRequest, attId1);
scheduler.update();
NodeUpdateSchedulerEvent node1UpdateEvent = new
NodeUpdateSchedulerEvent(node1);
NodeUpdateSchedulerEvent node2UpdateEvent = new
NodeUpdateSchedulerEvent(node2);
// no matter how many heartbeats, node2 should never get a container
FSSchedulerApp app = scheduler.applications.get(attId1);
for (int i = 0; i < 10; i++) {
scheduler.handle(node2UpdateEvent);
assertEquals(0, app.getLiveContainers().size());
assertEquals(0, app.getReservedContainers().size());
}
// then node1 should get the container
scheduler.handle(node1UpdateEvent);
assertEquals(1, app.getLiveContainers().size());
}
}


@ -53,6 +53,21 @@ import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
/**
* Embedded Yarn minicluster for testcases that need to interact with a cluster.
* <p/>
* In a real cluster, resource request matching is done using the hostname, and
 * by default the YARN minicluster works in exactly the same way as a real cluster.
* <p/>
* If a testcase needs to use multiple nodes and exercise resource request
 * matching to a specific node, then the property
 * {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} should be
 * set to <code>true</code> in the configuration used to initialize the
 * minicluster.
* <p/>
 * With this property set to <code>true</code>, the matching will be done
 * using the <code>hostname:port</code> of the node managers. In that case,
 * the AM must make its resource requests using <code>hostname:port</code> as
 * the location.
*/
public class MiniYARNCluster extends CompositeService {
private static final Log LOG = LogFactory.getLog(MiniYARNCluster.class);
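Editor's note: a hedged usage sketch of the behavior the javadoc above describes, assuming the four-argument MiniYARNCluster constructor (test name, number of NodeManagers, local dirs, log dirs); the class name is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;

final class MiniClusterExample {
  public static void main(String[] args) {
    // Two NodeManagers on one host: enable port-qualified node names so a
    // test AM can target each NM individually via "hostname:port".
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
        true);
    MiniYARNCluster cluster = new MiniYARNCluster("example", 2, 1, 1);
    cluster.init(conf);
    cluster.start();
    // ... run the test against the two NMs, then:
    cluster.stop();
  }
}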


@ -47,7 +47,7 @@ Hadoop MapReduce Next Generation - Fair Scheduler
The scheduler organizes apps further into "queues", and shares resources
fairly between these queues. By default, all users share a single queue,
called “default”. If an app specifically lists a queue in a container resource
named “default”. If an app specifically lists a queue in a container resource
request, the request is submitted to that queue. It is also possible to assign
queues based on the user name included with the request through
configuration. Within each queue, a scheduling policy is used to share
@ -85,7 +85,7 @@ Hadoop MapReduce Next Generation - Fair Scheduler
their parents in the fair scheduler configuration file.
A queue's name starts with the names of its parents, with periods as
separators. So a queue named "queue1" under the root named, would be referred
separators. So a queue named "queue1" under the root queue, would be referred
to as "root.queue1", and a queue named "queue2" under a queue named "parent1"
would be referred to as "root.parent1.queue2". When referring to queues, the
root part of the name is optional, so queue1 could be referred to as just
@ -118,22 +118,23 @@ Hadoop MapReduce Next Generation - Fair Scheduler
Customizing the Fair Scheduler typically involves altering two files. First,
scheduler-wide options can be set by adding configuration properties in the
fair-scheduler.xml file in your existing configuration directory. Second, in
yarn-site.xml file in your existing configuration directory. Second, in
most cases users will want to create a manifest file listing which queues
exist and their respective weights and capacities. The location of this file
is flexible - but it must be declared in fair-scheduler.xml.
is flexible - but it must be declared in yarn-site.xml.
* <<<yarn.scheduler.fair.allocation.file>>>
* Path to allocation file. An allocation file is an XML manifest describing
queues and their properties, in addition to certain policy defaults. This file
must be in XML format as described in the next section.
    Defaults to fair-scheduler.xml in the configuration directory.
* <<<yarn.scheduler.fair.user-as-default-queue>>>
* Whether to use the username associated with the allocation as the default
queue name, in the event that a queue name is not specified. If this is set
to "false" or unset, all jobs have a shared default queue, called "default".
to "false" or unset, all jobs have a shared default queue, named "default".
Defaults to true.
* <<<yarn.scheduler.fair.preemption>>>
@ -158,7 +159,7 @@ Hadoop MapReduce Next Generation - Fair Scheduler
* If assignmultiple is true, the maximum amount of containers that can be
assigned in one heartbeat. Defaults to -1, which sets no limit.
* <<<locality.threshold.node>>>
* <<<yarn.scheduler.fair.locality.threshold.node>>>
* For applications that request containers on particular nodes, the number of
scheduling opportunities since the last container assignment to wait before
@ -167,7 +168,7 @@ Hadoop MapReduce Next Generation - Fair Scheduler
opportunities to pass up. The default value of -1.0 means don't pass up any
scheduling opportunities.
* <<<locality.threshold.rack>>>
* <<<yarn.scheduler.fair.locality.threshold.rack>>>
* For applications that request containers on particular racks, the number of
scheduling opportunities since the last container assignment to wait before
@ -178,14 +179,15 @@ Hadoop MapReduce Next Generation - Fair Scheduler
Allocation file format
The allocation file must be in XML format. The format contains four types of
The allocation file must be in XML format. The format contains five types of
elements:
* <<Queue elements>>, which represent queues. Each may contain the following
properties:
* minResources: minimum resources the queue is entitled to, in the form
"X mb, Y vcores". If a queue's minimum share is not satisfied, it will be
"X mb, Y vcores". For the single-resource fairness policy, the vcores
value is ignored. If a queue's minimum share is not satisfied, it will be
offered available resources before any other queue under the same parent.
Under the single-resource fairness policy, a queue
is considered unsatisfied if its memory usage is below its minimum memory
@ -199,7 +201,8 @@ Allocation file format
may be using those resources.
* maxResources: maximum resources a queue is allowed, in the form
"X mb, Y vcores". A queue will never be assigned a container that would
"X mb, Y vcores". For the single-resource fairness policy, the vcores
value is ignored. A queue will never be assigned a container that would
put its aggregate usage over this limit.
* maxRunningApps: limit the number of apps from the queue to run at once
@ -234,19 +237,23 @@ Allocation file format
its fair share before it will try to preempt containers to take resources from
other queues.
* <<A defaultQueueSchedulingPolicy element>>, which sets the default scheduling
   policy for queues; overridden by the schedulingPolicy element in each queue
if specified. Defaults to "fair".
An example allocation file is given here:
---
<?xml version="1.0"?>
<allocations>
<queue name="sample_queue">
<minResources>10000 mb</minResources>
<maxResources>90000 mb</maxResources>
<minResources>10000 mb,0vcores</minResources>
<maxResources>90000 mb,0vcores</maxResources>
<maxRunningApps>50</maxRunningApps>
<weight>2.0</weight>
<schedulingPolicy>fair</schedulingPolicy>
<queue name="sample_sub_queue">
<minResources>5000 mb</minResources>
<minResources>5000 mb,0vcores</minResources>
</queue>
</queue>
<user name="sample_user">