Merging r1523878 through r1524586 from trunk to branch HDFS-2832
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1524590 13f79535-47bb-0310-9956-ffa450edef68
commit a80b826eef
@@ -363,12 +363,31 @@ Release 2.3.0 - UNRELEASED

    HADOOP-9908. Fix NPE when versioninfo properties file is missing (todd)

Release 2.1.1-beta - UNRELEASED
    HADOOP-9350. Hadoop not building against Java7 on OSX
    (Robert Kanter via stevel)

Release 2.2.0 - UNRELEASED

  INCOMPATIBLE CHANGES

  NEW FEATURES

  IMPROVEMENTS

  OPTIMIZATIONS

  BUG FIXES

Release 2.1.1-beta - 2013-09-23

  INCOMPATIBLE CHANGES

    HADOOP-9944. Fix RpcRequestHeaderProto.callId to be sint32 rather than
    uint32 since ipc.Client.CONNECTION_CONTEXT_CALL_ID is signed (i.e. -3)
    (acmurthy)

  NEW FEATURES

  IMPROVEMENTS

    HADOOP-9910. proxy server start and stop documentation wrong

@@ -411,6 +430,9 @@ Release 2.1.1-beta - UNRELEASED
    HADOOP-9962. in order to avoid dependency divergence within Hadoop itself
    lets enable DependencyConvergence. (rvs via tucu)

    HADOOP-9669. Reduce the number of byte array creations and copies in
    XDR data manipulation. (Haohui Mai via brandonli)

  OPTIMIZATIONS

  BUG FIXES

@@ -468,9 +490,6 @@ Release 2.1.1-beta - UNRELEASED
    HADOOP-9557. hadoop-client excludes commons-httpclient. (Lohit Vijayarenu via
    cnauroth)

    HADOOP-9350. Hadoop not building against Java7 on OSX
    (Robert Kanter via stevel)

    HADOOP-9961. versions of a few transitive dependencies diverged between hadoop
    subprojects. (rvs via tucu)
(File diff suppressed because it is too large.)
@@ -60,8 +60,8 @@ message RequestHeaderProto {
    * ProtocolInfoProto) since they reuse the connection; in this case
    * the declaringClassProtocolName field is set to the ProtocolInfoProto
    */
-  required string declaringClassProtocolName = 3;
+  required string declaringClassProtocolName = 2;

   /** protocol version of class declaring the called method */
-  required uint64 clientProtocolVersion = 4;
+  required uint64 clientProtocolVersion = 3;
 }

@@ -62,7 +62,7 @@ message RpcRequestHeaderProto { // the header for the RpcRequest

   optional RpcKindProto rpcKind = 1;
   optional OperationProto rpcOp = 2;
-  required uint32 callId = 3; // a sequence number that is sent back in response
+  required sint32 callId = 3; // a sequence number that is sent back in response
   required bytes clientId = 4; // Globally unique client ID
   // clientId + callId uniquely identifies a request
   // retry count, 1 means this is the first retry
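HADOOP-9944 (also listed in CHANGES.txt above) is why callId moves from uint32 to sint32: ipc.Client.CONNECTION_CONTEXT_CALL_ID is the negative sentinel -3, and with uint32 a negative value is reinterpreted as a large unsigned number, while sint32 zigzag-encodes small negatives compactly and round-trips them correctly. A minimal, self-contained sketch of that encoding rule in plain Java, with no protobuf dependency (class and method names here are illustrative only):

public class ZigZagCallIdDemo {
  // ZigZag interleaves signed values so small negatives map to small unsigned
  // values: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... (the protobuf sint32 rule).
  static int zigZagEncode(int n) { return (n << 1) ^ (n >> 31); }
  static int zigZagDecode(int n) { return (n >>> 1) ^ -(n & 1); }

  // Bytes needed to store a value as a base-128 varint.
  static int varintSize(long v) {
    int size = 1;
    while ((v & ~0x7FL) != 0) { v >>>= 7; size++; }
    return size;
  }

  public static void main(String[] args) {
    int callId = -3; // ipc.Client.CONNECTION_CONTEXT_CALL_ID
    // Reinterpreted as uint32, -3 becomes 4294967293 and needs 5 varint bytes.
    System.out.println("as uint32: " + varintSize(callId & 0xFFFFFFFFL) + " bytes");
    // As sint32, zigzag turns -3 into 5, which fits in one byte and round-trips.
    int z = zigZagEncode(callId);
    System.out.println("as sint32: value " + z + ", " + varintSize(z)
        + " byte(s), decodes to " + zigZagDecode(z));
  }
}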
@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;

 /**

@@ -37,7 +38,7 @@ public class MountResponse {
   /** Response for RPC call {@link MountInterface.MNTPROC#MNT} */
   public static XDR writeMNTResponse(int status, XDR xdr, int xid,
       byte[] handle) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     xdr.writeInt(status);
     if (status == MNT_OK) {
       xdr.writeVariableOpaque(handle);

@@ -50,7 +51,7 @@ public class MountResponse {

   /** Response for RPC call {@link MountInterface.MNTPROC#DUMP} */
   public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (MountEntry mountEntry : mounts) {
       xdr.writeBoolean(true); // Value follows yes
       xdr.writeString(mountEntry.host());

@@ -65,7 +66,7 @@ public class MountResponse {
       List<NfsExports> hostMatcher) {
     assert (exports.size() == hostMatcher.size());

-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (int i = 0; i < exports.size(); i++) {
       xdr.writeBoolean(true); // Value follows - yes
       xdr.writeString(exports.get(i));
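Throughout the NFS and portmap code in this patch, RpcAcceptedReply.voidReply(xdr, xid) is replaced by RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr), so the verifier becomes an explicit part of the reply object instead of being hard-coded to AUTH_NONE inside a static helper. For orientation, here is a hedged, standalone sketch of the accepted-reply header both forms emit for a VerifierNone reply, following RFC 1831 (the class name and constants below are illustrative, not Hadoop's):

import java.nio.ByteBuffer;

public class AcceptedReplyHeaderDemo {
  static final int RPC_REPLY = 1;      // msg_type: this is a reply
  static final int MSG_ACCEPTED = 0;   // reply_stat: the call was accepted
  static final int AUTH_NONE = 0;      // verifier flavor used by VerifierNone
  static final int SUCCESS = 0;        // accept_stat

  /** Six big-endian XDR words: xid, REPLY, MSG_ACCEPTED, flavor, verifier length, SUCCESS. */
  static byte[] acceptedReplyHeader(int xid) {
    ByteBuffer b = ByteBuffer.allocate(6 * 4);
    b.putInt(xid);          // echo the caller's transaction id
    b.putInt(RPC_REPLY);
    b.putInt(MSG_ACCEPTED);
    b.putInt(AUTH_NONE);    // verifier flavor
    b.putInt(0);            // zero-length verifier body
    b.putInt(SUCCESS);
    return b.array();
  }

  public static void main(String[] args) {
    System.out.println(acceptedReplyHeader(42).length + " header bytes"); // 24
  }
}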
@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* ACCESS3 Response
|
||||
|
@ -43,8 +44,8 @@ public class ACCESS3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
out.writeBoolean(true);
|
||||
postOpAttr.serialize(out);
|
||||
if (this.getStatus() == Nfs3Status.NFS3_OK) {
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* COMMIT3 Response
|
||||
|
@ -47,8 +48,8 @@ public class COMMIT3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
fileWcc.serialize(out);
|
||||
if (getStatus() == Nfs3Status.NFS3_OK) {
|
||||
out.writeLongAsHyper(verf);
|
||||
|
|
|
@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* CREATE3 Response
|
||||
|
@ -55,8 +56,8 @@ public class CREATE3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
if (getStatus() == Nfs3Status.NFS3_OK) {
|
||||
out.writeBoolean(true); // Handle follows
|
||||
objHandle.serialize(out);
|
||||
|
|
|
@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.NfsTime;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* FSINFO3 Response
|
||||
|
@ -109,8 +110,8 @@ public class FSINFO3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
out.writeBoolean(true);
|
||||
postOpAttr.serialize(out);
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* FSSTAT3 Response
|
||||
|
@ -90,8 +91,8 @@ public class FSSTAT3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
out.writeBoolean(true);
|
||||
if (postOpAttr == null) {
|
||||
postOpAttr = new Nfs3FileAttributes();
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* GETATTR3 Response
|
||||
|
@ -40,8 +41,8 @@ public class GETATTR3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
if (getStatus() == Nfs3Status.NFS3_OK) {
|
||||
postOpAttr.serialize(out);
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* LOOKUP3 Response
|
||||
|
@ -61,8 +62,8 @@ public class LOOKUP3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
if (this.status == Nfs3Status.NFS3_OK) {
|
||||
fileHandle.serialize(out);
|
||||
out.writeBoolean(true); // Attribute follows
|
||||
|
|
|
@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* MKDIR3 Response
|
||||
|
@ -55,8 +56,8 @@ public class MKDIR3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
if (getStatus() == Nfs3Status.NFS3_OK) {
|
||||
out.writeBoolean(true); // Handle follows
|
||||
objFileHandle.serialize(out);
|
||||
|
|
|
@@ -19,11 +19,13 @@ package org.apache.hadoop.nfs.nfs3.response;

 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;

 /**
- * Abstract class for a NFSv3 response
+ * Base class for a NFSv3 response. This class and its subclasses contain
+ * the response from NFSv3 handlers.
  */
-abstract public class NFS3Response {
+public class NFS3Response {
   protected int status;

   public NFS3Response(int status) {

@@ -38,8 +40,13 @@ abstract public class NFS3Response {
     this.status = status;
   }

-  public XDR send(XDR out, int xid) {
-    RpcAcceptedReply.voidReply(out, xid);
+  /**
+   * Write the response, along with the rpc header (including verifier), to the
+   * XDR.
+   */
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    RpcAcceptedReply reply = RpcAcceptedReply.getAcceptInstance(xid, verifier);
+    reply.write(out);
     out.writeInt(this.getStatus());
     return out;
   }
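Every response subclass in this patch (ACCESS3, COMMIT3, CREATE3, ..., WRITE3) follows the same mechanical rename from send(XDR, int) to writeHeaderAndResponse(XDR, int, Verifier): call super to emit the RPC header, verifier, and NFS status, then append the operation-specific body. A compact, hedged sketch of that pattern using stand-in types (not the actual Hadoop classes, which live in org.apache.hadoop.nfs.nfs3.response and org.apache.hadoop.oncrpc):

import java.nio.ByteBuffer;

class Xdr {                                     // stand-in for oncrpc XDR
  final ByteBuffer buf = ByteBuffer.allocate(256);
  Xdr writeInt(int v) { buf.putInt(v); return this; }
}

interface Verifier { void write(Xdr out); }     // stand-in for security.Verifier

class VerifierNone implements Verifier {        // AUTH_NONE: flavor 0, empty body
  @Override public void write(Xdr out) { out.writeInt(0).writeInt(0); }
}

class Nfs3Response {                            // mirrors the new base class
  protected final int status;
  Nfs3Response(int status) { this.status = status; }

  /** Write the accepted-reply header, the verifier, then the NFS3 status. */
  Xdr writeHeaderAndResponse(Xdr out, int xid, Verifier verifier) {
    out.writeInt(xid).writeInt(1).writeInt(0);  // xid, REPLY, MSG_ACCEPTED
    verifier.write(out);                        // verifier supplied by the caller
    out.writeInt(0);                            // accept state SUCCESS
    return out.writeInt(status);
  }
}

class GetAttr3Response extends Nfs3Response {   // a subclass appends its body
  GetAttr3Response(int status) { super(status); }
  @Override Xdr writeHeaderAndResponse(Xdr out, int xid, Verifier verifier) {
    super.writeHeaderAndResponse(out, xid, verifier);
    // ... serialize post-op attributes here when status == NFS3_OK ...
    return out;
  }

  public static void main(String[] args) {
    Xdr out = new GetAttr3Response(0)
        .writeHeaderAndResponse(new Xdr(), 42, new VerifierNone());
    System.out.println(out.buf.position() + " bytes written"); // 28
  }
}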
@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* PATHCONF3 Response
|
||||
|
@ -77,8 +78,8 @@ public class PATHCONF3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
out.writeBoolean(true);
|
||||
postOpAttr.serialize(out);
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@ import java.nio.ByteBuffer;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* READ3 Response
|
||||
|
@ -62,8 +63,8 @@ public class READ3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
out.writeBoolean(true); // Attribute follows
|
||||
postOpAttr.serialize(out);
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@ import java.util.List;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* READDIR3 Response
|
||||
|
@ -96,8 +97,8 @@ public class READDIR3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR xdr, int xid) {
|
||||
super.send(xdr, xid);
|
||||
public XDR writeHeaderAndResponse(XDR xdr, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(xdr, xid, verifier);
|
||||
xdr.writeBoolean(true); // Attributes follow
|
||||
postOpDirAttr.serialize(xdr);
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* READDIRPLUS3 Response
|
||||
|
@ -92,8 +93,8 @@ public class READDIRPLUS3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
out.writeBoolean(true); // attributes follow
|
||||
if (postOpDirAttr == null) {
|
||||
postOpDirAttr = new Nfs3FileAttributes();
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* READLINK3 Response
|
||||
|
@ -41,8 +42,8 @@ public class READLINK3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
out.writeBoolean(true); // Attribute follows
|
||||
postOpSymlinkAttr.serialize(out);
|
||||
if (getStatus() == Nfs3Status.NFS3_OK) {
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.nfs.nfs3.response;
|
||||
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* REMOVE3 Response
|
||||
|
@ -35,8 +36,8 @@ public class REMOVE3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
if (dirWcc == null) {
|
||||
dirWcc = new WccData(null, null);
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.nfs.nfs3.response;
|
||||
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* RENAME3 Response
|
||||
|
@ -45,8 +46,8 @@ public class RENAME3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
fromDirWcc.serialize(out);
|
||||
toDirWcc.serialize(out);
|
||||
return out;
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.nfs.nfs3.response;
|
||||
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* RMDIR3 Response
|
||||
|
@ -39,8 +40,8 @@ public class RMDIR3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
dirWcc.serialize(out);
|
||||
return out;
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.nfs.nfs3.response;
|
||||
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* SETATTR3 Response
|
||||
|
@ -39,8 +40,8 @@ public class SETATTR3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
wccData.serialize(out);
|
||||
return out;
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* SYMLINK3 Response
|
||||
|
@ -55,8 +56,8 @@ public class SYMLINK3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
if (this.getStatus() == Nfs3Status.NFS3_OK) {
|
||||
out.writeBoolean(true);
|
||||
objFileHandle.serialize(out);
|
||||
|
|
|
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.nfs.nfs3.response;
-
-import org.apache.hadoop.oncrpc.RpcAcceptedReply;
-import org.apache.hadoop.oncrpc.XDR;
-
-/**
- * A void NFSv3 response
- */
-public class VoidResponse extends NFS3Response {
-
-  public VoidResponse(int status) {
-    super(status);
-  }
-
-  @Override
-  public XDR send(XDR out, int xid) {
-    RpcAcceptedReply.voidReply(out, xid);
-    return out;
-  }
-}
@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
|
||||
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* WRITE3 Response
|
||||
|
@ -58,8 +59,8 @@ public class WRITE3Response extends NFS3Response {
|
|||
}
|
||||
|
||||
@Override
|
||||
public XDR send(XDR out, int xid) {
|
||||
super.send(out, xid);
|
||||
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
|
||||
super.writeHeaderAndResponse(out, xid, verifier);
|
||||
fileWcc.serialize(out);
|
||||
if (getStatus() == Nfs3Status.NFS3_OK) {
|
||||
out.writeInt(count);
|
||||
|
|
|
@ -18,8 +18,6 @@
|
|||
package org.apache.hadoop.oncrpc;
|
||||
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
import org.apache.hadoop.oncrpc.security.RpcAuthInfo;
|
||||
import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
|
||||
|
||||
/**
|
||||
* Represents RPC message MSG_ACCEPTED reply body. See RFC 1831 for details.
|
||||
|
@ -43,43 +41,42 @@ public class RpcAcceptedReply extends RpcReply {
|
|||
return ordinal();
|
||||
}
|
||||
};
|
||||
|
||||
public static RpcAcceptedReply getAcceptInstance(int xid,
|
||||
Verifier verifier) {
|
||||
return getInstance(xid, AcceptState.SUCCESS, verifier);
|
||||
}
|
||||
|
||||
public static RpcAcceptedReply getInstance(int xid, AcceptState state,
|
||||
Verifier verifier) {
|
||||
return new RpcAcceptedReply(xid, ReplyState.MSG_ACCEPTED, verifier,
|
||||
state);
|
||||
}
|
||||
|
||||
private final RpcAuthInfo verifier;
|
||||
private final AcceptState acceptState;
|
||||
|
||||
RpcAcceptedReply(int xid, RpcMessage.Type messageType, ReplyState state,
|
||||
RpcAuthInfo verifier, AcceptState acceptState) {
|
||||
super(xid, messageType, state);
|
||||
this.verifier = verifier;
|
||||
RpcAcceptedReply(int xid, ReplyState state, Verifier verifier,
|
||||
AcceptState acceptState) {
|
||||
super(xid, state, verifier);
|
||||
this.acceptState = acceptState;
|
||||
}
|
||||
|
||||
public static RpcAcceptedReply read(int xid, RpcMessage.Type messageType,
|
||||
ReplyState replyState, XDR xdr) {
|
||||
public static RpcAcceptedReply read(int xid, ReplyState replyState, XDR xdr) {
|
||||
Verifier verifier = Verifier.readFlavorAndVerifier(xdr);
|
||||
AcceptState acceptState = AcceptState.fromValue(xdr.readInt());
|
||||
return new RpcAcceptedReply(xid, messageType, replyState, verifier,
|
||||
acceptState);
|
||||
}
|
||||
|
||||
public RpcAuthInfo getVerifier() {
|
||||
return verifier;
|
||||
return new RpcAcceptedReply(xid, replyState, verifier, acceptState);
|
||||
}
|
||||
|
||||
public AcceptState getAcceptState() {
|
||||
return acceptState;
|
||||
}
|
||||
|
||||
public static XDR voidReply(XDR xdr, int xid) {
|
||||
return voidReply(xdr, xid, AcceptState.SUCCESS);
|
||||
}
|
||||
|
||||
public static XDR voidReply(XDR xdr, int xid, AcceptState acceptState) {
|
||||
@Override
|
||||
public XDR write(XDR xdr) {
|
||||
xdr.writeInt(xid);
|
||||
xdr.writeInt(RpcMessage.Type.RPC_REPLY.getValue());
|
||||
xdr.writeInt(ReplyState.MSG_ACCEPTED.getValue());
|
||||
xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
|
||||
xdr.writeVariableOpaque(new byte[0]);
|
||||
xdr.writeInt(messageType.getValue());
|
||||
xdr.writeInt(replyState.getValue());
|
||||
Verifier.writeFlavorAndVerifier(verifier, xdr);
|
||||
xdr.writeInt(acceptState.getValue());
|
||||
return xdr;
|
||||
}
|
||||
|
|
|
@ -28,11 +28,25 @@ import org.apache.hadoop.oncrpc.security.Verifier;
|
|||
public class RpcCall extends RpcMessage {
|
||||
public static final int RPC_VERSION = 2;
|
||||
private static final Log LOG = LogFactory.getLog(RpcCall.class);
|
||||
|
||||
public static RpcCall read(XDR xdr) {
|
||||
return new RpcCall(xdr.readInt(), RpcMessage.Type.fromValue(xdr.readInt()),
|
||||
xdr.readInt(), xdr.readInt(), xdr.readInt(), xdr.readInt(),
|
||||
Credentials.readFlavorAndCredentials(xdr),
|
||||
Verifier.readFlavorAndVerifier(xdr));
|
||||
}
|
||||
|
||||
public static RpcCall getInstance(int xid, int program, int version,
|
||||
int procedure, Credentials cred, Verifier verifier) {
|
||||
return new RpcCall(xid, RpcMessage.Type.RPC_CALL, 2, program, version,
|
||||
procedure, cred, verifier);
|
||||
}
|
||||
|
||||
private final int rpcVersion;
|
||||
private final int program;
|
||||
private final int version;
|
||||
private final int procedure;
|
||||
private final Credentials credential;
|
||||
private final Credentials credentials;
|
||||
private final Verifier verifier;
|
||||
|
||||
protected RpcCall(int xid, RpcMessage.Type messageType, int rpcVersion,
|
||||
|
@ -43,7 +57,7 @@ public class RpcCall extends RpcMessage {
|
|||
this.program = program;
|
||||
this.version = version;
|
||||
this.procedure = procedure;
|
||||
this.credential = credential;
|
||||
this.credentials = credential;
|
||||
this.verifier = verifier;
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(this);
|
||||
|
@ -83,28 +97,24 @@ public class RpcCall extends RpcMessage {
|
|||
}
|
||||
|
||||
public Credentials getCredential() {
|
||||
return credential;
|
||||
return credentials;
|
||||
}
|
||||
|
||||
public Verifier getVerifier() {
|
||||
return verifier;
|
||||
}
|
||||
|
||||
public static RpcCall read(XDR xdr) {
|
||||
return new RpcCall(xdr.readInt(), RpcMessage.Type.fromValue(xdr.readInt()),
|
||||
xdr.readInt(), xdr.readInt(), xdr.readInt(), xdr.readInt(),
|
||||
Credentials.readFlavorAndCredentials(xdr),
|
||||
Verifier.readFlavorAndVerifier(xdr));
|
||||
}
|
||||
|
||||
public static void write(XDR out, int xid, int program, int progVersion,
|
||||
int procedure) {
|
||||
out.writeInt(xid);
|
||||
out.writeInt(RpcMessage.Type.RPC_CALL.getValue());
|
||||
out.writeInt(2);
|
||||
out.writeInt(program);
|
||||
out.writeInt(progVersion);
|
||||
out.writeInt(procedure);
|
||||
@Override
|
||||
public XDR write(XDR xdr) {
|
||||
xdr.writeInt(xid);
|
||||
xdr.writeInt(RpcMessage.Type.RPC_CALL.getValue());
|
||||
xdr.writeInt(2);
|
||||
xdr.writeInt(program);
|
||||
xdr.writeInt(version);
|
||||
xdr.writeInt(procedure);
|
||||
Credentials.writeFlavorAndCredentials(credentials, xdr);
|
||||
Verifier.writeFlavorAndVerifier(verifier, xdr);
|
||||
return xdr;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -112,6 +122,6 @@ public class RpcCall extends RpcMessage {
|
|||
return String.format("Xid:%d, messageType:%s, rpcVersion:%d, program:%d,"
|
||||
+ " version:%d, procedure:%d, credential:%s, verifier:%s", xid,
|
||||
messageType, rpcVersion, program, version, procedure,
|
||||
credential.toString(), verifier.toString());
|
||||
credentials.toString(), verifier.toString());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.oncrpc;
|
||||
|
||||
import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
/**
|
||||
* Represents RPC message MSG_DENIED reply body. See RFC 1831 for details.
|
||||
|
@ -40,16 +40,16 @@ public class RpcDeniedReply extends RpcReply {
|
|||
|
||||
private final RejectState rejectState;
|
||||
|
||||
RpcDeniedReply(int xid, RpcMessage.Type messageType, ReplyState replyState,
|
||||
RejectState rejectState) {
|
||||
super(xid, messageType, replyState);
|
||||
public RpcDeniedReply(int xid, ReplyState replyState,
|
||||
RejectState rejectState, Verifier verifier) {
|
||||
super(xid, replyState, verifier);
|
||||
this.rejectState = rejectState;
|
||||
}
|
||||
|
||||
public static RpcDeniedReply read(int xid, RpcMessage.Type messageType,
|
||||
ReplyState replyState, XDR xdr) {
|
||||
public static RpcDeniedReply read(int xid, ReplyState replyState, XDR xdr) {
|
||||
Verifier verifier = Verifier.readFlavorAndVerifier(xdr);
|
||||
RejectState rejectState = RejectState.fromValue(xdr.readInt());
|
||||
return new RpcDeniedReply(xid, messageType, replyState, rejectState);
|
||||
return new RpcDeniedReply(xid, replyState, rejectState, verifier);
|
||||
}
|
||||
|
||||
public RejectState getRejectState() {
|
||||
|
@ -59,17 +59,17 @@ public class RpcDeniedReply extends RpcReply {
|
|||
@Override
|
||||
public String toString() {
|
||||
return new StringBuffer().append("xid:").append(xid)
|
||||
.append(",messageType:").append(messageType).append("rejectState:")
|
||||
.append(",messageType:").append(messageType).append("verifier_flavor:")
|
||||
.append(verifier.getFlavor()).append("rejectState:")
|
||||
.append(rejectState).toString();
|
||||
}
|
||||
|
||||
public static XDR voidReply(XDR xdr, int xid, ReplyState msgAccepted,
|
||||
RejectState rejectState) {
|
||||
@Override
|
||||
public XDR write(XDR xdr) {
|
||||
xdr.writeInt(xid);
|
||||
xdr.writeInt(RpcMessage.Type.RPC_REPLY.getValue());
|
||||
xdr.writeInt(msgAccepted.getValue());
|
||||
xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
|
||||
xdr.writeVariableOpaque(new byte[0]);
|
||||
xdr.writeInt(messageType.getValue());
|
||||
xdr.writeInt(replyState.getValue());
|
||||
Verifier.writeFlavorAndVerifier(verifier, xdr);
|
||||
xdr.writeInt(rejectState.getValue());
|
||||
return xdr;
|
||||
}
|
||||
|
|
|
@@ -50,6 +50,8 @@ public abstract class RpcMessage {
     this.messageType = messageType;
   }

+  public abstract XDR write(XDR xdr);
+
   public int getXid() {
     return xid;
   }
@ -24,6 +24,7 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
|
||||
import org.apache.hadoop.oncrpc.RpcCallCache.CacheEntry;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.apache.hadoop.portmap.PortmapMapping;
|
||||
import org.apache.hadoop.portmap.PortmapRequest;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
|
@ -163,13 +164,17 @@ public abstract class RpcProgram {
|
|||
|
||||
private XDR programMismatch(XDR out, RpcCall call) {
|
||||
LOG.warn("Invalid RPC call program " + call.getProgram());
|
||||
RpcAcceptedReply.voidReply(out, call.getXid(), AcceptState.PROG_UNAVAIL);
|
||||
RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
|
||||
AcceptState.PROG_UNAVAIL, new VerifierNone());
|
||||
reply.write(out);
|
||||
return out;
|
||||
}
|
||||
|
||||
private XDR programVersionMismatch(XDR out, RpcCall call) {
|
||||
LOG.warn("Invalid RPC call version " + call.getVersion());
|
||||
RpcAcceptedReply.voidReply(out, call.getXid(), AcceptState.PROG_MISMATCH);
|
||||
RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
|
||||
AcceptState.PROG_MISMATCH, new VerifierNone());
|
||||
reply.write(out);
|
||||
out.writeInt(lowProgVersion);
|
||||
out.writeInt(highProgVersion);
|
||||
return out;
|
||||
|
|
|
@ -17,6 +17,11 @@
|
|||
*/
|
||||
package org.apache.hadoop.oncrpc;
|
||||
|
||||
import org.apache.hadoop.oncrpc.security.RpcAuthInfo;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* Represents an RPC message of type RPC reply as defined in RFC 1831
|
||||
*/
|
||||
|
@ -36,28 +41,35 @@ public abstract class RpcReply extends RpcMessage {
|
|||
}
|
||||
}
|
||||
|
||||
private final ReplyState state;
|
||||
protected final ReplyState replyState;
|
||||
protected final Verifier verifier;
|
||||
|
||||
RpcReply(int xid, RpcMessage.Type messageType, ReplyState state) {
|
||||
super(xid, messageType);
|
||||
this.state = state;
|
||||
validateMessageType(RpcMessage.Type.RPC_REPLY);
|
||||
RpcReply(int xid, ReplyState state, Verifier verifier) {
|
||||
super(xid, RpcMessage.Type.RPC_REPLY);
|
||||
this.replyState = state;
|
||||
this.verifier = verifier;
|
||||
}
|
||||
|
||||
public RpcAuthInfo getVerifier() {
|
||||
return verifier;
|
||||
}
|
||||
|
||||
public static RpcReply read(XDR xdr) {
|
||||
int xid = xdr.readInt();
|
||||
final Type messageType = Type.fromValue(xdr.readInt());
|
||||
Preconditions.checkState(messageType == RpcMessage.Type.RPC_REPLY);
|
||||
|
||||
ReplyState stat = ReplyState.fromValue(xdr.readInt());
|
||||
switch (stat) {
|
||||
case MSG_ACCEPTED:
|
||||
return RpcAcceptedReply.read(xid, messageType, stat, xdr);
|
||||
return RpcAcceptedReply.read(xid, stat, xdr);
|
||||
case MSG_DENIED:
|
||||
return RpcDeniedReply.read(xid, messageType, stat, xdr);
|
||||
return RpcDeniedReply.read(xid, stat, xdr);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public ReplyState getState() {
|
||||
return state;
|
||||
return replyState;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -57,8 +57,7 @@ public class SimpleUdpClient {
|
|||
clientSocket.receive(receivePacket);
|
||||
|
||||
// Check reply status
|
||||
XDR xdr = new XDR();
|
||||
xdr.writeFixedOpaque(Arrays.copyOfRange(receiveData, 0,
|
||||
XDR xdr = new XDR(Arrays.copyOfRange(receiveData, 0,
|
||||
receivePacket.getLength()));
|
||||
RpcReply reply = RpcReply.read(xdr);
|
||||
if (reply.getState() != RpcReply.ReplyState.MSG_ACCEPTED) {
|
||||
|
|
|
@ -43,13 +43,14 @@ public class SimpleUdpServerHandler extends SimpleChannelHandler {
|
|||
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
|
||||
ChannelBuffer buf = (ChannelBuffer) e.getMessage();
|
||||
|
||||
XDR request = new XDR();
|
||||
|
||||
request.writeFixedOpaque(buf.array());
|
||||
XDR request = new XDR(buf.array());
|
||||
|
||||
InetAddress remoteInetAddr = ((InetSocketAddress) e.getRemoteAddress())
|
||||
.getAddress();
|
||||
XDR response = rpcProgram.handle(request, remoteInetAddr, null);
|
||||
e.getChannel().write(XDR.writeMessageUdp(response), e.getRemoteAddress());
|
||||
|
||||
e.getChannel().write(XDR.writeMessageUdp(response.asReadOnlyWrap()),
|
||||
e.getRemoteAddress());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
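The SimpleUdpClient and SimpleUdpServerHandler hunks above stop copying received bytes into a fresh XDR via writeFixedOpaque and instead pass them to the new XDR(byte[]) constructor, which (per the XDR rewrite below) wraps the array as a read-only buffer without copying it; that is one piece of HADOOP-9669's goal of reducing byte-array creations and copies. A hedged, standalone sketch of the underlying java.nio idiom (the example data and class name are illustrative):

import java.nio.ByteBuffer;
import java.util.Arrays;

public class ReadOnlyWrapDemo {
  public static void main(String[] args) {
    byte[] datagram = {0, 0, 0, 42, 0, 0, 0, 1};   // pretend this came off the wire

    // Old style: copy the payload into a second array before parsing.
    byte[] copy = Arrays.copyOfRange(datagram, 0, datagram.length);

    // New style: wrap the original array read-only; no copy is made, callers
    // cannot mutate the message through the buffer, but they must not modify
    // the backing array while the buffer is in use.
    ByteBuffer wrapped = ByteBuffer.wrap(datagram).asReadOnlyBuffer();

    System.out.println("xid from copy:    " + ByteBuffer.wrap(copy).getInt());
    System.out.println("xid from wrapped: " + wrapped.getInt());
  }
}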
@ -17,402 +17,253 @@
|
|||
*/
|
||||
package org.apache.hadoop.oncrpc;
|
||||
|
||||
import java.io.PrintStream;
|
||||
import java.util.Arrays;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
import org.jboss.netty.buffer.ChannelBuffers;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* Utility class for building XDR messages based on RFC 4506.
|
||||
* <p>
|
||||
* This class maintains a buffer into which java types are written as
|
||||
* XDR types for building XDR messages. Similarly this class can
|
||||
* be used to get java types from an XDR request or response.
|
||||
* <p>
|
||||
* Currently only a subset of XDR types defined in RFC 4506 are supported.
|
||||
*
|
||||
* Key points of the format:
|
||||
*
|
||||
* <ul>
|
||||
* <li>Primitives are stored in big-endian order (i.e., the default byte order
|
||||
* of ByteBuffer).</li>
|
||||
* <li>Booleans are stored as an integer.</li>
|
||||
* <li>Each field in the message is always aligned by 4.</li>
|
||||
* </ul>
|
||||
*
|
||||
*/
|
||||
public class XDR {
|
||||
private final static String HEXES = "0123456789abcdef";
|
||||
|
||||
/** Internal buffer for reading or writing to */
|
||||
private byte[] bytearr;
|
||||
|
||||
/** Place to read from or write to */
|
||||
private int cursor;
|
||||
public final class XDR {
|
||||
private static final int DEFAULT_INITIAL_CAPACITY = 256;
|
||||
private static final int SIZEOF_INT = 4;
|
||||
private static final int SIZEOF_LONG = 8;
|
||||
private static final byte[] PADDING_BYTES = new byte[] { 0, 0, 0, 0 };
|
||||
|
||||
public XDR() {
|
||||
this(new byte[0]);
|
||||
private ByteBuffer buf;
|
||||
|
||||
private enum State {
|
||||
READING, WRITING,
|
||||
}
|
||||
|
||||
public XDR(byte[] data) {
|
||||
bytearr = Arrays.copyOf(data, data.length);
|
||||
cursor = 0;
|
||||
private final State state;
|
||||
|
||||
/**
|
||||
* Construct a new XDR message buffer.
|
||||
*
|
||||
* @param initialCapacity
|
||||
* the initial capacity of the buffer.
|
||||
*/
|
||||
public XDR(int initialCapacity) {
|
||||
this(ByteBuffer.allocate(initialCapacity), State.WRITING);
|
||||
}
|
||||
|
||||
public XDR() {
|
||||
this(DEFAULT_INITIAL_CAPACITY);
|
||||
}
|
||||
|
||||
private XDR(ByteBuffer buf, State state) {
|
||||
this.buf = buf;
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param bytes bytes to be appended to internal buffer
|
||||
* Wraps a byte array as a read-only XDR message. There's no copy involved,
|
||||
* thus it is the client's responsibility to ensure that the byte array
|
||||
* remains unmodified when using the XDR object.
|
||||
*
|
||||
* @param src
|
||||
* the byte array to be wrapped.
|
||||
*/
|
||||
private void append(byte[] bytesToAdd) {
|
||||
bytearr = append(bytearr, bytesToAdd);
|
||||
public XDR(byte[] src) {
|
||||
this(ByteBuffer.wrap(src).asReadOnlyBuffer(), State.READING);
|
||||
}
|
||||
|
||||
public XDR asReadOnlyWrap() {
|
||||
ByteBuffer b = buf.asReadOnlyBuffer();
|
||||
if (state == State.WRITING) {
|
||||
b.flip();
|
||||
}
|
||||
|
||||
XDR n = new XDR(b, State.READING);
|
||||
return n;
|
||||
}
|
||||
|
||||
public int size() {
|
||||
return bytearr.length;
|
||||
// TODO: This overloading intends to be compatible with the semantics of
|
||||
// the previous version of the class. This function should be separated into
|
||||
// two with clear semantics.
|
||||
return state == State.READING ? buf.limit() : buf.position();
|
||||
}
|
||||
|
||||
/** Skip some bytes by moving the cursor */
|
||||
public void skip(int size) {
|
||||
cursor += size;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write Java primitive integer as XDR signed integer.
|
||||
*
|
||||
* Definition of XDR signed integer from RFC 4506:
|
||||
* <pre>
|
||||
* An XDR signed integer is a 32-bit datum that encodes an integer in
|
||||
* the range [-2147483648,2147483647]. The integer is represented in
|
||||
* two's complement notation. The most and least significant bytes are
|
||||
* 0 and 3, respectively. Integers are declared as follows:
|
||||
*
|
||||
* int identifier;
|
||||
*
|
||||
* (MSB) (LSB)
|
||||
* +-------+-------+-------+-------+
|
||||
* |byte 0 |byte 1 |byte 2 |byte 3 | INTEGER
|
||||
* +-------+-------+-------+-------+
|
||||
* <------------32 bits------------>
|
||||
* </pre>
|
||||
*/
|
||||
public void writeInt(int data) {
|
||||
append(toBytes(data));
|
||||
}
|
||||
|
||||
/**
|
||||
* Read an XDR signed integer and return as Java primitive integer.
|
||||
*/
|
||||
public int readInt() {
|
||||
byte byte0 = bytearr[cursor++];
|
||||
byte byte1 = bytearr[cursor++];
|
||||
byte byte2 = bytearr[cursor++];
|
||||
byte byte3 = bytearr[cursor++];
|
||||
return (XDR.toShort(byte0) << 24) + (XDR.toShort(byte1) << 16)
|
||||
+ (XDR.toShort(byte2) << 8) + XDR.toShort(byte3);
|
||||
Preconditions.checkState(state == State.READING);
|
||||
return buf.getInt();
|
||||
}
|
||||
|
||||
/**
|
||||
* Write Java primitive boolean as an XDR boolean.
|
||||
*
|
||||
* Definition of XDR boolean from RFC 4506:
|
||||
* <pre>
|
||||
* Booleans are important enough and occur frequently enough to warrant
|
||||
* their own explicit type in the standard. Booleans are declared as
|
||||
* follows:
|
||||
*
|
||||
* bool identifier;
|
||||
*
|
||||
* This is equivalent to:
|
||||
*
|
||||
* enum { FALSE = 0, TRUE = 1 } identifier;
|
||||
* </pre>
|
||||
*/
|
||||
public void writeBoolean(boolean data) {
|
||||
this.writeInt(data ? 1 : 0);
|
||||
public void writeInt(int v) {
|
||||
ensureFreeSpace(SIZEOF_INT);
|
||||
buf.putInt(v);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read an XDR boolean and return as Java primitive boolean.
|
||||
*/
|
||||
public boolean readBoolean() {
|
||||
return readInt() == 0 ? false : true;
|
||||
Preconditions.checkState(state == State.READING);
|
||||
return buf.getInt() != 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write Java primitive long to an XDR signed long.
|
||||
*
|
||||
* Definition of XDR signed long from RFC 4506:
|
||||
* <pre>
|
||||
* The standard also defines 64-bit (8-byte) numbers called hyper
|
||||
* integers and unsigned hyper integers. Their representations are the
|
||||
* obvious extensions of integer and unsigned integer defined above.
|
||||
* They are represented in two's complement notation.The most and
|
||||
* least significant bytes are 0 and 7, respectively. Their
|
||||
* declarations:
|
||||
*
|
||||
* hyper identifier; unsigned hyper identifier;
|
||||
*
|
||||
* (MSB) (LSB)
|
||||
* +-------+-------+-------+-------+-------+-------+-------+-------+
|
||||
* |byte 0 |byte 1 |byte 2 |byte 3 |byte 4 |byte 5 |byte 6 |byte 7 |
|
||||
* +-------+-------+-------+-------+-------+-------+-------+-------+
|
||||
* <----------------------------64 bits---------------------------->
|
||||
* HYPER INTEGER
|
||||
* UNSIGNED HYPER INTEGER
|
||||
* </pre>
|
||||
*/
|
||||
public void writeLongAsHyper(long data) {
|
||||
byte byte0 = (byte) ((data & 0xff00000000000000l) >> 56);
|
||||
byte byte1 = (byte) ((data & 0x00ff000000000000l) >> 48);
|
||||
byte byte2 = (byte) ((data & 0x0000ff0000000000l) >> 40);
|
||||
byte byte3 = (byte) ((data & 0x000000ff00000000l) >> 32);
|
||||
byte byte4 = (byte) ((data & 0x00000000ff000000l) >> 24);
|
||||
byte byte5 = (byte) ((data & 0x0000000000ff0000l) >> 16);
|
||||
byte byte6 = (byte) ((data & 0x000000000000ff00l) >> 8);
|
||||
byte byte7 = (byte) ((data & 0x00000000000000ffl));
|
||||
this.append(new byte[] { byte0, byte1, byte2, byte3, byte4, byte5, byte6, byte7 });
|
||||
public void writeBoolean(boolean v) {
|
||||
ensureFreeSpace(SIZEOF_INT);
|
||||
buf.putInt(v ? 1 : 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Read XDR signed hyper and return as java primitive long.
|
||||
*/
|
||||
public long readHyper() {
|
||||
byte byte0 = bytearr[cursor++];
|
||||
byte byte1 = bytearr[cursor++];
|
||||
byte byte2 = bytearr[cursor++];
|
||||
byte byte3 = bytearr[cursor++];
|
||||
byte byte4 = bytearr[cursor++];
|
||||
byte byte5 = bytearr[cursor++];
|
||||
byte byte6 = bytearr[cursor++];
|
||||
byte byte7 = bytearr[cursor++];
|
||||
return ((long) XDR.toShort(byte0) << 56)
|
||||
+ ((long) XDR.toShort(byte1) << 48) + ((long) XDR.toShort(byte2) << 40)
|
||||
+ ((long) XDR.toShort(byte3) << 32) + ((long) XDR.toShort(byte4) << 24)
|
||||
+ ((long) XDR.toShort(byte5) << 16) + ((long) XDR.toShort(byte6) << 8)
|
||||
+ XDR.toShort(byte7);
|
||||
Preconditions.checkState(state == State.READING);
|
||||
return buf.getLong();
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a Java primitive byte array to XDR fixed-length opaque data.
|
||||
*
|
||||
* Defintion of fixed-length opaque data from RFC 4506:
|
||||
* <pre>
|
||||
* At times, fixed-length uninterpreted data needs to be passed among
|
||||
* machines. This data is called "opaque" and is declared as follows:
|
||||
*
|
||||
* opaque identifier[n];
|
||||
*
|
||||
* where the constant n is the (static) number of bytes necessary to
|
||||
* contain the opaque data. If n is not a multiple of four, then the n
|
||||
* bytes are followed by enough (0 to 3) residual zero bytes, r, to make
|
||||
* the total byte count of the opaque object a multiple of four.
|
||||
*
|
||||
* 0 1 ...
|
||||
* +--------+--------+...+--------+--------+...+--------+
|
||||
* | byte 0 | byte 1 |...|byte n-1| 0 |...| 0 |
|
||||
* +--------+--------+...+--------+--------+...+--------+
|
||||
* |<-----------n bytes---------->|<------r bytes------>|
|
||||
* |<-----------n+r (where (n+r) mod 4 = 0)------------>|
|
||||
* FIXED-LENGTH OPAQUE
|
||||
* </pre>
|
||||
*/
|
||||
public void writeFixedOpaque(byte[] data) {
|
||||
writeFixedOpaque(data, data.length);
|
||||
}
|
||||
|
||||
public void writeFixedOpaque(byte[] data, int length) {
|
||||
append(Arrays.copyOf(data, length + XDR.pad(length, 4)));
|
||||
public void writeLongAsHyper(long v) {
|
||||
ensureFreeSpace(SIZEOF_LONG);
|
||||
buf.putLong(v);
|
||||
}
|
||||
|
||||
public byte[] readFixedOpaque(int size) {
|
||||
byte[] ret = new byte[size];
|
||||
for(int i = 0; i < size; i++) {
|
||||
ret[i] = bytearr[cursor];
|
||||
cursor++;
|
||||
}
|
||||
|
||||
for(int i = 0; i < XDR.pad(size, 4); i++) {
|
||||
cursor++;
|
||||
}
|
||||
return ret;
|
||||
Preconditions.checkState(state == State.READING);
|
||||
byte[] r = new byte[size];
|
||||
buf.get(r);
|
||||
alignPosition();
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a Java primitive byte array as XDR variable-length opque data.
|
||||
*
|
||||
* Definition of XDR variable-length opaque data RFC 4506:
|
||||
*
|
||||
* <pre>
|
||||
* The standard also provides for variable-length (counted) opaque data,
|
||||
* defined as a sequence of n (numbered 0 through n-1) arbitrary bytes
|
||||
* to be the number n encoded as an unsigned integer (as described
|
||||
* below), and followed by the n bytes of the sequence.
|
||||
*
|
||||
* Byte m of the sequence always precedes byte m+1 of the sequence, and
|
||||
* byte 0 of the sequence always follows the sequence's length (count).
|
||||
* If n is not a multiple of four, then the n bytes are followed by
|
||||
* enough (0 to 3) residual zero bytes, r, to make the total byte count
|
||||
* a multiple of four. Variable-length opaque data is declared in the
|
||||
* following way:
|
||||
*
|
||||
* opaque identifier<m>;
|
||||
* or
|
||||
* opaque identifier<>;
|
||||
*
|
||||
* The constant m denotes an upper bound of the number of bytes that the
|
||||
* sequence may contain. If m is not specified, as in the second
|
||||
* declaration, it is assumed to be (2**32) - 1, the maximum length.
|
||||
*
|
||||
* The constant m would normally be found in a protocol specification.
|
||||
* For example, a filing protocol may state that the maximum data
|
||||
* transfer size is 8192 bytes, as follows:
|
||||
*
|
||||
* opaque filedata<8192>;
|
||||
*
|
||||
* 0 1 2 3 4 5 ...
|
||||
* +-----+-----+-----+-----+-----+-----+...+-----+-----+...+-----+
|
||||
* | length n |byte0|byte1|...| n-1 | 0 |...| 0 |
|
||||
* +-----+-----+-----+-----+-----+-----+...+-----+-----+...+-----+
|
||||
* |<-------4 bytes------->|<------n bytes------>|<---r bytes--->|
|
||||
* |<----n+r (where (n+r) mod 4 = 0)---->|
|
||||
* VARIABLE-LENGTH OPAQUE
|
||||
*
|
||||
* It is an error to encode a length greater than the maximum described
|
||||
* in the specification.
|
||||
* </pre>
|
||||
*/
|
||||
public void writeVariableOpaque(byte[] data) {
|
||||
this.writeInt(data.length);
|
||||
this.writeFixedOpaque(data);
|
||||
public void writeFixedOpaque(byte[] src, int length) {
|
||||
ensureFreeSpace(alignUp(length));
|
||||
buf.put(src, 0, length);
|
||||
writePadding();
|
||||
}
|
||||
|
||||
public void writeFixedOpaque(byte[] src) {
|
||||
writeFixedOpaque(src, src.length);
|
||||
}
|
||||
|
||||
public byte[] readVariableOpaque() {
|
||||
int size = this.readInt();
|
||||
return size != 0 ? this.readFixedOpaque(size) : new byte[0];
|
||||
Preconditions.checkState(state == State.READING);
|
||||
int size = readInt();
|
||||
return readFixedOpaque(size);
|
||||
}
|
||||
|
||||
public void skipVariableOpaque() {
|
||||
int length= this.readInt();
|
||||
this.skip(length+XDR.pad(length, 4));
|
||||
}
|
||||
|
||||
/**
|
||||
* Write Java String as XDR string.
|
||||
*
|
||||
* Definition of XDR string from RFC 4506:
|
||||
*
|
||||
* <pre>
|
||||
* The standard defines a string of n (numbered 0 through n-1) ASCII
|
||||
* bytes to be the number n encoded as an unsigned integer (as described
|
||||
* above), and followed by the n bytes of the string. Byte m of the
|
||||
* string always precedes byte m+1 of the string, and byte 0 of the
|
||||
* string always follows the string's length. If n is not a multiple of
|
||||
* four, then the n bytes are followed by enough (0 to 3) residual zero
|
||||
* bytes, r, to make the total byte count a multiple of four. Counted
|
||||
* byte strings are declared as follows:
|
||||
*
|
||||
* string object<m>;
|
||||
* or
|
||||
* string object<>;
|
||||
*
|
||||
* The constant m denotes an upper bound of the number of bytes that a
|
||||
* string may contain. If m is not specified, as in the second
|
||||
* declaration, it is assumed to be (2**32) - 1, the maximum length.
|
||||
* The constant m would normally be found in a protocol specification.
|
||||
* For example, a filing protocol may state that a file name can be no
|
||||
* longer than 255 bytes, as follows:
|
||||
*
|
||||
* string filename<255>;
|
||||
*
|
||||
* 0 1 2 3 4 5 ...
|
||||
* +-----+-----+-----+-----+-----+-----+...+-----+-----+...+-----+
|
||||
* | length n |byte0|byte1|...| n-1 | 0 |...| 0 |
|
||||
* +-----+-----+-----+-----+-----+-----+...+-----+-----+...+-----+
|
||||
* |<-------4 bytes------->|<------n bytes------>|<---r bytes--->|
|
||||
* |<----n+r (where (n+r) mod 4 = 0)---->|
|
||||
* STRING
|
||||
* It is an error to encode a length greater than the maximum described
|
||||
* in the specification.
|
||||
* </pre>
|
||||
*/
|
||||
public void writeString(String data) {
|
||||
this.writeVariableOpaque(data.getBytes());
|
||||
public void writeVariableOpaque(byte[] src) {
|
||||
ensureFreeSpace(SIZEOF_INT + alignUp(src.length));
|
||||
buf.putInt(src.length);
|
||||
writeFixedOpaque(src);
|
||||
}
|
||||
|
||||
public String readString() {
|
||||
return new String(this.readVariableOpaque());
|
||||
return new String(readVariableOpaque());
|
||||
}
|
||||
|
||||
public void dump(PrintStream out) {
|
||||
for(int i = 0; i < bytearr.length; i += 4) {
|
||||
out.println(hex(bytearr[i]) + " " + hex(bytearr[i + 1]) + " "
|
||||
+ hex(bytearr[i + 2]) + " " + hex(bytearr[i + 3]));
|
||||
public void writeString(String s) {
|
||||
writeVariableOpaque(s.getBytes());
|
||||
}
|
||||
|
||||
private void writePadding() {
|
||||
Preconditions.checkState(state == State.WRITING);
|
||||
int p = pad(buf.position());
|
||||
ensureFreeSpace(p);
|
||||
buf.put(PADDING_BYTES, 0, p);
|
||||
}
|
||||
|
||||
private int alignUp(int length) {
|
||||
return length + pad(length);
|
||||
}
|
||||
|
||||
private int pad(int length) {
|
||||
switch (length % 4) {
|
||||
case 1:
|
||||
return 3;
|
||||
case 2:
|
||||
return 2;
|
||||
case 3:
|
||||
return 1;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public byte[] getBytes() {
|
||||
return Arrays.copyOf(bytearr, bytearr.length);
|
||||
private void alignPosition() {
|
||||
buf.position(alignUp(buf.position()));
|
||||
}
|
||||
|
||||
public static byte[] append(byte[] bytes, byte[] bytesToAdd) {
|
||||
byte[] newByteArray = new byte[bytes.length + bytesToAdd.length];
|
||||
System.arraycopy(bytes, 0, newByteArray, 0, bytes.length);
|
||||
System.arraycopy(bytesToAdd, 0, newByteArray, bytes.length, bytesToAdd.length);
|
||||
return newByteArray;
|
||||
private void ensureFreeSpace(int size) {
|
||||
Preconditions.checkState(state == State.WRITING);
|
||||
if (buf.remaining() < size) {
|
||||
int newCapacity = buf.capacity() * 2;
|
||||
int newRemaining = buf.capacity() + buf.remaining();
|
||||
|
||||
while (newRemaining < size) {
|
||||
newRemaining += newCapacity;
|
||||
newCapacity *= 2;
|
||||
}
|
||||
|
||||
ByteBuffer newbuf = ByteBuffer.allocate(newCapacity);
|
||||
buf.flip();
|
||||
newbuf.put(buf);
|
||||
buf = newbuf;
|
||||
}
|
||||
}
|
||||
|
||||
private static int pad(int x, int y) {
|
||||
return x % y == 0 ? 0 : y - (x % y);
|
||||
}
|
||||
|
||||
static byte[] toBytes(int n) {
|
||||
byte[] ret = { (byte) ((n & 0xff000000) >> 24),
|
||||
(byte) ((n & 0x00ff0000) >> 16), (byte) ((n & 0x0000ff00) >> 8),
|
||||
(byte) (n & 0x000000ff) };
|
||||
return ret;
|
||||
}
|
||||
|
||||
private static short toShort(byte b) {
|
||||
return b < 0 ? (short) (b + 256): (short) b;
|
||||
}
|
||||
|
||||
private static String hex(byte b) {
|
||||
return "" + HEXES.charAt((b & 0xF0) >> 4) + HEXES.charAt((b & 0x0F));
|
||||
/** check if the rest of data has more than len bytes */
|
||||
public static boolean verifyLength(XDR xdr, int len) {
|
||||
return xdr.buf.remaining() >= len;
|
||||
}
|
||||
|
||||
private static byte[] recordMark(int size, boolean last) {
|
||||
return toBytes(!last ? size : size | 0x80000000);
|
||||
}
|
||||
|
||||
public static byte[] getVariableOpque(byte[] data) {
|
||||
byte[] bytes = toBytes(data.length);
|
||||
return append(bytes, Arrays.copyOf(data, data.length + XDR.pad(data.length, 4)));
|
||||
}
|
||||
|
||||
public static int fragmentSize(byte[] mark) {
|
||||
int n = (XDR.toShort(mark[0]) << 24) + (XDR.toShort(mark[1]) << 16)
|
||||
+ (XDR.toShort(mark[2]) << 8) + XDR.toShort(mark[3]);
|
||||
return n & 0x7fffffff;
|
||||
}
|
||||
|
||||
public static boolean isLastFragment(byte[] mark) {
|
||||
int n = (XDR.toShort(mark[0]) << 24) + (XDR.toShort(mark[1]) << 16)
|
||||
+ (XDR.toShort(mark[2]) << 8) + XDR.toShort(mark[3]);
|
||||
return (n & 0x80000000) != 0;
|
||||
}
|
||||
|
||||
/** check if the rest of data has more than <len> bytes */
|
||||
public static boolean verifyLength(XDR xdr, int len) {
|
||||
return (xdr.bytearr.length - xdr.cursor) >= len;
|
||||
byte[] b = new byte[SIZEOF_INT];
|
||||
ByteBuffer buf = ByteBuffer.wrap(b);
|
||||
buf.putInt(!last ? size : size | 0x80000000);
|
||||
return b;
|
||||
}
|
||||
|
||||
/** Write an XDR message to a TCP ChannelBuffer */
|
||||
public static ChannelBuffer writeMessageTcp(XDR request, boolean last) {
|
||||
byte[] fragmentHeader = XDR.recordMark(request.bytearr.length, last);
|
||||
ChannelBuffer outBuf = ChannelBuffers.buffer(fragmentHeader.length
|
||||
+ request.bytearr.length);
|
||||
outBuf.writeBytes(fragmentHeader);
|
||||
outBuf.writeBytes(request.bytearr);
|
||||
return outBuf;
|
||||
Preconditions.checkState(request.state == XDR.State.WRITING);
|
||||
ByteBuffer b = request.buf.duplicate();
|
||||
b.flip();
|
||||
byte[] fragmentHeader = XDR.recordMark(b.limit(), last);
|
||||
ByteBuffer headerBuf = ByteBuffer.wrap(fragmentHeader);
|
||||
|
||||
// TODO: Investigate whether making a copy of the buffer is necessary.
|
||||
return ChannelBuffers.copiedBuffer(headerBuf, b);
|
||||
}
|
||||
|
||||
/** Write an XDR message to a UDP ChannelBuffer */
|
||||
public static ChannelBuffer writeMessageUdp(XDR response) {
|
||||
ChannelBuffer outBuf = ChannelBuffers.buffer(response.bytearr.length);
|
||||
outBuf.writeBytes(response.bytearr);
|
||||
return outBuf;
|
||||
Preconditions.checkState(response.state == XDR.State.READING);
|
||||
// TODO: Investigate whether making a copy of the buffer is necessary.
|
||||
return ChannelBuffers.copiedBuffer(response.buf);
|
||||
}
|
||||
}
|
||||
|
||||
public static int fragmentSize(byte[] mark) {
|
||||
ByteBuffer b = ByteBuffer.wrap(mark);
|
||||
int n = b.getInt();
|
||||
return n & 0x7fffffff;
|
||||
}
|
||||
|
||||
public static boolean isLastFragment(byte[] mark) {
|
||||
ByteBuffer b = ByteBuffer.wrap(mark);
|
||||
int n = b.getInt();
|
||||
return (n & 0x80000000) != 0;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public byte[] getBytes() {
|
||||
ByteBuffer d = buf.duplicate();
|
||||
byte[] b = new byte[d.position()];
|
||||
d.flip();
|
||||
d.get(b);
|
||||
|
||||
return b;
|
||||
}
|
||||
}
|
|
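The largest hunk in this commit rewrites org.apache.hadoop.oncrpc.XDR from a grow-by-copy byte[] with a manual cursor into a ByteBuffer-backed buffer with explicit READING/WRITING states, 4-byte alignment helpers, and record-mark framing for TCP. Two details worth calling out are the padding rule (every opaque field is padded to a multiple of four bytes, per RFC 4506) and the record mark (a 4-byte fragment header whose top bit flags the last fragment, per RFC 1831). A standalone sketch of both rules in plain Java, mirroring the pad/recordMark/fragmentSize/isLastFragment helpers in the diff (class name is illustrative):

import java.nio.ByteBuffer;

public class XdrFramingDemo {
  /** Zero-padding bytes needed so an n-byte opaque field ends on a 4-byte boundary. */
  static int pad(int n) {
    return (4 - (n % 4)) % 4;
  }

  /** TCP record mark: fragment length in the low 31 bits, "last fragment" in the top bit. */
  static byte[] recordMark(int size, boolean last) {
    return ByteBuffer.allocate(4).putInt(last ? size | 0x80000000 : size).array();
  }

  static int fragmentSize(byte[] mark) {
    return ByteBuffer.wrap(mark).getInt() & 0x7fffffff;
  }

  static boolean isLastFragment(byte[] mark) {
    return (ByteBuffer.wrap(mark).getInt() & 0x80000000) != 0;
  }

  public static void main(String[] args) {
    // A 5-byte opaque body is followed by 3 padding bytes on the wire.
    System.out.println("pad(5) = " + pad(5));                      // 3
    byte[] mark = recordMark(1234, true);
    System.out.println("size = " + fragmentSize(mark)
        + ", last = " + isLastFragment(mark));                     // 1234, true
  }
}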
@ -45,6 +45,22 @@ public abstract class Credentials extends RpcAuthInfo {
|
|||
return credentials;
|
||||
}
|
||||
|
||||
/**
|
||||
* Write AuthFlavor and the credentials to the XDR
|
||||
*/
|
||||
public static void writeFlavorAndCredentials(Credentials cred, XDR xdr) {
|
||||
if (cred instanceof CredentialsNone) {
|
||||
xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
|
||||
} else if (cred instanceof CredentialsSys) {
|
||||
xdr.writeInt(AuthFlavor.AUTH_SYS.getValue());
|
||||
} else if (cred instanceof CredentialsGSS) {
|
||||
xdr.writeInt(AuthFlavor.RPCSEC_GSS.getValue());
|
||||
} else {
|
||||
throw new UnsupportedOperationException("Cannot recognize the verifier");
|
||||
}
|
||||
cred.write(xdr);
|
||||
}
|
||||
|
||||
protected int mCredentialsLength;
|
||||
|
||||
protected Credentials(AuthFlavor flavor) {
|
||||
|
|
|
@ -20,10 +20,11 @@ package org.apache.hadoop.oncrpc.security;
|
|||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
|
||||
|
||||
/**
|
||||
* Base class for verifier. Currently we only support 3 types of auth flavors:
|
||||
* {@link AuthFlavor#AUTH_NONE}, {@link AuthFlavor#AUTH_SYS},
|
||||
* and {@link AuthFlavor#RPCSEC_GSS}.
|
||||
/**
|
||||
* Base class for verifier. Currently our authentication only supports 3 types
|
||||
* of auth flavors: {@link AuthFlavor#AUTH_NONE}, {@link AuthFlavor#AUTH_SYS},
|
||||
* and {@link AuthFlavor#RPCSEC_GSS}. Thus for verifier we only need to handle
|
||||
* AUTH_NONE and RPCSEC_GSS
|
||||
*/
|
||||
public abstract class Verifier extends RpcAuthInfo {
|
||||
|
||||
|
@ -31,6 +32,7 @@ public abstract class Verifier extends RpcAuthInfo {
|
|||
super(flavor);
|
||||
}
|
||||
|
||||
/** Read both AuthFlavor and the verifier from the XDR */
|
||||
public static Verifier readFlavorAndVerifier(XDR xdr) {
|
||||
AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt());
|
||||
final Verifier verifer;
|
||||
|
@ -46,4 +48,19 @@ public abstract class Verifier extends RpcAuthInfo {
|
|||
return verifer;
}

/**
* Write AuthFlavor and the verifier to the XDR
*/
public static void writeFlavorAndVerifier(Verifier verifier, XDR xdr) {
if (verifier instanceof VerifierNone) {
xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
} else if (verifier instanceof VerifierGSS) {
xdr.writeInt(AuthFlavor.RPCSEC_GSS.getValue());
} else {
throw new UnsupportedOperationException("Cannot recognize the verifier");
}
verifier.write(xdr);
}

}

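A hedged usage sketch of the new flavor helpers above (the VerifierNone and XDR.asReadOnlyWrap() calls are taken from elsewhere in this patch; the snippet itself is illustrative and not part of the change):

XDR xdr = new XDR();
// Serialize the auth flavor followed by the verifier body.
Verifier.writeFlavorAndVerifier(new VerifierNone(), xdr);
// Read both back from a read-only view of the same buffer.
Verifier v = Verifier.readFlavorAndVerifier(xdr.asReadOnlyWrap());
assert v instanceof VerifierNone;
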
@ -21,10 +21,7 @@ import org.apache.hadoop.oncrpc.RpcCall;
|
|||
import org.apache.hadoop.oncrpc.RpcUtil;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.CredentialsNone;
|
||||
import org.apache.hadoop.oncrpc.security.Credentials;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
|
||||
import org.apache.hadoop.portmap.PortmapInterface.Procedure;
|
||||
|
||||
/**
|
||||
|
@ -37,16 +34,12 @@ public class PortmapRequest {
|
|||
|
||||
public static XDR create(PortmapMapping mapping) {
|
||||
XDR request = new XDR();
|
||||
RpcCall.write(request,
|
||||
RpcCall call = RpcCall.getInstance(
|
||||
RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
|
||||
RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION,
|
||||
Procedure.PMAPPROC_SET.getValue());
|
||||
request.writeInt(AuthFlavor.AUTH_NONE.getValue());
|
||||
Credentials credential = new CredentialsNone();
|
||||
credential.write(request);
|
||||
request.writeInt(AuthFlavor.AUTH_NONE.getValue());
|
||||
Verifier verifier = new VerifierNone();
|
||||
verifier.write(request);
|
||||
Procedure.PMAPPROC_SET.getValue(), new CredentialsNone(),
|
||||
new VerifierNone());
|
||||
call.write(request);
|
||||
return mapping.serialize(request);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,30 +22,31 @@ import java.util.Collection;
|
|||
|
||||
import org.apache.hadoop.oncrpc.RpcAcceptedReply;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
|
||||
/**
|
||||
* Helper utility for sending portmap response.
|
||||
*/
|
||||
public class PortmapResponse {
|
||||
public static XDR voidReply(XDR xdr, int xid) {
|
||||
RpcAcceptedReply.voidReply(xdr, xid);
|
||||
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
|
||||
return xdr;
|
||||
}
|
||||
|
||||
public static XDR intReply(XDR xdr, int xid, int value) {
|
||||
RpcAcceptedReply.voidReply(xdr, xid);
|
||||
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
|
||||
xdr.writeInt(value);
|
||||
return xdr;
|
||||
}
|
||||
|
||||
public static XDR booleanReply(XDR xdr, int xid, boolean value) {
|
||||
RpcAcceptedReply.voidReply(xdr, xid);
|
||||
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
|
||||
xdr.writeBoolean(value);
|
||||
return xdr;
|
||||
}
|
||||
|
||||
public static XDR pmapList(XDR xdr, int xid, Collection<PortmapMapping> list) {
|
||||
RpcAcceptedReply.voidReply(xdr, xid);
|
||||
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
|
||||
for (PortmapMapping mapping : list) {
|
||||
System.out.println(mapping);
|
||||
xdr.writeBoolean(true); // Value follows
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.hadoop.oncrpc.RpcAcceptedReply;
|
|||
import org.apache.hadoop.oncrpc.RpcCall;
|
||||
import org.apache.hadoop.oncrpc.RpcProgram;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
|
||||
/**
|
||||
|
@ -147,8 +148,9 @@ public class RpcProgramPortmap extends RpcProgram implements PortmapInterface {
|
|||
out = getport(xid, in, out);
|
||||
} else {
|
||||
LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
|
||||
RpcAcceptedReply.voidReply(out, xid,
|
||||
RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
|
||||
RpcAcceptedReply.getInstance(xid,
|
||||
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
|
||||
out);
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
|
|
@ -39,7 +39,7 @@ public class TestNfsTime {
|
|||
t1.serialize(xdr);
|
||||
|
||||
// Deserialize it back
|
||||
NfsTime t2 = NfsTime.deserialize(xdr);
|
||||
NfsTime t2 = NfsTime.deserialize(xdr.asReadOnlyWrap());
|
||||
|
||||
// Ensure the NfsTimes are equal
|
||||
Assert.assertEquals(t1, t2);
|
||||
|
|
|
@ -33,7 +33,7 @@ public class TestFileHandle {
|
|||
|
||||
// Deserialize it back
|
||||
FileHandle handle2 = new FileHandle();
|
||||
handle2.deserialize(xdr);
|
||||
handle2.deserialize(xdr.asReadOnlyWrap());
|
||||
Assert.assertEquals(handle.getFileId(), 1024);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,6 +24,8 @@ import static org.junit.Assert.assertTrue;
|
|||
import java.net.InetAddress;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.apache.hadoop.oncrpc.security.CredentialsNone;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
|
@ -55,7 +57,8 @@ public class TestFrameDecoder {
|
|||
InetAddress client, Channel channel) {
|
||||
// Get the final complete request and return a void response.
|
||||
result = in;
|
||||
return RpcAcceptedReply.voidReply(out, 1234);
|
||||
RpcAcceptedReply.getAcceptInstance(1234, new VerifierNone()).write(out);
|
||||
return out;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -161,7 +164,8 @@ public class TestFrameDecoder {
|
|||
|
||||
static void createPortmapXDRheader(XDR xdr_out, int procedure) {
|
||||
// Make this a method
|
||||
RpcCall.write(xdr_out, 0, 100000, 2, procedure);
|
||||
RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(),
|
||||
new VerifierNone()).write(xdr_out);
|
||||
}
|
||||
|
||||
static XDR createGetportMount() {
|
||||
|
|
|
@ -47,7 +47,7 @@ public class TestRpcAcceptedReply {
|
|||
@Test
|
||||
public void testConstructor() {
|
||||
Verifier verifier = new VerifierNone();
|
||||
RpcAcceptedReply reply = new RpcAcceptedReply(0, RpcMessage.Type.RPC_REPLY,
|
||||
RpcAcceptedReply reply = new RpcAcceptedReply(0,
|
||||
ReplyState.MSG_ACCEPTED, verifier, AcceptState.SUCCESS);
|
||||
assertEquals(0, reply.getXid());
|
||||
assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());
|
||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.hadoop.oncrpc;
|
|||
|
||||
import org.apache.hadoop.oncrpc.RpcDeniedReply.RejectState;
|
||||
import org.apache.hadoop.oncrpc.RpcReply.ReplyState;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
|
@ -39,10 +40,8 @@ public class TestRpcDeniedReply {
|
|||
|
||||
@Test
|
||||
public void testConstructor() {
|
||||
RpcDeniedReply reply = new RpcDeniedReply(0, RpcMessage.Type.RPC_REPLY,
|
||||
ReplyState.MSG_ACCEPTED, RejectState.AUTH_ERROR) {
|
||||
// Anonymous class
|
||||
};
|
||||
RpcDeniedReply reply = new RpcDeniedReply(0, ReplyState.MSG_ACCEPTED,
|
||||
RejectState.AUTH_ERROR, new VerifierNone());
|
||||
Assert.assertEquals(0, reply.getXid());
|
||||
Assert.assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());
|
||||
Assert.assertEquals(ReplyState.MSG_ACCEPTED, reply.getState());
|
||||
|
|
|
@ -26,7 +26,10 @@ import org.junit.Test;
|
|||
public class TestRpcMessage {
|
||||
private RpcMessage getRpcMessage(int xid, RpcMessage.Type msgType) {
|
||||
return new RpcMessage(xid, msgType) {
|
||||
// Anonymous class
|
||||
@Override
|
||||
public XDR write(XDR xdr) {
|
||||
return null;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.hadoop.oncrpc;
|
|||
|
||||
|
||||
import org.apache.hadoop.oncrpc.RpcReply.ReplyState;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
|
@ -39,8 +40,12 @@ public class TestRpcReply {
|
|||
|
||||
@Test
|
||||
public void testRpcReply() {
|
||||
RpcReply reply = new RpcReply(0, RpcMessage.Type.RPC_REPLY, ReplyState.MSG_ACCEPTED) {
|
||||
// Anonymous class
|
||||
RpcReply reply = new RpcReply(0, ReplyState.MSG_ACCEPTED,
|
||||
new VerifierNone()) {
|
||||
@Override
|
||||
public XDR write(XDR xdr) {
|
||||
return null;
|
||||
}
|
||||
};
|
||||
Assert.assertEquals(0, reply.getXid());
|
||||
Assert.assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());
|
||||
|
|
|
@ -17,23 +17,35 @@
|
|||
*/
|
||||
package org.apache.hadoop.oncrpc;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* Tests for {@link XDR}
|
||||
*/
|
||||
import junit.framework.Assert;
|
||||
|
||||
public class TestXDR {
|
||||
/**
|
||||
* Test {@link XDR#append(byte[], byte[])}
|
||||
*/
|
||||
private void serializeInt(int times) {
|
||||
XDR w = new XDR();
|
||||
for (int i = 0; i < times; ++i)
|
||||
w.writeInt(23);
|
||||
|
||||
XDR r = w.asReadOnlyWrap();
|
||||
for (int i = 0; i < times; ++i)
|
||||
Assert.assertEquals(r.readInt(), 23);
|
||||
}
|
||||
|
||||
private void serializeLong(int times) {
|
||||
XDR w = new XDR();
|
||||
for (int i = 0; i < times; ++i)
|
||||
w.writeLongAsHyper(23);
|
||||
|
||||
XDR r = w.asReadOnlyWrap();
|
||||
for (int i = 0; i < times; ++i)
|
||||
Assert.assertEquals(r.readHyper(), 23);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testAppendBytes() {
|
||||
byte[] arr1 = new byte[] {0, 1};
|
||||
byte[] arr2 = new byte[] {2, 3};
|
||||
assertTrue(Arrays.equals(new byte[]{0, 1, 2, 3}, XDR.append(arr1, arr2)));
|
||||
public void testPerformance() {
|
||||
final int TEST_TIMES = 8 << 20;
|
||||
serializeInt(TEST_TIMES);
|
||||
serializeLong(TEST_TIMES);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -38,7 +38,7 @@ public class TestCredentialsSys {
|
|||
credential.write(xdr);
|
||||
|
||||
CredentialsSys newCredential = new CredentialsSys();
|
||||
newCredential.read(xdr);
|
||||
newCredential.read(xdr.asReadOnlyWrap());
|
||||
|
||||
assertEquals(0, newCredential.getUID());
|
||||
assertEquals(1, newCredential.getGID());
|
||||
|
|
|
@ -40,6 +40,7 @@ import org.apache.hadoop.oncrpc.RpcAcceptedReply;
|
|||
import org.apache.hadoop.oncrpc.RpcCall;
|
||||
import org.apache.hadoop.oncrpc.RpcProgram;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
|
||||
/**
|
||||
|
@ -88,7 +89,8 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
|
|||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("MOUNT NULLOP : " + " client: " + client);
|
||||
}
|
||||
return RpcAcceptedReply.voidReply(out, xid);
|
||||
return RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(
|
||||
out);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -155,7 +157,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
|
|||
|
||||
String host = client.getHostName();
|
||||
mounts.remove(new MountEntry(host, path));
|
||||
RpcAcceptedReply.voidReply(out, xid);
|
||||
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(out);
|
||||
return out;
|
||||
}
|
||||
|
||||
|
@ -165,7 +167,8 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
|
|||
LOG.debug("MOUNT UMNTALL : " + " client: " + client);
|
||||
}
|
||||
mounts.clear();
|
||||
return RpcAcceptedReply.voidReply(out, xid);
|
||||
return RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(
|
||||
out);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -190,8 +193,9 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
|
|||
out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
|
||||
} else {
|
||||
// Invalid procedure
|
||||
RpcAcceptedReply.voidReply(out, xid,
|
||||
RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
|
||||
RpcAcceptedReply.getInstance(xid,
|
||||
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
|
||||
out);
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
|
|
@ -49,6 +49,7 @@ import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
|
|||
import org.apache.hadoop.nfs.nfs3.response.WccAttr;
|
||||
import org.apache.hadoop.nfs.nfs3.response.WccData;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
|
||||
/**
|
||||
|
@ -291,7 +292,8 @@ class OpenFileCtx {
|
|||
WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
|
||||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
|
||||
fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
} else {
|
||||
// Handle repeated write requests(same xid or not).
|
||||
// If already replied, send reply again. If not replied, drop the
|
||||
|
@ -313,7 +315,8 @@ class OpenFileCtx {
|
|||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
|
||||
fileWcc, request.getCount(), request.getStableHow(),
|
||||
Nfs3Constant.WRITE_COMMIT_VERF);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
}
|
||||
updateLastAccessTime();
|
||||
|
||||
|
@ -367,7 +370,8 @@ class OpenFileCtx {
|
|||
WccData fileWcc = new WccData(preOpAttr, postOpAttr);
|
||||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
|
||||
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
writeCtx.setReplied(true);
|
||||
}
|
||||
|
||||
|
@ -392,7 +396,8 @@ class OpenFileCtx {
|
|||
WccData fileWcc = new WccData(preOpAttr, postOpAttr);
|
||||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
|
||||
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
writeCtx.setReplied(true);
|
||||
}
|
||||
|
||||
|
@ -418,7 +423,8 @@ class OpenFileCtx {
|
|||
}
|
||||
|
||||
updateLastAccessTime();
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -707,7 +713,8 @@ class OpenFileCtx {
|
|||
WccData fileWcc = new WccData(preOpAttr, latestAttr);
|
||||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
|
||||
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
}
|
||||
|
||||
} catch (IOException e) {
|
||||
|
@ -715,7 +722,8 @@ class OpenFileCtx {
|
|||
+ offset + " and length " + data.length, e);
|
||||
if (!writeCtx.getReplied()) {
|
||||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
// Keep the stream open. Either the client retries or the StreamMonitor closes it.
|
||||
}
|
||||
|
||||
|
@ -752,8 +760,9 @@ class OpenFileCtx {
|
|||
WccData fileWcc = new WccData(preOpAttr, latestAttr);
|
||||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
|
||||
fileWcc, 0, writeCtx.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
|
||||
Nfs3Utils.writeChannel(writeCtx.getChannel(),
|
||||
response.send(new XDR(), writeCtx.getXid()), writeCtx.getXid());
|
||||
Nfs3Utils.writeChannel(writeCtx.getChannel(), response
|
||||
.writeHeaderAndResponse(new XDR(), writeCtx.getXid(),
|
||||
new VerifierNone()), writeCtx.getXid());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -98,7 +98,6 @@ import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
|
|||
import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
|
||||
import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response;
|
||||
import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
|
||||
import org.apache.hadoop.nfs.nfs3.response.VoidResponse;
|
||||
import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
|
||||
import org.apache.hadoop.nfs.nfs3.response.WccAttr;
|
||||
import org.apache.hadoop.nfs.nfs3.response.WccData;
|
||||
|
@ -108,12 +107,13 @@ import org.apache.hadoop.oncrpc.RpcDeniedReply;
|
|||
import org.apache.hadoop.oncrpc.RpcProgram;
|
||||
import org.apache.hadoop.oncrpc.RpcReply;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.CredentialsSys;
|
||||
import org.apache.hadoop.oncrpc.security.Credentials;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
import org.apache.hadoop.oncrpc.security.CredentialsSys;
|
||||
import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
|
||||
import org.apache.hadoop.oncrpc.security.SecurityHandler;
|
||||
import org.apache.hadoop.oncrpc.security.SysSecurityHandler;
|
||||
import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
|
||||
import org.apache.hadoop.oncrpc.security.Verifier;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.apache.hadoop.security.AccessControlException;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
|
||||
|
@ -209,7 +209,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
|||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("NFS NULL");
|
||||
}
|
||||
return new VoidResponse(Nfs3Status.NFS3_OK);
|
||||
return new NFS3Response(Nfs3Status.NFS3_OK);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -1790,9 +1790,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
|||
+ rpcCall.getCredential().getFlavor()
|
||||
+ " is not AUTH_SYS or RPCSEC_GSS.");
|
||||
XDR reply = new XDR();
|
||||
reply = RpcDeniedReply.voidReply(reply, xid,
|
||||
RpcDeniedReply rdr = new RpcDeniedReply(xid,
|
||||
RpcReply.ReplyState.MSG_ACCEPTED,
|
||||
RpcDeniedReply.RejectState.AUTH_ERROR);
|
||||
RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
|
||||
rdr.write(reply);
|
||||
return reply;
|
||||
}
|
||||
}
|
||||
|
@ -1857,11 +1858,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
|||
response = commit(xdr, securityHandler, client);
|
||||
} else {
|
||||
// Invalid procedure
|
||||
RpcAcceptedReply.voidReply(out, xid,
|
||||
RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
|
||||
RpcAcceptedReply.getInstance(xid,
|
||||
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
|
||||
out);
|
||||
}
|
||||
if (response != null) {
|
||||
out = response.send(out, xid);
|
||||
// TODO: currently we just return VerifierNone
|
||||
out = response.writeHeaderAndResponse(out, xid, new VerifierNone());
|
||||
}
|
||||
|
||||
return out;
|
||||
|
|
|
@ -39,6 +39,7 @@ import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
|
|||
import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
|
||||
import org.apache.hadoop.nfs.nfs3.response.WccData;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.apache.hadoop.util.Daemon;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
|
||||
|
@ -118,7 +119,8 @@ public class WriteManager {
|
|||
byte[] data = request.getData().array();
|
||||
if (data.length < count) {
|
||||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -155,7 +157,8 @@ public class WriteManager {
|
|||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
|
||||
fileWcc, count, request.getStableHow(),
|
||||
Nfs3Constant.WRITE_COMMIT_VERF);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -182,10 +185,12 @@ public class WriteManager {
|
|||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
|
||||
fileWcc, count, request.getStableHow(),
|
||||
Nfs3Constant.WRITE_COMMIT_VERF);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
} else {
|
||||
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
|
||||
Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
|
||||
Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
|
||||
new XDR(), xid, new VerifierNone()), xid);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -38,6 +38,8 @@ import org.apache.hadoop.oncrpc.RpcReply;
|
|||
import org.apache.hadoop.oncrpc.SimpleTcpClient;
|
||||
import org.apache.hadoop.oncrpc.SimpleTcpClientHandler;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.CredentialsNone;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
import org.jboss.netty.channel.Channel;
|
||||
import org.jboss.netty.channel.ChannelHandlerContext;
|
||||
|
@ -58,15 +60,9 @@ public class TestOutOfOrderWrite {
|
|||
|
||||
static XDR create() {
|
||||
XDR request = new XDR();
|
||||
RpcCall.write(request, 0x8000004c, Nfs3Constant.PROGRAM,
|
||||
Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3.CREATE.getValue());
|
||||
|
||||
// credentials
|
||||
request.writeInt(0); // auth null
|
||||
request.writeInt(0); // length zero
|
||||
// verifier
|
||||
request.writeInt(0); // auth null
|
||||
request.writeInt(0); // length zero
|
||||
RpcCall.getInstance(0x8000004c, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
|
||||
Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(),
|
||||
new VerifierNone()).write(request);
|
||||
|
||||
SetAttr3 objAttr = new SetAttr3();
|
||||
CREATE3Request createReq = new CREATE3Request(new FileHandle("/"),
|
||||
|
@ -78,15 +74,10 @@ public class TestOutOfOrderWrite {
|
|||
static XDR write(FileHandle handle, int xid, long offset, int count,
|
||||
byte[] data) {
|
||||
XDR request = new XDR();
|
||||
RpcCall.write(request, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
|
||||
Nfs3Constant.NFSPROC3.WRITE.getValue());
|
||||
RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
|
||||
Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(),
|
||||
new VerifierNone()).write(request);
|
||||
|
||||
// credentials
|
||||
request.writeInt(0); // auth null
|
||||
request.writeInt(0); // length zero
|
||||
// verifier
|
||||
request.writeInt(0); // auth null
|
||||
request.writeInt(0); // length zero
|
||||
WRITE3Request write1 = new WRITE3Request(handle, offset, count,
|
||||
WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
|
||||
write1.serialize(request);
|
||||
|
|
|
@ -26,6 +26,8 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
|
|||
import org.apache.hadoop.oncrpc.RegistrationClient;
|
||||
import org.apache.hadoop.oncrpc.RpcCall;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.CredentialsNone;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
import org.apache.hadoop.portmap.PortmapMapping;
|
||||
import org.apache.hadoop.portmap.PortmapRequest;
|
||||
|
||||
|
@ -78,11 +80,8 @@ public class TestPortmapRegister {
|
|||
|
||||
static void createPortmapXDRheader(XDR xdr_out, int procedure) {
|
||||
// TODO: Move this to RpcRequest
|
||||
RpcCall.write(xdr_out, 0, 100000, 2, procedure);
|
||||
xdr_out.writeInt(0); //no auth
|
||||
xdr_out.writeInt(0);
|
||||
xdr_out.writeInt(0);
|
||||
xdr_out.writeInt(0);
|
||||
RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(),
|
||||
new VerifierNone()).write(xdr_out);
|
||||
|
||||
/*
|
||||
xdr_out.putInt(1); //unix auth
|
||||
|
|
|
@ -27,6 +27,8 @@ import java.net.UnknownHostException;
|
|||
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
|
||||
import org.apache.hadoop.oncrpc.RpcCall;
|
||||
import org.apache.hadoop.oncrpc.XDR;
|
||||
import org.apache.hadoop.oncrpc.security.CredentialsNone;
|
||||
import org.apache.hadoop.oncrpc.security.VerifierNone;
|
||||
|
||||
// TODO: convert this to Junit
|
||||
public class TestUdpServer {
|
||||
|
@ -82,7 +84,8 @@ public class TestUdpServer {
|
|||
|
||||
static void createPortmapXDRheader(XDR xdr_out, int procedure) {
|
||||
// Make this a method
|
||||
RpcCall.write(xdr_out, 0, 100000, 2, procedure);
|
||||
RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(),
|
||||
new VerifierNone()).write(xdr_out);
|
||||
}
|
||||
|
||||
static void testGetportMount() {
|
||||
|
|
|
@ -244,6 +244,9 @@ Release 2.3.0 - UNRELEASED
|
|||
|
||||
NEW FEATURES
|
||||
|
||||
HDFS-5122. Support failover and retry in WebHdfsFileSystem for NN HA.
|
||||
(Haohui Mai via jing9)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
HDFS-4657. Limit the number of blocks logged by the NN after a block
|
||||
|
@ -303,7 +306,22 @@ Release 2.3.0 - UNRELEASED
|
|||
HDFS-5170. BlockPlacementPolicyDefault uses the wrong classname when
|
||||
alerting to enable debug logging. (Andrew Wang)
|
||||
|
||||
Release 2.1.1-beta - UNRELEASED
|
||||
HDFS-5031. BlockScanner scans the block multiple times. (Vinay via Arpit
|
||||
Agarwal)
|
||||
|
||||
Release 2.2.0 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
||||
NEW FEATURES
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
||||
Release 2.1.1-beta - 2013-09-23
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
||||
|
@ -384,6 +402,9 @@ Release 2.1.1-beta - UNRELEASED
|
|||
|
||||
HDFS-4680. Audit logging of delegation tokens for MR tracing. (Andrew Wang)
|
||||
|
||||
HDFS-5212. Refactor RpcMessage and NFS3Response to support different
|
||||
types of authentication information. (jing9)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
|
@ -455,6 +476,9 @@ Release 2.1.1-beta - UNRELEASED
|
|||
HDFS-5192. NameNode may fail to start when
|
||||
dfs.client.test.drop.namenode.response.number is set. (jing9)
|
||||
|
||||
HDFS-5219. Add configuration keys for retry policy in WebHDFSFileSystem.
|
||||
(Haohui Mai via jing9)
|
||||
|
||||
Release 2.1.0-beta - 2013-08-22
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
|
|
@ -513,4 +513,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
|||
// Timeout to wait for block receiver and responder thread to stop
public static final String DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY = "dfs.datanode.xceiver.stop.timeout.millis";
public static final long DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT = 60000;

// WebHDFS retry policy
public static final String DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY = "dfs.http.client.retry.policy.enabled";
public static final boolean DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = false;
public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY = "dfs.http.client.retry.policy.spec";
public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
public static final String DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.http.client.failover.max.attempts";
public static final int DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.http.client.failover.sleep.base.millis";
public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.http.client.failover.sleep.max.millis";
public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000;
}

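The new keys above drive the WebHDFS client's retry and failover behaviour. A hedged configuration sketch (values are the defaults shown above; the spec string is read as t1,n1,t2,n2,... pairs, i.e. roughly n1 retries with t1 ms of sleep, then n2 retries with t2 ms, under the usual multiple-linear-random-retry interpretation):

Configuration conf = new Configuration();
// Enable retries for a non-HA WebHDFS client and keep the default schedule.
conf.setBoolean(DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY, "10000,6,60000,10");
// Failover knobs used when the URI is a logical HA name.
conf.setInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 15);
conf.setInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY, 500);
conf.setInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY, 15000);
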
@ -38,6 +38,7 @@ import java.net.InetSocketAddress;
|
|||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
|
@ -610,6 +611,48 @@ public class DFSUtil {
|
|||
Configuration conf) {
|
||||
return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
|
||||
* the configuration.
|
||||
*
|
||||
* @param conf configuration
|
||||
* @return list of InetSocketAddresses
|
||||
*/
|
||||
public static Map<String, Map<String, InetSocketAddress>> getHaNnHttpAddresses(
|
||||
Configuration conf) {
|
||||
return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
|
||||
}
|
||||
|
||||
/**
* Resolve an HDFS URL into a real InetSocketAddress. It works like a DNS
* resolver when the URL points to a non-HA cluster. When the URL points to an
* HA cluster, the resolver further resolves the logical name (i.e., the
* authority in the URL) into real namenode addresses.
*/
public static InetSocketAddress[] resolve(URI uri, int schemeDefaultPort,
Configuration conf) throws IOException {
ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();

if (!HAUtil.isLogicalUri(conf, uri)) {
InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
schemeDefaultPort);
ret.add(addr);

} else {
Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
.getHaNnHttpAddresses(conf);

for (Map<String, InetSocketAddress> addrs : addresses.values()) {
for (InetSocketAddress addr : addrs.values()) {
ret.add(addr);
}
}
}

InetSocketAddress[] r = new InetSocketAddress[ret.size()];
return ret.toArray(r);
}

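A hedged usage sketch of resolve(), mirroring the createWebHDFSHAConfiguration()/testResolve() case added to TestDFSUtil later in this patch (host names are illustrative):

HdfsConfiguration conf = new HdfsConfiguration();
conf.set("dfs.nameservices", "ns1");
conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
conf.set("dfs.namenode.http-address.ns1.nn1", "nn1.example.com:50070");
conf.set("dfs.namenode.http-address.ns1.nn2", "nn2.example.com:50070");
conf.set("dfs.client.failover.proxy.provider.ns1",
    ConfiguredFailoverProxyProvider.class.getName());

// A logical (HA) URI expands to every configured NN HTTP address;
// a plain host:port URI resolves to that single address.
InetSocketAddress[] addrs =
    DFSUtil.resolve(new URI("webhdfs://ns1"), NameNode.DEFAULT_PORT, conf);
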
/**
|
||||
* Returns list of InetSocketAddress corresponding to backup node rpc
|
||||
|
|
|
@ -100,6 +100,7 @@ class BlockPoolSliceScanner {
|
|||
private long currentPeriodStart = Time.now();
|
||||
private long bytesLeft = 0; // Bytes to scan in this period
|
||||
private long totalBytesToScan = 0;
|
||||
private boolean isNewPeriod = true;
|
||||
|
||||
private final LogFileHandler verificationLog;
|
||||
|
||||
|
@ -126,7 +127,10 @@ class BlockPoolSliceScanner {
|
|||
public int compare(BlockScanInfo left, BlockScanInfo right) {
final long l = left.lastScanTime;
final long r = right.lastScanTime;
return l < r? -1: l > r? 1: 0;
// Break ties by comparing the blocks themselves, because TreeMap uses the
// comparator (when one is supplied) to check whether an entry already exists.
return l < r? -1: l > r? 1: left.compareTo(right);
}
};

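The tie-break matters because TreeMap consults the comparator rather than equals() when it checks whether an entry is already present, so two distinct blocks with identical scan times must not compare as equal. A small standalone illustration of the failure mode (not from the patch):

import java.util.Comparator;
import java.util.TreeSet;

public class TreeMapTieBreakDemo {
  public static void main(String[] args) {
    // A comparator that only looks at the scan time collapses distinct blocks
    // with the same scan time into a single entry; the second add() is dropped.
    Comparator<long[]> byTimeOnly = (a, b) -> Long.compare(a[0], b[0]);
    TreeSet<long[]> set = new TreeSet<>(byTimeOnly);
    set.add(new long[] {100L, 1L});  // {scanTime, blockId}
    set.add(new long[] {100L, 2L});  // ignored: ties with the first entry
    System.out.println(set.size());  // prints 1 -- hence comparing the blocks themselves
  }
}
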
@ -148,8 +152,6 @@ class BlockPoolSliceScanner {
|
|||
public boolean equals(Object that) {
|
||||
if (this == that) {
|
||||
return true;
|
||||
} else if (that == null || !(that instanceof BlockScanInfo)) {
|
||||
return false;
|
||||
}
|
||||
return super.equals(that);
|
||||
}
|
||||
|
@ -539,10 +541,12 @@ class BlockPoolSliceScanner {
|
|||
entry.genStamp));
|
||||
if (info != null) {
|
||||
if (processedBlocks.get(entry.blockId) == null) {
|
||||
updateBytesLeft(-info.getNumBytes());
|
||||
if (isNewPeriod) {
|
||||
updateBytesLeft(-info.getNumBytes());
|
||||
}
|
||||
processedBlocks.put(entry.blockId, 1);
|
||||
}
|
||||
if (logIterator.isPrevious()) {
|
||||
if (logIterator.isLastReadFromPrevious()) {
|
||||
// write the log entry to current file
|
||||
// so that the entry is preserved for later runs.
|
||||
verificationLog.append(entry.verificationTime, entry.genStamp,
|
||||
|
@ -557,6 +561,7 @@ class BlockPoolSliceScanner {
|
|||
} finally {
|
||||
IOUtils.closeStream(logIterator);
|
||||
}
|
||||
isNewPeriod = false;
|
||||
}
|
||||
|
||||
|
||||
|
@ -597,6 +602,7 @@ class BlockPoolSliceScanner {
|
|||
// reset the byte counts :
|
||||
bytesLeft = totalBytesToScan;
|
||||
currentPeriodStart = Time.now();
|
||||
isNewPeriod = true;
|
||||
}
|
||||
|
||||
private synchronized boolean workRemainingInCurrentPeriod() {
|
||||
|
|
|
@ -33,6 +33,12 @@ public interface RollingLogs {
|
|||
public interface LineIterator extends Iterator<String>, Closeable {
|
||||
/** Is the iterator iterating over the previous log file? */
|
||||
public boolean isPrevious();
|
||||
|
||||
/**
|
||||
* Is the last read entry from previous? This should be called after
|
||||
* reading.
|
||||
*/
|
||||
public boolean isLastReadFromPrevious();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -134,6 +134,7 @@ class RollingLogsImpl implements RollingLogs {
|
|||
*/
|
||||
private class Reader implements RollingLogs.LineIterator {
|
||||
private File file;
|
||||
private File lastReadFile;
|
||||
private BufferedReader reader;
|
||||
private String line;
|
||||
private boolean closed = false;
|
||||
|
@ -149,6 +150,11 @@ class RollingLogsImpl implements RollingLogs {
|
|||
return file == prev;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isLastReadFromPrevious() {
|
||||
return lastReadFile == prev;
|
||||
}
|
||||
|
||||
private boolean openFile() throws IOException {
|
||||
|
||||
for(int i=0; i<2; i++) {
|
||||
|
@ -203,6 +209,7 @@ class RollingLogsImpl implements RollingLogs {
|
|||
public String next() {
|
||||
String curLine = line;
|
||||
try {
|
||||
lastReadFile = file;
|
||||
readNext();
|
||||
} catch (IOException e) {
|
||||
DataBlockScanner.LOG.warn("Failed to read next line.", e);
|
||||
|
|
|
@ -54,6 +54,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
|
|||
import org.apache.hadoop.hdfs.ByteRangeInputStream;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.HAUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
|
||||
|
@ -86,6 +87,7 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
|
|||
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
|
||||
import org.apache.hadoop.hdfs.web.resources.UserParam;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.io.retry.RetryPolicies;
|
||||
import org.apache.hadoop.io.retry.RetryPolicy;
|
||||
import org.apache.hadoop.io.retry.RetryUtils;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
|
@ -119,7 +121,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
|
||||
/** SPNEGO authenticator */
|
||||
private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
|
||||
/** Default connection factory may be overriden in tests to use smaller timeout values */
|
||||
/** Default connection factory may be overridden in tests to use smaller timeout values */
|
||||
URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
|
||||
/** Configures connections for AuthenticatedURL */
|
||||
private final ConnectionConfigurator CONN_CONFIGURATOR =
|
||||
|
@ -159,12 +161,13 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
}
|
||||
|
||||
private UserGroupInformation ugi;
|
||||
private InetSocketAddress nnAddr;
|
||||
private URI uri;
|
||||
private boolean hasInitedToken;
|
||||
private Token<?> delegationToken;
|
||||
private RetryPolicy retryPolicy = null;
|
||||
private Path workingDir;
|
||||
private InetSocketAddress nnAddrs[];
|
||||
private int currentNNAddrIndex;
|
||||
|
||||
/**
|
||||
* Return the protocol scheme for the FileSystem.
|
||||
|
@ -174,7 +177,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
*/
|
||||
@Override
|
||||
public String getScheme() {
|
||||
return "webhdfs";
|
||||
return SCHEME;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -183,20 +186,42 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
super.initialize(uri, conf);
|
||||
setConf(conf);
|
||||
ugi = UserGroupInformation.getCurrentUser();
|
||||
|
||||
try {
|
||||
this.uri = new URI(uri.getScheme(), uri.getAuthority(), null, null, null);
|
||||
this.uri = new URI(uri.getScheme(), uri.getAuthority(), null,
|
||||
null, null);
|
||||
this.nnAddrs = DFSUtil.resolve(this.uri, getDefaultPort(), conf);
|
||||
} catch (URISyntaxException e) {
|
||||
throw new IllegalArgumentException(e);
|
||||
}
|
||||
this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
|
||||
this.retryPolicy =
|
||||
RetryUtils.getDefaultRetryPolicy(
|
||||
conf,
|
||||
DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
|
||||
DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
|
||||
DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
|
||||
DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
|
||||
SafeModeException.class);
|
||||
|
||||
if (!HAUtil.isLogicalUri(conf, this.uri)) {
|
||||
this.retryPolicy =
|
||||
RetryUtils.getDefaultRetryPolicy(
|
||||
conf,
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
|
||||
SafeModeException.class);
|
||||
} else {
|
||||
|
||||
int maxFailoverAttempts = conf.getInt(
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
|
||||
int failoverSleepBaseMillis = conf.getInt(
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
|
||||
int failoverSleepMaxMillis = conf.getInt(
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
|
||||
DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
|
||||
|
||||
this.retryPolicy = RetryPolicies
|
||||
.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
|
||||
maxFailoverAttempts, failoverSleepBaseMillis,
|
||||
failoverSleepMaxMillis);
|
||||
}
|
||||
|
||||
this.workingDir = getHomeDirectory();
|
||||
|
||||
if (UserGroupInformation.isSecurityEnabled()) {
|
||||
|
@ -348,6 +373,19 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
return ((RemoteException)ioe).unwrapRemoteException();
|
||||
}
|
||||
|
||||
private synchronized InetSocketAddress getCurrentNNAddr() {
|
||||
return nnAddrs[currentNNAddrIndex];
|
||||
}
|
||||
|
||||
/**
|
||||
* Reset the appropriate state to gracefully fail over to another name node
|
||||
*/
|
||||
private synchronized void resetStateToFailOver() {
|
||||
currentNNAddrIndex = (currentNNAddrIndex + 1) % nnAddrs.length;
|
||||
delegationToken = null;
|
||||
hasInitedToken = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a URL pointing to given path on the namenode.
|
||||
*
|
||||
|
@ -357,6 +395,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
* @throws IOException on error constructing the URL
|
||||
*/
|
||||
private URL getNamenodeURL(String path, String query) throws IOException {
|
||||
InetSocketAddress nnAddr = getCurrentNNAddr();
|
||||
final URL url = new URL("http", nnAddr.getHostName(),
|
||||
nnAddr.getPort(), path + '?' + query);
|
||||
if (LOG.isTraceEnabled()) {
|
||||
|
@ -414,38 +453,28 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
*/
|
||||
private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
|
||||
final Param<?,?>... parameters) throws IOException {
|
||||
return new Runner(op, fspath, parameters).run().json;
|
||||
return new FsPathRunner(op, fspath, parameters).run().json;
|
||||
}
|
||||
|
||||
/**
|
||||
* This class is for initializing an HTTP connection, connecting to the server,
* obtaining a response, and also handling retry on failures.
|
||||
*/
|
||||
class Runner {
|
||||
private final HttpOpParam.Op op;
|
||||
private final URL url;
|
||||
abstract class AbstractRunner {
|
||||
abstract protected URL getUrl() throws IOException;
|
||||
|
||||
protected final HttpOpParam.Op op;
|
||||
private final boolean redirected;
|
||||
|
||||
private boolean checkRetry;
|
||||
private HttpURLConnection conn = null;
|
||||
protected HttpURLConnection conn = null;
|
||||
private Map<?, ?> json = null;
|
||||
|
||||
Runner(final HttpOpParam.Op op, final URL url, final boolean redirected) {
|
||||
protected AbstractRunner(final HttpOpParam.Op op, boolean redirected) {
|
||||
this.op = op;
|
||||
this.url = url;
|
||||
this.redirected = redirected;
|
||||
}
|
||||
|
||||
Runner(final HttpOpParam.Op op, final Path fspath,
|
||||
final Param<?,?>... parameters) throws IOException {
|
||||
this(op, toUrl(op, fspath, parameters), false);
|
||||
}
|
||||
|
||||
Runner(final HttpOpParam.Op op, final HttpURLConnection conn) {
|
||||
this(op, null, false);
|
||||
this.conn = conn;
|
||||
}
|
||||
|
||||
private HttpURLConnection getHttpUrlConnection(final URL url)
|
||||
throws IOException, AuthenticationException {
|
||||
UserGroupInformation connectUgi = ugi.getRealUser();
|
||||
|
@ -493,6 +522,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
|
||||
private void init() throws IOException {
|
||||
checkRetry = !redirected;
|
||||
URL url = getUrl();
|
||||
try {
|
||||
conn = getHttpUrlConnection(url);
|
||||
} catch(AuthenticationException ae) {
|
||||
|
@ -519,7 +549,23 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
}
|
||||
}
|
||||
|
||||
Runner run() throws IOException {
|
||||
AbstractRunner run() throws IOException {
|
||||
/**
|
||||
* Do the real work.
|
||||
*
|
||||
* There are three cases that the code inside the loop can throw an
|
||||
* IOException:
|
||||
*
|
||||
* <ul>
|
||||
* <li>The connection has failed (e.g., ConnectException,
|
||||
* @see FailoverOnNetworkExceptionRetry for more details)</li>
|
||||
* <li>The namenode enters the standby state (i.e., StandbyException).</li>
|
||||
* <li>The server returns errors for the command (i.e., RemoteException)</li>
|
||||
* </ul>
|
||||
*
|
||||
* The call to shouldRetry() consults the retry policy. The policy
|
||||
* examines the exception and swallows it if it decides to rerun the work.
|
||||
*/
|
||||
for(int retry = 0; ; retry++) {
|
||||
try {
|
||||
init();
|
||||
|
@ -537,14 +583,25 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
|
||||
private void shouldRetry(final IOException ioe, final int retry
|
||||
) throws IOException {
|
||||
InetSocketAddress nnAddr = getCurrentNNAddr();
|
||||
if (checkRetry) {
|
||||
try {
|
||||
final RetryPolicy.RetryAction a = retryPolicy.shouldRetry(
|
||||
ioe, retry, 0, true);
|
||||
if (a.action == RetryPolicy.RetryAction.RetryDecision.RETRY) {
|
||||
|
||||
boolean isRetry = a.action == RetryPolicy.RetryAction.RetryDecision.RETRY;
|
||||
boolean isFailoverAndRetry =
|
||||
a.action == RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;
|
||||
|
||||
if (isRetry || isFailoverAndRetry) {
|
||||
LOG.info("Retrying connect to namenode: " + nnAddr
|
||||
+ ". Already tried " + retry + " time(s); retry policy is "
|
||||
+ retryPolicy + ", delay " + a.delayMillis + "ms.");
|
||||
+ retryPolicy + ", delay " + a.delayMillis + "ms.");
|
||||
|
||||
if (isFailoverAndRetry) {
|
||||
resetStateToFailOver();
|
||||
}
|
||||
|
||||
Thread.sleep(a.delayMillis);
|
||||
return;
|
||||
}
|
||||
|
@ -617,6 +674,48 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
}
|
||||
}
|
||||
|
||||
final class FsPathRunner extends AbstractRunner {
|
||||
private final Path fspath;
|
||||
private final Param<?, ?>[] parameters;
|
||||
|
||||
FsPathRunner(final HttpOpParam.Op op, final Path fspath, final Param<?,?>... parameters) {
|
||||
super(op, false);
|
||||
this.fspath = fspath;
|
||||
this.parameters = parameters;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected URL getUrl() throws IOException {
|
||||
return toUrl(op, fspath, parameters);
|
||||
}
|
||||
}
|
||||
|
||||
final class URLRunner extends AbstractRunner {
|
||||
private final URL url;
|
||||
@Override
|
||||
protected URL getUrl() {
|
||||
return url;
|
||||
}
|
||||
|
||||
protected URLRunner(final HttpOpParam.Op op, final URL url, boolean redirected) {
|
||||
super(op, redirected);
|
||||
this.url = url;
|
||||
}
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
final class ConnRunner extends AbstractRunner {
|
||||
protected ConnRunner(final HttpOpParam.Op op, HttpURLConnection conn) {
|
||||
super(op, false);
|
||||
this.conn = conn;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected URL getUrl() {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private FsPermission applyUMask(FsPermission permission) {
|
||||
if (permission == null) {
|
||||
permission = FsPermission.getDefault();
|
||||
|
@ -772,7 +871,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
statistics.incrementWriteOps(1);
|
||||
|
||||
final HttpOpParam.Op op = PutOpParam.Op.CREATE;
|
||||
return new Runner(op, f,
|
||||
return new FsPathRunner(op, f,
|
||||
new PermissionParam(applyUMask(permission)),
|
||||
new OverwriteParam(overwrite),
|
||||
new BufferSizeParam(bufferSize),
|
||||
|
@ -788,7 +887,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
statistics.incrementWriteOps(1);
|
||||
|
||||
final HttpOpParam.Op op = PostOpParam.Op.APPEND;
|
||||
return new Runner(op, f, new BufferSizeParam(bufferSize))
|
||||
return new FsPathRunner(op, f, new BufferSizeParam(bufferSize))
|
||||
.run()
|
||||
.write(bufferSize);
|
||||
}
|
||||
|
@ -835,7 +934,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
final boolean resolved) throws IOException {
|
||||
final URL offsetUrl = offset == 0L? url
|
||||
: new URL(url + "&" + new OffsetParam(offset));
|
||||
return new Runner(GetOpParam.Op.OPEN, offsetUrl, resolved).run().conn;
|
||||
return new URLRunner(GetOpParam.Op.OPEN, offsetUrl, resolved).run().conn;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -909,7 +1008,7 @@ public class WebHdfsFileSystem extends FileSystem
|
|||
final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
|
||||
final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
|
||||
final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
|
||||
SecurityUtil.setTokenService(token, nnAddr);
|
||||
SecurityUtil.setTokenService(token, getCurrentNNAddr());
|
||||
return token;
|
||||
}
|
||||
|
||||
|
|
|
@ -871,7 +871,11 @@ public class TestDFSClientRetries {
|
|||
|
||||
final Path dir = new Path("/testNamenodeRestart");
|
||||
|
||||
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
|
||||
if (isWebHDFS) {
|
||||
conf.setBoolean(DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
|
||||
} else {
|
||||
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
|
||||
}
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
|
||||
conf.setInt(MiniDFSCluster.DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 5000);
|
||||
|
||||
|
|
|
@ -20,20 +20,25 @@ package org.apache.hadoop.hdfs;
|
|||
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
|
||||
import org.apache.hadoop.util.Shell;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
import org.junit.Assume;
|
||||
import static org.hamcrest.CoreMatchers.*;
|
||||
import static org.hamcrest.CoreMatchers.not;
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertThat;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
|
@ -54,8 +59,11 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
|||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.util.Shell;
|
||||
import org.junit.Assume;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
|
@ -539,6 +547,55 @@ public class TestDFSUtil {
|
|||
assertEquals("ns1", DFSUtil.getSecondaryNameServiceId(conf));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetHaNnHttpAddresses() throws IOException {
|
||||
final String LOGICAL_HOST_NAME = "ns1";
|
||||
final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
|
||||
final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
|
||||
|
||||
Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
|
||||
|
||||
Map<String, Map<String, InetSocketAddress>> map =
|
||||
DFSUtil.getHaNnHttpAddresses(conf);
|
||||
|
||||
assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
|
||||
assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testResolve() throws IOException, URISyntaxException {
|
||||
final String LOGICAL_HOST_NAME = "ns1";
|
||||
final String NS1_NN1_HOST = "ns1-nn1.example.com";
|
||||
final String NS1_NN2_HOST = "ns1-nn2.example.com";
|
||||
final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
|
||||
final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
|
||||
final int DEFAULT_PORT = NameNode.DEFAULT_PORT;
|
||||
|
||||
Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
|
||||
URI uri = new URI("webhdfs://ns1");
|
||||
assertTrue(HAUtil.isLogicalUri(conf, uri));
|
||||
InetSocketAddress[] addrs = DFSUtil.resolve(uri, DEFAULT_PORT, conf);
|
||||
assertArrayEquals(new InetSocketAddress[] {
|
||||
new InetSocketAddress(NS1_NN1_HOST, DEFAULT_PORT),
|
||||
new InetSocketAddress(NS1_NN2_HOST, DEFAULT_PORT),
|
||||
}, addrs);
|
||||
}
|
||||
|
||||
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
|
||||
conf.set(DFS_NAMESERVICES, "ns1");
|
||||
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
|
||||
conf.set(DFSUtil.addKeySuffixes(
|
||||
DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
|
||||
conf.set(DFSUtil.addKeySuffixes(
|
||||
DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);
|
||||
|
||||
conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
|
||||
ConfiguredFailoverProxyProvider.class.getName());
|
||||
return conf;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSubstituteForWildcardAddress() throws IOException {
|
||||
assertEquals("foo:12345",
|
||||
|
|
|
@ -459,4 +459,43 @@ public class TestDatanodeBlockScanner {
|
|||
assertArrayEquals(expectedSubDirs, ReplicaInfo.parseSubDirs(testFile).subDirs);
|
||||
assertEquals(BASE_PATH, ReplicaInfo.parseSubDirs(testFile).baseDirPath);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDuplicateScans() throws Exception {
|
||||
long startTime = Time.now();
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
|
||||
.numDataNodes(1).build();
|
||||
FileSystem fs = null;
|
||||
try {
|
||||
fs = cluster.getFileSystem();
|
||||
DataNode dataNode = cluster.getDataNodes().get(0);
|
||||
int infoPort = dataNode.getInfoPort();
|
||||
long scanTimeBefore = 0, scanTimeAfter = 0;
|
||||
for (int i = 1; i < 10; i++) {
|
||||
Path fileName = new Path("/test" + i);
|
||||
DFSTestUtil.createFile(fs, fileName, 1024, (short) 1, 1000L);
|
||||
waitForVerification(infoPort, fs, fileName, i, startTime, TIMEOUT);
|
||||
if (i > 1) {
|
||||
scanTimeAfter = DataNodeTestUtils.getLatestScanTime(dataNode,
|
||||
DFSTestUtil.getFirstBlock(fs, new Path("/test" + (i - 1))));
|
||||
assertFalse("scan time shoud not be 0", scanTimeAfter == 0);
|
||||
assertEquals("There should not be duplicate scan", scanTimeBefore,
|
||||
scanTimeAfter);
|
||||
}
|
||||
|
||||
scanTimeBefore = DataNodeTestUtils.getLatestScanTime(dataNode,
|
||||
DFSTestUtil.getFirstBlock(fs, new Path("/test" + i)));
|
||||
}
|
||||
cluster.restartDataNode(0);
|
||||
Thread.sleep(10000);
|
||||
dataNode = cluster.getDataNodes().get(0);
|
||||
scanTimeAfter = DataNodeTestUtils.getLatestScanTime(dataNode,
|
||||
DFSTestUtil.getFirstBlock(fs, new Path("/test" + (9))));
|
||||
assertEquals("There should not be duplicate scan", scanTimeBefore,
|
||||
scanTimeAfter);
|
||||
} finally {
|
||||
IOUtils.closeStream(fs);
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -115,11 +115,22 @@ public class DataNodeTestUtils {
|
|||
}
|
||||
|
||||
public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
|
||||
DataBlockScanner scanner = dn.getBlockScanner();
|
||||
BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
|
||||
BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b);
|
||||
bpScanner.verifyBlock(b);
|
||||
}
|
||||
|
||||
|
||||
private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn,
|
||||
ExtendedBlock b) {
|
||||
DataBlockScanner scanner = dn.getBlockScanner();
|
||||
BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
|
||||
return bpScanner;
|
||||
}
|
||||
|
||||
public static long getLatestScanTime(DataNode dn, ExtendedBlock b) {
|
||||
BlockPoolSliceScanner scanner = getBlockPoolScanner(dn, b);
|
||||
return scanner.getLastScanTime(b.getLocalBlock());
|
||||
}
|
||||
|
||||
public static void shutdownBlockScanner(DataNode dn) {
|
||||
if (dn.blockScanner != null) {
|
||||
dn.blockScanner.shutdown();
|
||||
|
|
|
@@ -0,0 +1,77 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.web;

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.junit.Assert;
import org.junit.Test;

/** Test whether WebHDFS can connect to an HA cluster */
public class TestWebHDFSForHA {

  private static final String LOGICAL_NAME = "minidfs";

  @Test
  public void test() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

    MiniDFSNNTopology topo = new MiniDFSNNTopology()
        .addNameservice(new MiniDFSNNTopology.NSConf(LOGICAL_NAME).addNN(
            new MiniDFSNNTopology.NNConf("nn1")).addNN(
            new MiniDFSNNTopology.NNConf("nn2")));

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(3).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    FileSystem fs = null;
    try {
      cluster.waitActive();

      final String uri = WebHdfsFileSystem.SCHEME + "://" + LOGICAL_NAME;
      fs = (WebHdfsFileSystem) FileSystem.get(new URI(uri), conf);
      cluster.transitionToActive(0);

      final Path dir = new Path("/test");
      Assert.assertTrue(fs.mkdirs(dir));

      cluster.shutdownNameNode(0);
      cluster.transitionToActive(1);

      final Path dir2 = new Path("/test2");
      Assert.assertTrue(fs.mkdirs(dir2));

    } finally {
      if (fs != null) {
        fs.close();
      }
      cluster.shutdown();
    }
  }
}
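For context, a rough sketch (not part of this commit) of the client-side failover settings that HATestUtil.setFailoverConfigurations is assumed to produce for the logical name used above; the key patterns follow the TestDFSUtil fragment earlier in this diff, and every address below is a placeholder.

    // Sketch only: keys and addresses are illustrative, not taken from the commit.
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "minidfs");
    conf.set("dfs.ha.namenodes.minidfs", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.minidfs.nn1", "127.0.0.1:8020");
    conf.set("dfs.namenode.rpc-address.minidfs.nn2", "127.0.0.1:8021");
    conf.set("dfs.namenode.http-address.minidfs.nn1", "127.0.0.1:50070");
    conf.set("dfs.namenode.http-address.minidfs.nn2", "127.0.0.1:50071");
    conf.set("dfs.client.failover.proxy.provider.minidfs",
        ConfiguredFailoverProxyProvider.class.getName());
    // With these in place, the logical URI resolves to whichever NameNode is active.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://minidfs"), conf);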
@@ -81,7 +81,7 @@ public class WebHdfsTestUtil {

  public static HttpURLConnection twoStepWrite(final WebHdfsFileSystem webhdfs,
      final HttpOpParam.Op op, HttpURLConnection conn) throws IOException {
    return webhdfs.new Runner(op, conn).twoStepWrite();
    return webhdfs.new ConnRunner(op, conn).twoStepWrite();
  }

  public static FSDataOutputStream write(final WebHdfsFileSystem webhdfs,
@@ -164,6 +164,9 @@ Release 2.3.0 - UNRELEASED

    MAPREDUCE-5484. YarnChild unnecessarily loads job conf twice (Sandy Ryza)

    MAPREDUCE-5487. In task processes, JobConf is unnecessarily loaded again
    in Limits (Sandy Ryza)

  BUG FIXES

    MAPREDUCE-5316. job -list-attempt-ids command does not handle illegal
@@ -175,7 +178,19 @@ Release 2.3.0 - UNRELEASED

    MAPREDUCE-5404. HSAdminServer does not use ephemeral ports in minicluster
    mode (Ted Yu via jlowe)

Release 2.1.1-beta - UNRELEASED
Release 2.2.0 - UNRELEASED

  INCOMPATIBLE CHANGES

  NEW FEATURES

  IMPROVEMENTS

  OPTIMIZATIONS

  BUG FIXES

Release 2.1.1-beta - 2013-09-23

  INCOMPATIBLE CHANGES
@@ -41,6 +41,7 @@ import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
@@ -76,6 +77,8 @@ class YarnChild {
    LOG.debug("Child starting");

    final JobConf job = new JobConf();
    // Initing with our JobConf allows us to avoid loading confs twice
    Limits.init(job);
    job.addResource(MRJobConfig.JOB_CONF_FILE);
    UserGroupInformation.setConfiguration(job);
@@ -62,8 +62,8 @@ import com.google.common.collect.Iterators;
public class Counters
    extends AbstractCounters<Counters.Counter, Counters.Group> {

  public static int MAX_COUNTER_LIMIT = Limits.COUNTERS_MAX;
  public static int MAX_GROUP_LIMIT = Limits.GROUPS_MAX;
  public static int MAX_COUNTER_LIMIT = Limits.getCountersMax();
  public static int MAX_GROUP_LIMIT = Limits.getGroupsMax();
  private static HashMap<String, String> depricatedCounterMap =
      new HashMap<String, String>();
@@ -28,37 +28,80 @@ import static org.apache.hadoop.mapreduce.MRJobConfig.*;
public class Limits {

  static final Configuration conf = new JobConf();
  public static final int GROUP_NAME_MAX =
      conf.getInt(COUNTER_GROUP_NAME_MAX_KEY, COUNTER_GROUP_NAME_MAX_DEFAULT);
  public static final int COUNTER_NAME_MAX =
      conf.getInt(COUNTER_NAME_MAX_KEY, COUNTER_NAME_MAX_DEFAULT);
  public static final int GROUPS_MAX =
      conf.getInt(COUNTER_GROUPS_MAX_KEY, COUNTER_GROUPS_MAX_DEFAULT);
  public static final int COUNTERS_MAX =
      conf.getInt(COUNTERS_MAX_KEY, COUNTERS_MAX_DEFAULT);

  private int totalCounters;
  private LimitExceededException firstViolation;

  private static boolean isInited;

  private static int GROUP_NAME_MAX;
  private static int COUNTER_NAME_MAX;
  private static int GROUPS_MAX;
  private static int COUNTERS_MAX;

  public synchronized static void init(Configuration conf) {
    if (!isInited) {
      if (conf == null) {
        conf = new JobConf();
      }
      GROUP_NAME_MAX = conf.getInt(COUNTER_GROUP_NAME_MAX_KEY,
          COUNTER_GROUP_NAME_MAX_DEFAULT);
      COUNTER_NAME_MAX = conf.getInt(COUNTER_NAME_MAX_KEY,
          COUNTER_NAME_MAX_DEFAULT);
      GROUPS_MAX = conf.getInt(COUNTER_GROUPS_MAX_KEY, COUNTER_GROUPS_MAX_DEFAULT);
      COUNTERS_MAX = conf.getInt(COUNTERS_MAX_KEY, COUNTERS_MAX_DEFAULT);
    }
    isInited = true;
  }

  public static int getGroupNameMax() {
    if (!isInited) {
      init(null);
    }
    return GROUP_NAME_MAX;
  }

  public static int getCounterNameMax() {
    if (!isInited) {
      init(null);
    }
    return COUNTER_NAME_MAX;
  }

  public static int getGroupsMax() {
    if (!isInited) {
      init(null);
    }
    return GROUPS_MAX;
  }

  public static int getCountersMax() {
    if (!isInited) {
      init(null);
    }
    return COUNTERS_MAX;
  }

  public static String filterName(String name, int maxLen) {
    return name.length() > maxLen ? name.substring(0, maxLen - 1) : name;
  }

  public static String filterCounterName(String name) {
    return filterName(name, COUNTER_NAME_MAX);
    return filterName(name, getCounterNameMax());
  }

  public static String filterGroupName(String name) {
    return filterName(name, GROUP_NAME_MAX);
    return filterName(name, getGroupNameMax());
  }

  public synchronized void checkCounters(int size) {
    if (firstViolation != null) {
      throw new LimitExceededException(firstViolation);
    }
    if (size > COUNTERS_MAX) {
    int countersMax = getCountersMax();
    if (size > countersMax) {
      firstViolation = new LimitExceededException("Too many counters: "+ size +
          " max="+ COUNTERS_MAX);
          " max="+ countersMax);
      throw firstViolation;
    }
  }
@@ -72,9 +115,10 @@ public class Limits {
    if (firstViolation != null) {
      throw new LimitExceededException(firstViolation);
    }
    if (size > GROUPS_MAX) {
    int groupsMax = getGroupsMax();
    if (size > groupsMax) {
      firstViolation = new LimitExceededException("Too many counter groups: "+
          size +" max="+ GROUPS_MAX);
          size +" max="+ groupsMax);
    }
  }
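For reference, a short sketch (not taken from this commit) of how the reworked Limits class above is meant to be used: a process that already holds a loaded configuration seeds the limits once through init(Configuration), as the YarnChild change earlier in this diff does, and all later reads go through the lazy getters, which fall back to a fresh JobConf when init() was never called.

    // Sketch only; mirrors the YarnChild hunk earlier in this diff.
    final JobConf job = new JobConf();
    // Seed the static limits from this JobConf so Limits does not have to
    // construct and load a second JobConf of its own.
    Limits.init(job);
    job.addResource(MRJobConfig.JOB_CONF_FILE);

    // Subsequent callers read the limits through the getters; if init() had
    // not been called, the first getter would initialize from a default JobConf.
    int countersMax = Limits.getCountersMax();
    int groupsMax = Limits.getGroupsMax();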
@@ -101,8 +101,8 @@ public class TestCounters {
  static final long FS_COUNTER_VALUE = 10;

  private void testMaxCounters(final Counters counters) {
    LOG.info("counters max="+ Limits.COUNTERS_MAX);
    for (int i = 0; i < Limits.COUNTERS_MAX; ++i) {
    LOG.info("counters max="+ Limits.getCountersMax());
    for (int i = 0; i < Limits.getCountersMax(); ++i) {
      counters.findCounter("test", "test"+ i);
    }
    setExpected(counters);
@@ -115,8 +115,8 @@ public class TestCounters {
  }

  private void testMaxGroups(final Counters counters) {
    LOG.info("counter groups max="+ Limits.GROUPS_MAX);
    for (int i = 0; i < Limits.GROUPS_MAX; ++i) {
    LOG.info("counter groups max="+ Limits.getGroupsMax());
    for (int i = 0; i < Limits.getGroupsMax(); ++i) {
      // assuming COUNTERS_MAX > GROUPS_MAX
      counters.findCounter("test"+ i, "test");
    }
@@ -38,7 +38,19 @@ Release 2.3.0 - UNRELEASED

    YARN-1060. Two tests in TestFairScheduler are missing @Test annotation
    (Niranjan Singh via Sandy Ryza)

Release 2.1.1-beta - UNRELEASED
Release 2.2.0 - UNRELEASED

  INCOMPATIBLE CHANGES

  NEW FEATURES

  IMPROVEMENTS

  OPTIMIZATIONS

  BUG FIXES

Release 2.1.1-beta - 2013-09-23

  INCOMPATIBLE CHANGES