Merge trunk into auto-HA branch
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3042@1337645 13f79535-47bb-0310-9956-ffa450edef68
commit 4cd70e87be
@@ -423,8 +423,8 @@ checkJavacWarnings () {
   if [[ $? != 0 ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT

-    -1 javac.  The patch appears to cause tar ant target to fail."
-    return 1
+    -1 javac.  The patch appears to cause the build to fail."
+    return 2
   fi
   ### Compare trunk and patch javac warning numbers
   if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then
@@ -528,6 +528,24 @@ $JIRA_COMMENT_FOOTER"
   return 0
 }

+###############################################################################
+### Install the new jars so tests and findbugs can find all of the updated jars
+buildAndInstall () {
+  echo ""
+  echo ""
+  echo "======================================================================"
+  echo "======================================================================"
+  echo " Installing all of the jars"
+  echo "======================================================================"
+  echo "======================================================================"
+  echo ""
+  echo ""
+  echo "$MVN install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess"
+  $MVN install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess
+  return $?
+}
+
+
 ###############################################################################
 ### Check there are no changes in the number of Findbugs warnings
 checkFindbugsWarnings () {
@@ -882,15 +900,22 @@ if [[ $? != 0 ]] ; then
   submitJiraComment 1
   cleanupAndExit 1
 fi
-checkJavadocWarnings
-(( RESULT = RESULT + $? ))
 checkJavacWarnings
-(( RESULT = RESULT + $? ))
+JAVAC_RET=$?
+#2 is returned if the code could not compile
+if [[ $JAVAC_RET == 2 ]] ; then
+  submitJiraComment 1
+  cleanupAndExit 1
+fi
+(( RESULT = RESULT + $JAVAC_RET ))
+checkJavadocWarnings
+(( RESULT = RESULT + $? ))
 checkEclipseGeneration
 (( RESULT = RESULT + $? ))
 ### Checkstyle not implemented yet
 #checkStyle
 #(( RESULT = RESULT + $? ))
+buildAndInstall
 checkFindbugsWarnings
 (( RESULT = RESULT + $? ))
 checkReleaseAuditWarnings

@@ -26,7 +26,6 @@ import javax.security.auth.login.Configuration;
 import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginException;
 import java.io.IOException;
-import java.lang.reflect.Field;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.security.AccessControlContext;
@@ -196,11 +195,10 @@ public class KerberosAuthenticator implements Authenticator {
       try {
         GSSManager gssManager = GSSManager.getInstance();
         String servicePrincipal = "HTTP/" + KerberosAuthenticator.this.url.getHost();

+        Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
         GSSName serviceName = gssManager.createName(servicePrincipal,
-                                                    GSSName.NT_HOSTBASED_SERVICE);
-        Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal,
-            gssManager);
+                                                    oid);
+        oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
         gssContext = gssManager.createContext(serviceName, oid, null,
                                               GSSContext.DEFAULT_LIFETIME);
         gssContext.requestCredDeleg(true);

@@ -327,6 +327,8 @@ public class AuthenticationFilter implements Filter {
   @Override
   public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
       throws IOException, ServletException {
+    boolean unauthorizedResponse = true;
+    String unauthorizedMsg = "";
     HttpServletRequest httpRequest = (HttpServletRequest) request;
     HttpServletResponse httpResponse = (HttpServletResponse) response;
     try {
@@ -350,6 +352,7 @@ public class AuthenticationFilter implements Filter {
         newToken = true;
       }
       if (token != null) {
+        unauthorizedResponse = false;
         if (LOG.isDebugEnabled()) {
           LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
         }
@@ -378,17 +381,17 @@ public class AuthenticationFilter implements Filter {
         }
         filterChain.doFilter(httpRequest, httpResponse);
       }
       else {
         throw new AuthenticationException("Missing AuthenticationToken");
       }
     } catch (AuthenticationException ex) {
+      unauthorizedMsg = ex.toString();
+      LOG.warn("Authentication exception: " + ex.getMessage(), ex);
+    }
+    if (unauthorizedResponse) {
       if (!httpResponse.isCommitted()) {
         Cookie cookie = createCookie("");
         cookie.setMaxAge(0);
         httpResponse.addCookie(cookie);
-        httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, ex.getMessage());
+        httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, unauthorizedMsg);
       }
-      LOG.warn("Authentication exception: " + ex.getMessage(), ex);
     }
   }

@@ -22,7 +22,6 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;

 import org.ietf.jgss.GSSException;
-import org.ietf.jgss.GSSManager;
 import org.ietf.jgss.Oid;

 public class KerberosUtil {
@@ -34,8 +33,7 @@ public class KerberosUtil {
         : "com.sun.security.auth.module.Krb5LoginModule";
   }

-  public static Oid getOidClassInstance(String servicePrincipal,
-      GSSManager gssManager)
+  public static Oid getOidInstance(String oidName)
       throws ClassNotFoundException, GSSException, NoSuchFieldException,
       IllegalAccessException {
     Class<?> oidClass;
@@ -44,7 +42,7 @@ public class KerberosUtil {
     } else {
       oidClass = Class.forName("sun.security.jgss.GSSUtil");
     }
-    Field oidField = oidClass.getDeclaredField("GSS_KRB5_MECH_OID");
+    Field oidField = oidClass.getDeclaredField(oidName);
     return (Oid)oidField.get(oidClass);
   }

@@ -145,10 +145,10 @@ public class TestKerberosAuthenticationHandler extends TestCase {
     GSSContext gssContext = null;
     try {
       String servicePrincipal = KerberosTestUtils.getServerPrincipal();
+      Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
       GSSName serviceName = gssManager.createName(servicePrincipal,
-                                                  GSSName.NT_HOSTBASED_SERVICE);
-      Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal,
-          gssManager);
+                                                  oid);
+      oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
       gssContext = gssManager.createContext(serviceName, oid, null,
                                             GSSContext.DEFAULT_LIFETIME);
       gssContext.requestCredDeleg(true);

@@ -63,8 +63,6 @@ Trunk (unreleased changes)

     HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh)

-    HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
-
     HADOOP-8308. Support cross-project Jenkins builds. (tomwhite)

   BUG FIXES
@@ -129,6 +127,15 @@ Trunk (unreleased changes)
     HADOOP-8339. jenkins complaining about 16 javadoc warnings
     (Tom White and Robert Evans via tgraves)

+    HADOOP-8354. test-patch findbugs may fail if a dependent module is changed
+    (Tom White and Robert Evans)
+
+    HADOOP-8375. test-patch should stop immediately once it has found
+    compilation errors (bobby)
+
+    HADOOP-8395. Text shell command unnecessarily demands that a
+    SequenceFile's key class be WritableComparable (harsh)
+
   OPTIMIZATIONS

     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -139,6 +146,9 @@ Release 2.0.0 - UNRELEASED

     HADOOP-7920. Remove Avro Rpc. (suresh)

+    HADOOP-8388. Remove unused BlockLocation serialization.
+    (Colin Patrick McCabe via eli)
+
   NEW FEATURES

     HADOOP-7773. Add support for protocol buffer based RPC engine.
@@ -163,6 +173,9 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8210. Common side of HDFS-3148: The client should be able
     to use multiple local interfaces for data transfer. (eli)

+    HADOOP-8343. Allow configuration of authorization for JmxJsonServlet and
+    MetricsServlet (tucu)
+
   IMPROVEMENTS

     HADOOP-7524. Change RPC to allow multiple protocols including multuple
@@ -284,6 +297,34 @@ Release 2.0.0 - UNRELEASED

     HADOOP-8214. make hadoop script recognize a full set of deprecated commands (rvs via tucu)

+    HADOOP-8347. Hadoop Common logs misspell 'successful'.
+    (Philip Zeyliger via eli)
+
+    HADOOP-8350. Improve NetUtils.getInputStream to return a stream which has
+    a tunable timeout. (todd)
+
+    HADOOP-8356. FileSystem service loading mechanism should print the FileSystem
+    impl it is failing to load (tucu)
+
+    HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
+    final release. (todd)
+
+    HADOOP-8361. Avoid out-of-memory problems when deserializing strings.
+    (Colin Patrick McCabe via eli)
+
+    HADOOP-8353. hadoop-daemon.sh and yarn-daemon.sh can be misleading on stop.
+    (Roman Shaposhnik via atm)
+
+    HADOOP-8224. Don't hardcode hdfs.audit.logger in the scripts.
+    (Tomohiko Kinebuchi via eli)
+
+    HADOOP-8113. Correction to BUILDING.txt: HDFS needs ProtocolBuffer, too
+    (not just MapReduce). Contributed by Eugene Koontz.
+
+    HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
+
+    HADOOP-8366 Use ProtoBuf for RpcResponseHeader (sanjay radia)
+
   OPTIMIZATIONS

   BUG FIXES
@@ -314,8 +355,6 @@ Release 2.0.0 - UNRELEASED

     HADOOP-8104. Inconsistent Jackson versions (tucu)

-    HADOOP-7940. The Text.clear() method does not clear the bytes as intended. (Csaba Miklos via harsh)
-
     HADOOP-8119. Fix javac warnings in TestAuthenticationFilter in hadoop-auth.
     (szetszwo)

@@ -406,6 +445,22 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8342. HDFS command fails with exception following merge of
     HADOOP-8325 (tucu)

+    HADOOP-8346. Makes oid changes to make SPNEGO work. Was broken due
+    to fixes introduced by the IBM JDK compatibility patch. (ddas)
+
+    HADOOP-8355. SPNEGO filter throws/logs exception when authentication fails (tucu)
+
+    HADOOP-8349. ViewFS doesn't work when the root of a file system is mounted. (atm)
+
+    HADOOP-8328. Duplicate FileSystem Statistics object for 'file' scheme.
+    (tomwhite)
+
+    HADOOP-8359. Fix javadoc warnings in Configuration. (Anupam Seth via
+    szetszwo)
+
+    HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
+    starting with a numeric character. (Junping Du via suresh)
+
   BREAKDOWN OF HADOOP-7454 SUBTASKS

     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -464,6 +519,11 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8172. Configuration no longer sets all keys in a deprecated key
     list. (Anupam Seth via bobby)

+    HADOOP-7868. Hadoop native fails to compile when default linker
+    option is -Wl,--as-needed. (Trevor Robinson via eli)
+
+    HADOOP-8316. Audit logging should be disabled by default. (eli)
+
 Release 0.23.3 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -519,6 +579,13 @@ Release 0.23.3 - UNRELEASED
     HADOOP-8335. Improve Configuration's address handling (Daryn Sharp via
     bobby)

+    HADOOP-8327. distcpv2 and distcpv1 jars should not coexist (Dave Thompson
+    via bobby)
+
+    HADOOP-8341. Fix or filter findbugs issues in hadoop-tools (bobby)
+
+    HADOOP-8373. Port RPC.getServerAddress to 0.23 (Daryn Sharp via bobby)
+
 Release 0.23.2 - UNRELEASED

   INCOMPATIBLE CHANGES

@@ -109,8 +109,10 @@ fi
 export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
 export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
+export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
+export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
 log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
 pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
+HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}

 # Set default scheduling priority
 if [ "$HADOOP_NICENESS" = "" ]; then
@@ -162,9 +164,15 @@ case $startStop in
     (stop)

       if [ -f $pid ]; then
-        if kill -0 `cat $pid` > /dev/null 2>&1; then
+        TARGET_PID=`cat $pid`
+        if kill -0 $TARGET_PID > /dev/null 2>&1; then
           echo stopping $command
-          kill `cat $pid`
+          kill $TARGET_PID
+          sleep $HADOOP_STOP_TIMEOUT
+          if kill -0 $TARGET_PID > /dev/null 2>&1; then
+            echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
+            kill -9 $TARGET_PID
+          fi
         else
           echo no $command to stop
         fi

@@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
#Security appender
#
-hadoop.security.logger=INFO,console
+hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
@@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
#
# hdfs audit logging
#
-hdfs.audit.logger=INFO,console
+hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# mapred audit logging
#
-mapred.audit.logger=INFO,console
+mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}

@@ -18,7 +18,6 @@
 package org.apache.hadoop.conf;

 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;

 import javax.servlet.ServletException;
@@ -57,9 +56,8 @@ public class ConfServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {

-    // Do the authorization
-    if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-        response)) {
+    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+                                                   request, response)) {
       return;
     }

@@ -278,7 +278,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @param key
    * @param newKeys
    * @param customMessage
-   * @deprecated use {@link addDeprecation(String key, String newKey,
+   * @deprecated use {@link #addDeprecation(String key, String newKey,
    *    String customMessage)} instead
    */
   @Deprecated
@@ -328,7 +328,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    *
    * @param key Key that is to be deprecated
    * @param newKeys list of keys that take up the values of deprecated key
-   * @deprecated use {@link addDeprecation(String key, String newKey)} instead
+   * @deprecated use {@link #addDeprecation(String key, String newKey)} instead
    */
   @Deprecated
   public synchronized static void addDeprecation(String key, String[] newKeys) {

@@ -346,7 +346,7 @@ public abstract class AbstractFileSystem {
           path);
     } else {
       throw new InvalidPathException(
-          "Path without scheme with non-null autorhrity:" + path);
+          "Path without scheme with non-null authority:" + path);
     }
   }
   String thisScheme = this.getUri().getScheme();

@@ -35,16 +35,7 @@ import org.apache.hadoop.io.WritableFactory;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class BlockLocation implements Writable {
-
-  static { // register a ctor
-    WritableFactories.setFactory
-      (BlockLocation.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new BlockLocation(); }
-       });
-  }
-
+public class BlockLocation {
   private String[] hosts; //hostnames of datanodes
   private String[] names; //hostname:portNumber of datanodes
   private String[] topologyPaths; // full path name in network topology
@@ -219,62 +210,6 @@ public class BlockLocation implements Writable {
     }
   }

-  /**
-   * Implement write of Writable
-   */
-  public void write(DataOutput out) throws IOException {
-    out.writeLong(offset);
-    out.writeLong(length);
-    out.writeBoolean(corrupt);
-    out.writeInt(names.length);
-    for (int i=0; i < names.length; i++) {
-      Text name = new Text(names[i]);
-      name.write(out);
-    }
-    out.writeInt(hosts.length);
-    for (int i=0; i < hosts.length; i++) {
-      Text host = new Text(hosts[i]);
-      host.write(out);
-    }
-    out.writeInt(topologyPaths.length);
-    for (int i=0; i < topologyPaths.length; i++) {
-      Text host = new Text(topologyPaths[i]);
-      host.write(out);
-    }
-  }
-
-  /**
-   * Implement readFields of Writable
-   */
-  public void readFields(DataInput in) throws IOException {
-    this.offset = in.readLong();
-    this.length = in.readLong();
-    this.corrupt = in.readBoolean();
-    int numNames = in.readInt();
-    this.names = new String[numNames];
-    for (int i = 0; i < numNames; i++) {
-      Text name = new Text();
-      name.readFields(in);
-      names[i] = name.toString();
-    }
-
-    int numHosts = in.readInt();
-    this.hosts = new String[numHosts];
-    for (int i = 0; i < numHosts; i++) {
-      Text host = new Text();
-      host.readFields(in);
-      hosts[i] = host.toString();
-    }
-
-    int numTops = in.readInt();
-    topologyPaths = new String[numTops];
-    for (int i = 0; i < numTops; i++) {
-      Text path = new Text();
-      path.readFields(in);
-      topologyPaths[i] = path.toString();
-    }
-  }
-
   public String toString() {
     StringBuilder result = new StringBuilder();
     result.append(offset);

@@ -228,6 +228,9 @@ public class CommonConfigurationKeysPublic {
   public static final String HADOOP_SECURITY_AUTHORIZATION =
     "hadoop.security.authorization";
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN =
+    "hadoop.security.instrumentation.requires.admin";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String HADOOP_SECURITY_SERVICE_USER_NAME_KEY =
     "hadoop.security.service.user.name.key";
 }

@@ -254,7 +254,7 @@ public class FileStatus implements Writable, Comparable {
   // Writable
   //////////////////////////////////////////////////
   public void write(DataOutput out) throws IOException {
-    Text.writeString(out, getPath().toString());
+    Text.writeString(out, getPath().toString(), Text.ONE_MEGABYTE);
     out.writeLong(getLen());
     out.writeBoolean(isDirectory());
     out.writeShort(getReplication());
@@ -262,16 +262,16 @@ public class FileStatus implements Writable, Comparable {
     out.writeLong(getModificationTime());
     out.writeLong(getAccessTime());
     getPermission().write(out);
-    Text.writeString(out, getOwner());
-    Text.writeString(out, getGroup());
+    Text.writeString(out, getOwner(), Text.ONE_MEGABYTE);
+    Text.writeString(out, getGroup(), Text.ONE_MEGABYTE);
     out.writeBoolean(isSymlink());
     if (isSymlink()) {
-      Text.writeString(out, getSymlink().toString());
+      Text.writeString(out, getSymlink().toString(), Text.ONE_MEGABYTE);
     }
   }

   public void readFields(DataInput in) throws IOException {
-    String strPath = Text.readString(in);
+    String strPath = Text.readString(in, Text.ONE_MEGABYTE);
     this.path = new Path(strPath);
     this.length = in.readLong();
     this.isdir = in.readBoolean();
@@ -280,10 +280,10 @@ public class FileStatus implements Writable, Comparable {
     modification_time = in.readLong();
     access_time = in.readLong();
     permission.readFields(in);
-    owner = Text.readString(in);
-    group = Text.readString(in);
+    owner = Text.readString(in, Text.ONE_MEGABYTE);
+    group = Text.readString(in, Text.ONE_MEGABYTE);
     if (in.readBoolean()) {
-      this.symlink = new Path(Text.readString(in));
+      this.symlink = new Path(Text.readString(in, Text.ONE_MEGABYTE));
     } else {
       this.symlink = null;
     }

@@ -199,7 +199,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * @return the protocol scheme for the FileSystem.
    */
   public String getScheme() {
-    throw new UnsupportedOperationException("Not implemented by the FileSystem implementation");
+    throw new UnsupportedOperationException("Not implemented by the " + getClass().getSimpleName() + " FileSystem implementation");
   }

   /** Returns a URI whose scheme and authority identify this FileSystem.*/

@@ -53,7 +53,7 @@ import org.apache.hadoop.util.Progressable;
 public class FilterFileSystem extends FileSystem {

   protected FileSystem fs;
-  private String swapScheme;
+  protected String swapScheme;

   /*
    * so that extending classes can define it

@@ -40,6 +40,17 @@ public class LocalFileSystem extends ChecksumFileSystem {
     this(new RawLocalFileSystem());
   }

+  @Override
+  public void initialize(URI name, Configuration conf) throws IOException {
+    if (fs.getConf() == null) {
+      fs.initialize(name, conf);
+    }
+    String scheme = name.getScheme();
+    if (!scheme.equals(fs.getUri().getScheme())) {
+      swapScheme = scheme;
+    }
+  }
+
   /**
    * Return the protocol scheme for the FileSystem.
    * <p/>

@@ -223,6 +223,13 @@ public class Path implements Comparable {
     return isUriPathAbsolute();
   }

+  /**
+   * @return true if and only if this path represents the root of a file system
+   */
+  public boolean isRoot() {
+    return getParent() == null;
+  }
+
   /** Returns the final component of this path.*/
   public String getName() {
     String path = uri.getPath();

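The isRoot() check added above is what the ChRootedFileSystem and ChRootedFs fixes further down rely on (HADOOP-8349). A minimal illustrative sketch of its semantics, using made-up paths and assuming the org.apache.hadoop.fs.Path from this patch:

import org.apache.hadoop.fs.Path;

public class IsRootExample {
  public static void main(String[] args) {
    // getParent() is null only for the root of a file system, so isRoot()
    // holds for "/" and for a scheme-qualified root, but not for a child path.
    System.out.println(new Path("/").isRoot());                // true
    System.out.println(new Path("hdfs://nn:8020/").isRoot());  // true
    System.out.println(new Path("/user/alice").isRoot());      // false
  }
}
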
@@ -84,8 +84,8 @@ public class PermissionStatus implements Writable {

   /** {@inheritDoc} */
   public void readFields(DataInput in) throws IOException {
-    username = Text.readString(in);
-    groupname = Text.readString(in);
+    username = Text.readString(in, Text.ONE_MEGABYTE);
+    groupname = Text.readString(in, Text.ONE_MEGABYTE);
     permission = FsPermission.read(in);
   }

@@ -110,8 +110,8 @@ public class PermissionStatus implements Writable {
                            String username,
                            String groupname,
                            FsPermission permission) throws IOException {
-    Text.writeString(out, username);
-    Text.writeString(out, groupname);
+    Text.writeString(out, username, Text.ONE_MEGABYTE);
+    Text.writeString(out, groupname, Text.ONE_MEGABYTE);
     permission.write(out);
   }

@@ -34,7 +34,6 @@ import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -136,7 +135,7 @@ class Display extends FsCommand {

   protected class TextRecordInputStream extends InputStream {
     SequenceFile.Reader r;
-    WritableComparable<?> key;
+    Writable key;
     Writable val;

     DataInputBuffer inbuf;
@@ -148,7 +147,7 @@ class Display extends FsCommand {
       r = new SequenceFile.Reader(lconf,
                                   SequenceFile.Reader.file(fpath));
       key = ReflectionUtils.newInstance(
-          r.getKeyClass().asSubclass(WritableComparable.class), lconf);
+          r.getKeyClass().asSubclass(Writable.class), lconf);
       val = ReflectionUtils.newInstance(
           r.getValueClass().asSubclass(Writable.class), lconf);
       inbuf = new DataInputBuffer();

@@ -75,7 +75,8 @@ class ChRootedFileSystem extends FilterFileSystem {
   protected Path fullPath(final Path path) {
     super.checkPath(path);
     return path.isAbsolute() ?
-      new Path(chRootPathPartString + path.toUri().getPath()) :
+      new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+          + path.toUri().getPath()) :
       new Path(chRootPathPartString + workingDir.toUri().getPath(), path);
   }

@@ -127,7 +128,7 @@ class ChRootedFileSystem extends FilterFileSystem {
     }
     String pathPart = p.toUri().getPath();
     return (pathPart.length() == chRootPathPartString.length()) ? "" : pathPart
-        .substring(chRootPathPartString.length() + 1);
+        .substring(chRootPathPartString.length() + (chRootPathPart.isRoot() ? 0 : 1));
   }

   @Override

@@ -79,7 +79,8 @@ class ChRootedFs extends AbstractFileSystem {
    */
   protected Path fullPath(final Path path) {
     super.checkPath(path);
-    return new Path(chRootPathPartString + path.toUri().getPath());
+    return new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+        + path.toUri().getPath());
   }

   public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
@@ -127,7 +128,8 @@ class ChRootedFs extends AbstractFileSystem {
     }
     String pathPart = p.toUri().getPath();
     return (pathPart.length() == chRootPathPartString.length()) ?
-        "" : pathPart.substring(chRootPathPartString.length() + 1);
+        "" : pathPart.substring(chRootPathPartString.length() +
+            (chRootPathPart.isRoot() ? 0 : 1));
   }

@@ -52,8 +52,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.metrics.MetricsServlet;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -99,6 +97,7 @@ public class HttpServer implements FilterContainer {
   // gets stored.
   public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
   static final String ADMINS_ACL = "admins.acl";
+  public static final String SPNEGO_FILTER = "SpnegoFilter";

   public static final String BIND_ADDRESS = "bind.address";

@@ -238,10 +237,6 @@ public class HttpServer implements FilterContainer {

     addDefaultApps(contexts, appDir, conf);

-    defineFilter(webAppContext, "krb5Filter",
-        Krb5AndCertsSslSocketConnector.Krb5SslFilter.class.getName(),
-        null, null);
-
     addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
     final FilterInitializer[] initializers = getFilterInitializers(conf);
     if (initializers != null) {
@@ -424,12 +419,13 @@ public class HttpServer implements FilterContainer {
    * protect with Kerberos authentication.
    * Note: This method is to be used for adding servlets that facilitate
    * internal communication and not for user facing functionality. For
-   * servlets added using this method, filters (except internal Kerberized
+   * servlets added using this method, filters (except internal Kerberos
    * filters) are not enabled.
    *
    * @param name The name of the servlet (can be passed as null)
    * @param pathSpec The path spec for the servlet
    * @param clazz The servlet class
+   * @param requireAuth Require Kerberos authenticate to access servlet
    */
   public void addInternalServlet(String name, String pathSpec,
       Class<? extends HttpServlet> clazz, boolean requireAuth) {
@@ -440,11 +436,11 @@ public class HttpServer implements FilterContainer {
     webAppContext.addServlet(holder, pathSpec);

     if(requireAuth && UserGroupInformation.isSecurityEnabled()) {
-       LOG.info("Adding Kerberos filter to " + name);
+       LOG.info("Adding Kerberos (SPNEGO) filter to " + name);
       ServletHandler handler = webAppContext.getServletHandler();
       FilterMapping fmap = new FilterMapping();
       fmap.setPathSpec(pathSpec);
-      fmap.setFilterName("krb5Filter");
+      fmap.setFilterName(SPNEGO_FILTER);
       fmap.setDispatches(Handler.ALL);
       handler.addFilterMapping(fmap);
     }
@@ -580,26 +576,14 @@ public class HttpServer implements FilterContainer {
     webServer.addConnector(sslListener);
   }

-  /**
-   * Configure an ssl listener on the server.
-   * @param addr address to listen on
-   * @param sslConf conf to retrieve ssl options
-   * @param needClientAuth whether client authentication is required
-   */
-  public void addSslListener(InetSocketAddress addr, Configuration sslConf,
-      boolean needClientAuth) throws IOException {
-    addSslListener(addr, sslConf, needClientAuth, false);
-  }
-
   /**
    * Configure an ssl listener on the server.
    * @param addr address to listen on
    * @param sslConf conf to retrieve ssl options
    * @param needCertsAuth whether x509 certificate authentication is required
-   * @param needKrbAuth whether to allow kerberos auth
    */
   public void addSslListener(InetSocketAddress addr, Configuration sslConf,
-      boolean needCertsAuth, boolean needKrbAuth) throws IOException {
+      boolean needCertsAuth) throws IOException {
     if (webServer.isStarted()) {
       throw new IOException("Failed to add ssl listener");
     }
@@ -612,15 +596,7 @@ public class HttpServer implements FilterContainer {
       System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
           "ssl.server.truststore.type", "jks"));
     }
-    Krb5AndCertsSslSocketConnector.MODE mode;
-    if(needCertsAuth && needKrbAuth)
-      mode = MODE.BOTH;
-    else if (!needCertsAuth && needKrbAuth)
-      mode = MODE.KRB;
-    else // Default to certificates
-      mode = MODE.CERTS;
-
-    SslSocketConnector sslListener = new Krb5AndCertsSslSocketConnector(mode);
+    SslSocketConnector sslListener = new SslSocketConnector();
     sslListener.setHost(addr.getHostName());
     sslListener.setPort(addr.getPort());
     sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));
@@ -779,6 +755,37 @@ public class HttpServer implements FilterContainer {
         : "Inactive HttpServer";
   }

+  /**
+   * Checks the user has privileges to access to instrumentation servlets.
+   * <p/>
+   * If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
+   * (default value) it always returns TRUE.
+   * <p/>
+   * If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
+   * it will check that if the current user is in the admin ACLS. If the user is
+   * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
+   *
+   * @param servletContext the servlet context.
+   * @param request the servlet request.
+   * @param response the servlet response.
+   * @return TRUE/FALSE based on the logic decribed above.
+   */
+  public static boolean isInstrumentationAccessAllowed(
+    ServletContext servletContext, HttpServletRequest request,
+    HttpServletResponse response) throws IOException {
+    Configuration conf =
+      (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+
+    boolean access = true;
+    boolean adminAccess = conf.getBoolean(
+      CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+      false);
+    if (adminAccess) {
+      access = hasAdministratorAccess(servletContext, request, response);
+    }
+    return access;
+  }
+
   /**
    * Does the user sending the HttpServletRequest has the administrator ACLs? If
    * it isn't the case, response will be modified to send an error to the user.
@@ -794,7 +801,6 @@ public class HttpServer implements FilterContainer {
       HttpServletResponse response) throws IOException {
     Configuration conf =
         (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
-
     // If there is no authorization, anybody has administrator access.
     if (!conf.getBoolean(
         CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
@@ -834,12 +840,11 @@ public class HttpServer implements FilterContainer {
     @Override
     public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
-      response.setContentType("text/plain; charset=UTF-8");
-      // Do the authorization
-      if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-          response)) {
+      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+                                                     request, response)) {
         return;
       }
+      response.setContentType("text/plain; charset=UTF-8");
       PrintWriter out = response.getWriter();
       ReflectionUtils.printThreadInfo(out, "");
       out.close();

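For context on isInstrumentationAccessAllowed(), here is a hedged sketch of a servlet guarded the same way the JMXJsonServlet and MetricsServlet changes below are; the servlet class itself is hypothetical:

import java.io.IOException;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.http.HttpServer;

public class ExampleInfoServlet extends HttpServlet {
  @Override
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    // Returns true unless hadoop.security.instrumentation.requires.admin is
    // set to true and the caller is not in the admin ACL; on failure the
    // helper has already written the error response, so just return.
    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
                                                   request, response)) {
      return;
    }
    response.setContentType("text/plain; charset=UTF-8");
    response.getWriter().println("ok");
  }
}
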
@@ -239,7 +239,6 @@ public class Text extends BinaryComparable
    */
   public void clear() {
     length = 0;
-    bytes = EMPTY_BYTES;
   }

   /*
@@ -413,6 +412,8 @@ public class Text extends BinaryComparable
     return bytes;
   }

+  static final public int ONE_MEGABYTE = 1024 * 1024;
+
   /** Read a UTF8 encoded string from in
    */
   public static String readString(DataInput in) throws IOException {
@@ -422,6 +423,16 @@ public class Text extends BinaryComparable
     return decode(bytes);
   }

+  /** Read a UTF8 encoded string with a maximum size
+   */
+  public static String readString(DataInput in, int maxLength)
+      throws IOException {
+    int length = WritableUtils.readVIntInRange(in, 0, maxLength - 1);
+    byte [] bytes = new byte[length];
+    in.readFully(bytes, 0, length);
+    return decode(bytes);
+  }
+
   /** Write a UTF8 encoded string to out
    */
   public static int writeString(DataOutput out, String s) throws IOException {
@@ -432,6 +443,22 @@ public class Text extends BinaryComparable
     return length;
   }

+  /** Write a UTF8 encoded string with a maximum size to out
+   */
+  public static int writeString(DataOutput out, String s, int maxLength)
+      throws IOException {
+    ByteBuffer bytes = encode(s);
+    int length = bytes.limit();
+    if (length >= maxLength) {
+      throw new IOException("string was too long to write! Expected " +
+          "less than " + maxLength + " bytes, but got " +
+          length + " bytes.");
+    }
+    WritableUtils.writeVInt(out, length);
+    out.write(bytes.array(), 0, length);
+    return length;
+  }
+
   ////// states for validateUTF8

   private static final int LEAD_BYTE = 0;

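The bounded readString/writeString pair added above is what the FileStatus and PermissionStatus hunks earlier switch to. A small self-contained sketch of how the limit behaves, with an illustrative value:

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class BoundedStringExample {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    // Throws IOException if the encoded string is >= 1 MB, instead of
    // letting a corrupt length field trigger a huge allocation on read.
    Text.writeString(out, "hdfs-user", Text.ONE_MEGABYTE);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    // readVIntInRange rejects lengths outside [0, ONE_MEGABYTE - 1].
    String roundTripped = Text.readString(in, Text.ONE_MEGABYTE);
    System.out.println(roundTripped);
  }
}
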
@@ -53,6 +53,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
@@ -845,24 +847,24 @@ public class Client {
       touch();

       try {
-        int id = in.readInt();                    // try to read an id
-
+        RpcResponseHeaderProto response =
+            RpcResponseHeaderProto.parseDelimitedFrom(in);
+        int callId = response.getCallId();
         if (LOG.isDebugEnabled())
-          LOG.debug(getName() + " got value #" + id);
-
-        Call call = calls.get(id);
+          LOG.debug(getName() + " got value #" + callId);

-        int state = in.readInt();     // read call status
-        if (state == Status.SUCCESS.state) {
+        Call call = calls.get(callId);
+        RpcStatusProto status = response.getStatus();
+        if (status == RpcStatusProto.SUCCESS) {
           Writable value = ReflectionUtils.newInstance(valueClass, conf);
           value.readFields(in);                 // read value
           call.setRpcResponse(value);
-          calls.remove(id);
-        } else if (state == Status.ERROR.state) {
+          calls.remove(callId);
+        } else if (status == RpcStatusProto.ERROR) {
           call.setException(new RemoteException(WritableUtils.readString(in),
                                                 WritableUtils.readString(in)));
-          calls.remove(id);
-        } else if (state == Status.FATAL.state) {
+          calls.remove(callId);
+        } else if (status == RpcStatusProto.FATAL) {
           // Close the connection
           markClosed(new RemoteException(WritableUtils.readString(in),
                                          WritableUtils.readString(in)));

@@ -217,7 +217,7 @@ public abstract class Server {
   public static final Log AUDITLOG =
     LogFactory.getLog("SecurityLogger."+Server.class.getName());
   private static final String AUTH_FAILED_FOR = "Auth failed for ";
-  private static final String AUTH_SUCCESSFULL_FOR = "Auth successfull for ";
+  private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";

   private static final ThreadLocal<Server> SERVER = new ThreadLocal<Server>();

@@ -1234,7 +1234,7 @@ public abstract class Server {
             LOG.debug("SASL server successfully authenticated client: " + user);
           }
           rpcMetrics.incrAuthenticationSuccesses();
-          AUDITLOG.info(AUTH_SUCCESSFULL_FOR + user);
+          AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user);
           saslContextEstablished = true;
         }
       } else {
@@ -1339,7 +1339,7 @@ public abstract class Server {
           + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
           + ") is configured as simple. Please configure another method "
           + "like kerberos or digest.");
-        setupResponse(authFailedResponse, authFailedCall, Status.FATAL,
+        setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
             null, ae.getClass().getName(), ae.getMessage());
         responder.doRespond(authFailedCall);
         throw ae;
@@ -1420,7 +1420,7 @@ public abstract class Server {
       Call fakeCall = new Call(-1, null, this);
       // Versions 3 and greater can interpret this exception
       // response in the same manner
-      setupResponse(buffer, fakeCall, Status.FATAL,
+      setupResponseOldVersionFatal(buffer, fakeCall,
           null, VersionMismatch.class.getName(), errMsg);

       responder.doRespond(fakeCall);
@@ -1443,7 +1443,7 @@ public abstract class Server {
       ByteArrayOutputStream buffer = new ByteArrayOutputStream();

       Call fakeCall = new Call(-1, null, this);
-      setupResponse(buffer, fakeCall, Status.FATAL, null,
+      setupResponse(buffer, fakeCall, RpcStatusProto.FATAL, null,
           IpcException.class.getName(), errMsg);
       responder.doRespond(fakeCall);
     }
@@ -1579,7 +1579,7 @@ public abstract class Server {
           new Call(header.getCallId(), null, this);
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();

-        setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+        setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
             IOException.class.getName(),
             "Unknown rpc kind " + header.getRpcKind());
         responder.doRespond(readParamsFailedCall);
@@ -1597,7 +1597,7 @@ public abstract class Server {
           new Call(header.getCallId(), null, this);
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();

-        setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+        setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
            t.getClass().getName(),
            "IPC server unable to read call parameters: " + t.getMessage());
         responder.doRespond(readParamsFailedCall);
@@ -1627,7 +1627,7 @@ public abstract class Server {
         rpcMetrics.incrAuthorizationSuccesses();
       } catch (AuthorizationException ae) {
         rpcMetrics.incrAuthorizationFailures();
-        setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null,
+        setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL, null,
             ae.getClass().getName(), ae.getMessage());
         responder.doRespond(authFailedCall);
         return false;
@@ -1725,8 +1725,8 @@ public abstract class Server {
           // responder.doResponse() since setupResponse may use
           // SASL to encrypt response data and SASL enforces
           // its own message ordering.
-          setupResponse(buf, call, (error == null) ? Status.SUCCESS
-              : Status.ERROR, value, errorClass, error);
+          setupResponse(buf, call, (error == null) ? RpcStatusProto.SUCCESS
+              : RpcStatusProto.ERROR, value, errorClass, error);

           // Discard the large buf and reset it back to smaller size
           // to free up heap
@@ -1859,40 +1859,79 @@ public abstract class Server {
   /**
    * Setup response for the IPC Call.
    *
-   * @param response buffer to serialize the response into
+   * @param responseBuf buffer to serialize the response into
    * @param call {@link Call} to which we are setting up the response
-   * @param status {@link Status} of the IPC call
+   * @param status of the IPC call
    * @param rv return value for the IPC Call, if the call was successful
    * @param errorClass error class, if the the call failed
    * @param error error message, if the call failed
    * @throws IOException
    */
-  private void setupResponse(ByteArrayOutputStream response,
-                             Call call, Status status,
+  private void setupResponse(ByteArrayOutputStream responseBuf,
+                             Call call, RpcStatusProto status,
                              Writable rv, String errorClass, String error)
   throws IOException {
-    response.reset();
-    DataOutputStream out = new DataOutputStream(response);
-    out.writeInt(call.callId);                // write call id
-    out.writeInt(status.state);           // write status
+    responseBuf.reset();
+    DataOutputStream out = new DataOutputStream(responseBuf);
+    RpcResponseHeaderProto.Builder response =
+        RpcResponseHeaderProto.newBuilder();
+    response.setCallId(call.callId);
+    response.setStatus(status);

-    if (status == Status.SUCCESS) {
+    if (status == RpcStatusProto.SUCCESS) {
       try {
+        response.build().writeDelimitedTo(out);
         rv.write(out);
       } catch (Throwable t) {
         LOG.warn("Error serializing call response for call " + call, t);
         // Call back to same function - this is OK since the
         // buffer is reset at the top, and since status is changed
         // to ERROR it won't infinite loop.
-        setupResponse(response, call, Status.ERROR,
+        setupResponse(responseBuf, call, RpcStatusProto.ERROR,
             null, t.getClass().getName(),
             StringUtils.stringifyException(t));
         return;
       }
     } else {
+      if (status == RpcStatusProto.FATAL) {
+        response.setServerIpcVersionNum(Server.CURRENT_VERSION);
+      }
+      response.build().writeDelimitedTo(out);
       WritableUtils.writeString(out, errorClass);
       WritableUtils.writeString(out, error);
     }
     if (call.connection.useWrap) {
-      wrapWithSasl(response, call);
+      wrapWithSasl(responseBuf, call);
     }
-    call.setResponse(ByteBuffer.wrap(response.toByteArray()));
+    call.setResponse(ByteBuffer.wrap(responseBuf.toByteArray()));
+  }
+
+  /**
+   * Setup response for the IPC Call on Fatal Error from a
+   * client that is using old version of Hadoop.
+   * The response is serialized using the previous protocol's response
+   * layout.
+   *
+   * @param response buffer to serialize the response into
+   * @param call {@link Call} to which we are setting up the response
+   * @param rv return value for the IPC Call, if the call was successful
+   * @param errorClass error class, if the the call failed
+   * @param error error message, if the call failed
+   * @throws IOException
+   */
+  private void setupResponseOldVersionFatal(ByteArrayOutputStream response,
+                             Call call,
+                             Writable rv, String errorClass, String error)
+  throws IOException {
+    final int OLD_VERSION_FATAL_STATUS = -1;
+    response.reset();
+    DataOutputStream out = new DataOutputStream(response);
+    out.writeInt(call.callId);                // write call id
+    out.writeInt(OLD_VERSION_FATAL_STATUS);   // write FATAL_STATUS
+    WritableUtils.writeString(out, errorClass);
+    WritableUtils.writeString(out, error);
+
+    if (call.connection.useWrap) {
+      wrapWithSasl(response, call);
+    }

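To make the header change concrete, here is a hedged, self-contained sketch of the new response layout, restricted to calls that already appear in this diff (the protobuf classes are the generated RpcPayloadHeaderProtos members imported in Client.java above); the call id and payload value are invented:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;

public class RpcResponseLayoutExample {
  public static void main(String[] args) throws IOException {
    // "Server" side, as in setupResponse above: delimited header, then payload.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    RpcResponseHeaderProto.newBuilder()
        .setCallId(42)
        .setStatus(RpcStatusProto.SUCCESS)
        .build().writeDelimitedTo(out);
    new Text("result").write(out);   // Writable return value follows the header

    // "Client" side, as in Client.receiveResponse above: parse the header
    // first, then dispatch on its status instead of reading a raw status int.
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray()));
    RpcResponseHeaderProto header = RpcResponseHeaderProto.parseDelimitedFrom(in);
    if (header.getStatus() == RpcStatusProto.SUCCESS) {
      Text value = new Text();
      value.readFields(in);
      System.out.println(header.getCallId() + " -> " + value);
    }
  }
}
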
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-/**
- * Status of a Hadoop IPC call.
- */
-enum Status {
-  SUCCESS (0),
-  ERROR (1),
-  FATAL (-1);
-
-  int state;
-  private Status(int state) {
-    this.state = state;
-  }
-}

@@ -148,9 +148,8 @@ public class JMXJsonServlet extends HttpServlet {
   @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response) {
     try {
-      // Do the authorization
-      if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-          response)) {
+      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+                                                     request, response)) {
         return;
       }
       JsonGenerator jg = null;

@@ -106,9 +106,8 @@ public class MetricsServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {

-    // Do the authorization
-    if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-        response)) {
+    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+                                                   request, response)) {
       return;
     }

@@ -140,7 +140,7 @@ public class NetUtils {

  /**
   * Util method to build socket addr from either:
-  *   <host>:<post>
+  *   <host>:<port>
   *   <fs>://<host>:<port>/<path>
   */
  public static InetSocketAddress createSocketAddr(String target) {
@@ -150,7 +150,7 @@ public class NetUtils {
  /**
   * Util method to build socket addr from either:
   *   <host>
-  *   <host>:<post>
+  *   <host>:<port>
   *   <fs>://<host>:<port>/<path>
   */
  public static InetSocketAddress createSocketAddr(String target,
@@ -375,53 +375,44 @@ public class NetUtils {
  }

  /**
-  * Same as getInputStream(socket, socket.getSoTimeout()).<br><br>
-  *
-  * From documentation for {@link #getInputStream(Socket, long)}:<br>
-  * Returns InputStream for the socket. If the socket has an associated
-  * SocketChannel then it returns a
-  * {@link SocketInputStream} with the given timeout. If the socket does not
-  * have a channel, {@link Socket#getInputStream()} is returned. In the later
-  * case, the timeout argument is ignored and the timeout set with
-  * {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
-  *
-  * Any socket created using socket factories returned by {@link NetUtils},
-  * must use this interface instead of {@link Socket#getInputStream()}.
+  * Same as <code>getInputStream(socket, socket.getSoTimeout()).</code>
+  * <br><br>
   *
   * @see #getInputStream(Socket, long)
   *
   * @param socket
   * @return InputStream for reading from the socket.
   * @throws IOException
   */
- public static InputStream getInputStream(Socket socket)
+ public static SocketInputWrapper getInputStream(Socket socket)
                                           throws IOException {
    return getInputStream(socket, socket.getSoTimeout());
  }

  /**
-  * Returns InputStream for the socket. If the socket has an associated
-  * SocketChannel then it returns a
-  * {@link SocketInputStream} with the given timeout. If the socket does not
-  * have a channel, {@link Socket#getInputStream()} is returned. In the later
-  * case, the timeout argument is ignored and the timeout set with
-  * {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
+  * Return a {@link SocketInputWrapper} for the socket and set the given
+  * timeout. If the socket does not have an associated channel, then its socket
+  * timeout will be set to the specified value. Otherwise, a
+  * {@link SocketInputStream} will be created which reads with the configured
+  * timeout.
   *
-  * Any socket created using socket factories returned by {@link NetUtils},
+  * Any socket created using socket factories returned by {@link #NetUtils},
   * must use this interface instead of {@link Socket#getInputStream()}.
   *
+  * In general, this should be called only once on each socket: see the note
+  * in {@link SocketInputWrapper#setTimeout(long)} for more information.
+  *
   * @see Socket#getChannel()
   *
   * @param socket
-  * @param timeout timeout in milliseconds. This may not always apply. zero
-  *        for waiting as long as necessary.
-  * @return InputStream for reading from the socket.
+  * @param timeout timeout in milliseconds. zero for waiting as
+  *                long as necessary.
+  * @return SocketInputWrapper for reading from the socket.
   * @throws IOException
   */
- public static InputStream getInputStream(Socket socket, long timeout)
+ public static SocketInputWrapper getInputStream(Socket socket, long timeout)
                                           throws IOException {
-   return (socket.getChannel() == null) ?
-         socket.getInputStream() : new SocketInputStream(socket, timeout);
+   InputStream stm = (socket.getChannel() == null) ?
+         socket.getInputStream() : new SocketInputStream(socket);
+   SocketInputWrapper w = new SocketInputWrapper(socket, stm);
+   w.setTimeout(timeout);
+   return w;
  }

  /**
@@ -503,7 +494,7 @@ public class NetUtils {
   * also takes a local address and port to bind the socket to.
   *
   * @param socket
-  * @param address the remote address
+  * @param endpoint the remote address
   * @param localAddr the local address to bind the socket to
   * @param timeout timeout in milliseconds
   */
@@ -558,15 +549,10 @@ public class NetUtils {
   * @return its IP address in the string format
   */
  public static String normalizeHostName(String name) {
-   if (Character.digit(name.charAt(0), 10) != -1) { // it is an IP
+   try {
+     return InetAddress.getByName(name).getHostAddress();
+   } catch (UnknownHostException e) {
      return name;
-   } else {
-     try {
-       InetAddress ipAddress = InetAddress.getByName(name);
-       return ipAddress.getHostAddress();
-     } catch (UnknownHostException e) {
-       return name;
-     }
    }
  }

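A brief usage sketch of the reworked getInputStream() API described in the javadoc above; the helper method and the connected java.net.Socket are hypothetical, and the timeouts are arbitrary:

import java.io.IOException;
import java.net.Socket;

import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.SocketInputWrapper;

public class TunableTimeoutRead {
  static int readFirstByte(Socket s) throws IOException {
    // 10 second read timeout; with a channel this configures the
    // non-blocking reader, without one it falls back to Socket#setSoTimeout.
    SocketInputWrapper in = NetUtils.getInputStream(s, 10000);
    int b = in.read();
    // The timeout can be tightened later without reopening the stream.
    in.setTimeout(1000);
    return b;
  }
}
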
@@ -248,6 +248,10 @@ abstract class SocketIOWithTimeout {
     }
   }

+  public void setTimeout(long timeoutMs) {
+    this.timeout = timeoutMs;
+  }
+
   private static String timeoutExceptionString(SelectableChannel channel,
                                                long timeout, int ops) {

@@ -28,9 +28,6 @@ import java.nio.channels.ReadableByteChannel;
 import java.nio.channels.SelectableChannel;
 import java.nio.channels.SelectionKey;

-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
 /**
  * This implements an input stream that can have a timeout while reading.
  * This sets non-blocking flag on the socket channel.
@@ -40,9 +37,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * IllegalBlockingModeException.
  * Please use {@link SocketOutputStream} for writing.
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Unstable
-public class SocketInputStream extends InputStream
+class SocketInputStream extends InputStream
   implements ReadableByteChannel {

   private Reader reader;
@@ -171,4 +166,8 @@ public class SocketInputStream extends InputStream
   public void waitForReadable() throws IOException {
     reader.waitForIO(SelectionKey.OP_READ);
   }
+
+  public void setTimeout(long timeoutMs) {
+    reader.setTimeout(timeoutMs);
+  }
 }

@ -0,0 +1,88 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;

import java.io.FilterInputStream;

import java.io.InputStream;
import java.net.Socket;
import java.net.SocketException;
import java.nio.channels.ReadableByteChannel;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

import com.google.common.base.Preconditions;

/**
* A wrapper stream around a socket which allows setting of its timeout. If the
* socket has a channel, this uses non-blocking IO via the package-private
* {@link SocketInputStream} implementation. Otherwise, timeouts are managed by
* setting the underlying socket timeout itself.
*/
@InterfaceAudience.LimitedPrivate("HDFS")
@InterfaceStability.Unstable
public class SocketInputWrapper extends FilterInputStream {
private final Socket socket;
private final boolean hasChannel;

SocketInputWrapper(Socket s, InputStream is) {
super(is);
this.socket = s;
this.hasChannel = s.getChannel() != null;
if (hasChannel) {
Preconditions.checkArgument(is instanceof SocketInputStream,
"Expected a SocketInputStream when there is a channel. " +
"Got: %s", is);
}
}

/**
* Set the timeout for reads from this stream.
*
* Note: the behavior here can differ subtly depending on whether the
* underlying socket has an associated Channel. In particular, if there is no
* channel, then this call will affect the socket timeout for <em>all</em>
* readers of this socket. If there is a channel, then this call will affect
* the timeout only for <em>this</em> stream. As such, it is recommended to
* only create one {@link SocketInputWrapper} instance per socket.
*
* @param timeoutMs
* the new timeout, 0 for no timeout
* @throws SocketException
* if the timeout cannot be set
*/
public void setTimeout(long timeoutMs) throws SocketException {
if (hasChannel) {
((SocketInputStream)in).setTimeout(timeoutMs);
} else {
socket.setSoTimeout((int)timeoutMs);
}
}

/**
* @return an underlying ReadableByteChannel implementation.
* @throws IllegalStateException if this socket does not have a channel
*/
public ReadableByteChannel getReadableByteChannel() {
Preconditions.checkState(hasChannel,
"Socket %s does not have a channel",
this.socket);
return (SocketInputStream)in;
}
}
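Illustrative usage sketch (not part of the patch): with this change NetUtils.getInputStream hands back a SocketInputWrapper, so callers can adjust the read timeout after the fact. The host/port and the 10-second value below are made up for the example.

    import java.net.Socket;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.net.SocketInputWrapper;

    public class SocketReadTimeoutExample {
      public static void main(String[] args) throws Exception {
        Socket socket = new Socket("localhost", 8020); // hypothetical endpoint
        // The wrapper picks non-blocking IO when the socket has a channel,
        // and falls back to Socket#setSoTimeout otherwise.
        SocketInputWrapper in = NetUtils.getInputStream(socket, 10000);
        int firstByte = in.read(); // throws SocketTimeoutException after ~10s of silence
        in.setTimeout(1000);       // later reads now time out after ~1s
        socket.close();
      }
    }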
@ -1,232 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
package org.apache.hadoop.security;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.ServerSocket;
|
||||
import java.security.Principal;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
import javax.net.ssl.SSLContext;
|
||||
import javax.net.ssl.SSLServerSocket;
|
||||
import javax.net.ssl.SSLServerSocketFactory;
|
||||
import javax.net.ssl.SSLSocket;
|
||||
import javax.security.auth.kerberos.KerberosPrincipal;
|
||||
import javax.servlet.Filter;
|
||||
import javax.servlet.FilterChain;
|
||||
import javax.servlet.FilterConfig;
|
||||
import javax.servlet.ServletException;
|
||||
import javax.servlet.ServletRequest;
|
||||
import javax.servlet.ServletResponse;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpServletRequestWrapper;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.mortbay.io.EndPoint;
|
||||
import org.mortbay.jetty.HttpSchemes;
|
||||
import org.mortbay.jetty.Request;
|
||||
import org.mortbay.jetty.security.ServletSSL;
|
||||
import org.mortbay.jetty.security.SslSocketConnector;
|
||||
|
||||
/**
|
||||
* Extend Jetty's {@link SslSocketConnector} to optionally also provide
|
||||
* Kerberos5ized SSL sockets. The only change in behavior from superclass
|
||||
* is that we no longer honor requests to turn off NeedAuthentication when
|
||||
* running with Kerberos support.
|
||||
*/
|
||||
public class Krb5AndCertsSslSocketConnector extends SslSocketConnector {
|
||||
public static final List<String> KRB5_CIPHER_SUITES =
|
||||
Collections.unmodifiableList(Collections.singletonList(
|
||||
"TLS_KRB5_WITH_3DES_EDE_CBC_SHA"));
|
||||
static {
|
||||
SecurityUtil.initKrb5CipherSuites();
|
||||
}
|
||||
|
||||
private static final Log LOG = LogFactory
|
||||
.getLog(Krb5AndCertsSslSocketConnector.class);
|
||||
|
||||
private static final String REMOTE_PRINCIPAL = "remote_principal";
|
||||
|
||||
public enum MODE {KRB, CERTS, BOTH} // Support Kerberos, certificates or both?
|
||||
|
||||
private final boolean useKrb;
|
||||
private final boolean useCerts;
|
||||
|
||||
public Krb5AndCertsSslSocketConnector() {
|
||||
super();
|
||||
useKrb = true;
|
||||
useCerts = false;
|
||||
|
||||
setPasswords();
|
||||
}
|
||||
|
||||
public Krb5AndCertsSslSocketConnector(MODE mode) {
|
||||
super();
|
||||
useKrb = mode == MODE.KRB || mode == MODE.BOTH;
|
||||
useCerts = mode == MODE.CERTS || mode == MODE.BOTH;
|
||||
setPasswords();
|
||||
logIfDebug("useKerb = " + useKrb + ", useCerts = " + useCerts);
|
||||
}
|
||||
|
||||
// If not using Certs, set passwords to random gibberish or else
|
||||
// Jetty will actually prompt the user for some.
|
||||
private void setPasswords() {
|
||||
if(!useCerts) {
|
||||
Random r = new Random();
|
||||
System.setProperty("jetty.ssl.password", String.valueOf(r.nextLong()));
|
||||
System.setProperty("jetty.ssl.keypassword", String.valueOf(r.nextLong()));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected SSLServerSocketFactory createFactory() throws Exception {
|
||||
if(useCerts)
|
||||
return super.createFactory();
|
||||
|
||||
SSLContext context = super.getProvider()==null
|
||||
? SSLContext.getInstance(super.getProtocol())
|
||||
:SSLContext.getInstance(super.getProtocol(), super.getProvider());
|
||||
context.init(null, null, null);
|
||||
|
||||
return context.getServerSocketFactory();
|
||||
}
|
||||
|
||||
/* (non-Javadoc)
|
||||
* @see org.mortbay.jetty.security.SslSocketConnector#newServerSocket(java.lang.String, int, int)
|
||||
*/
|
||||
@Override
|
||||
protected ServerSocket newServerSocket(String host, int port, int backlog)
|
||||
throws IOException {
|
||||
logIfDebug("Creating new KrbServerSocket for: " + host);
|
||||
SSLServerSocket ss = null;
|
||||
|
||||
if(useCerts) // Get the server socket from the SSL super impl
|
||||
ss = (SSLServerSocket)super.newServerSocket(host, port, backlog);
|
||||
else { // Create a default server socket
|
||||
try {
|
||||
ss = (SSLServerSocket)(host == null
|
||||
? createFactory().createServerSocket(port, backlog) :
|
||||
createFactory().createServerSocket(port, backlog, InetAddress.getByName(host)));
|
||||
} catch (Exception e)
|
||||
{
|
||||
LOG.warn("Could not create KRB5 Listener", e);
|
||||
throw new IOException("Could not create KRB5 Listener: " + e.toString());
|
||||
}
|
||||
}
|
||||
|
||||
// Add Kerberos ciphers to this socket server if needed.
|
||||
if(useKrb) {
|
||||
ss.setNeedClientAuth(true);
|
||||
String [] combined;
|
||||
if(useCerts) { // combine the cipher suites
|
||||
String[] certs = ss.getEnabledCipherSuites();
|
||||
combined = new String[certs.length + KRB5_CIPHER_SUITES.size()];
|
||||
System.arraycopy(certs, 0, combined, 0, certs.length);
|
||||
System.arraycopy(KRB5_CIPHER_SUITES.toArray(new String[0]), 0, combined,
|
||||
certs.length, KRB5_CIPHER_SUITES.size());
|
||||
} else { // Just enable Kerberos auth
|
||||
combined = KRB5_CIPHER_SUITES.toArray(new String[0]);
|
||||
}
|
||||
|
||||
ss.setEnabledCipherSuites(combined);
|
||||
}
|
||||
|
||||
return ss;
|
||||
};
|
||||
|
||||
@Override
|
||||
public void customize(EndPoint endpoint, Request request) throws IOException {
|
||||
if(useKrb) { // Add Kerberos-specific info
|
||||
SSLSocket sslSocket = (SSLSocket)endpoint.getTransport();
|
||||
Principal remotePrincipal = sslSocket.getSession().getPeerPrincipal();
|
||||
logIfDebug("Remote principal = " + remotePrincipal);
|
||||
request.setScheme(HttpSchemes.HTTPS);
|
||||
request.setAttribute(REMOTE_PRINCIPAL, remotePrincipal);
|
||||
|
||||
if(!useCerts) { // Add extra info that would have been added by super
|
||||
String cipherSuite = sslSocket.getSession().getCipherSuite();
|
||||
Integer keySize = Integer.valueOf(ServletSSL.deduceKeyLength(cipherSuite));;
|
||||
|
||||
request.setAttribute("javax.servlet.request.cipher_suite", cipherSuite);
|
||||
request.setAttribute("javax.servlet.request.key_size", keySize);
|
||||
}
|
||||
}
|
||||
|
||||
if(useCerts) super.customize(endpoint, request);
|
||||
}
|
||||
|
||||
private void logIfDebug(String s) {
|
||||
if(LOG.isDebugEnabled())
|
||||
LOG.debug(s);
|
||||
}
|
||||
|
||||
/**
|
||||
* Filter that takes the Kerberos principal identified in the
|
||||
* {@link Krb5AndCertsSslSocketConnector} and provides it the to the servlet
|
||||
* at runtime, setting the principal and short name.
|
||||
*/
|
||||
public static class Krb5SslFilter implements Filter {
|
||||
@Override
|
||||
public void doFilter(ServletRequest req, ServletResponse resp,
|
||||
FilterChain chain) throws IOException, ServletException {
|
||||
final Principal princ =
|
||||
(Principal)req.getAttribute(Krb5AndCertsSslSocketConnector.REMOTE_PRINCIPAL);
|
||||
|
||||
if(princ == null || !(princ instanceof KerberosPrincipal)) {
|
||||
// Should never actually get here, since should be rejected at socket
|
||||
// level.
|
||||
LOG.warn("User not authenticated via kerberos from " + req.getRemoteAddr());
|
||||
((HttpServletResponse)resp).sendError(HttpServletResponse.SC_FORBIDDEN,
|
||||
"User not authenticated via Kerberos");
|
||||
return;
|
||||
}
|
||||
|
||||
// Provide principal information for servlet at runtime
|
||||
ServletRequest wrapper =
|
||||
new HttpServletRequestWrapper((HttpServletRequest) req) {
|
||||
@Override
|
||||
public Principal getUserPrincipal() {
|
||||
return princ;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the full name of this remote user.
|
||||
* @see javax.servlet.http.HttpServletRequestWrapper#getRemoteUser()
|
||||
*/
|
||||
@Override
|
||||
public String getRemoteUser() {
|
||||
return princ.getName();
|
||||
}
|
||||
};
|
||||
|
||||
chain.doFilter(wrapper, resp);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void init(FilterConfig arg0) throws ServletException {
|
||||
/* Nothing to do here */
|
||||
}
|
||||
|
||||
@Override
|
||||
public void destroy() { /* Nothing to do here */ }
|
||||
}
|
||||
}
|
|
@ -17,14 +17,11 @@
package org.apache.hadoop.security;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.net.UnknownHostException;
import java.security.AccessController;
import java.security.PrivilegedAction;

@ -45,6 +42,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;

@ -134,79 +133,6 @@ public class SecurityUtil {
return isTGSPrincipal(ticket.getServer());
}

/**
* Explicitly pull the service ticket for the specified host. This solves a
* problem with Java's Kerberos SSL problem where the client cannot
* authenticate against a cross-realm service. It is necessary for clients
* making kerberized https requests to call this method on the target URL
* to ensure that in a cross-realm environment the remote host will be
* successfully authenticated.
*
* This method is internal to Hadoop and should not be used by other
* applications. This method should not be considered stable or open:
* it will be removed when the Java behavior is changed.
*
* @param remoteHost Target URL the krb-https client will access
* @throws IOException if the service ticket cannot be retrieved
*/
public static void fetchServiceTicket(URL remoteHost) throws IOException {
if(!UserGroupInformation.isSecurityEnabled())
return;

String serviceName = "host/" + remoteHost.getHost();
if (LOG.isDebugEnabled())
LOG.debug("Fetching service ticket for host at: " + serviceName);
Object serviceCred = null;
Method credsToTicketMeth;
Class<?> krb5utilClass;
try {
Class<?> principalClass;
Class<?> credentialsClass;

if (System.getProperty("java.vendor").contains("IBM")) {
principalClass = Class.forName("com.ibm.security.krb5.PrincipalName");

credentialsClass = Class.forName("com.ibm.security.krb5.Credentials");
krb5utilClass = Class.forName("com.ibm.security.jgss.mech.krb5");
} else {
principalClass = Class.forName("sun.security.krb5.PrincipalName");
credentialsClass = Class.forName("sun.security.krb5.Credentials");
krb5utilClass = Class.forName("sun.security.jgss.krb5.Krb5Util");
}
@SuppressWarnings("rawtypes")
Constructor principalConstructor = principalClass.getConstructor(String.class,
int.class);
Field KRB_NT_SRV_HST = principalClass.getDeclaredField("KRB_NT_SRV_HST");
Method acquireServiceCredsMeth =
credentialsClass.getDeclaredMethod("acquireServiceCreds",
String.class, credentialsClass);
Method ticketToCredsMeth = krb5utilClass.getDeclaredMethod("ticketToCreds",
KerberosTicket.class);
credsToTicketMeth = krb5utilClass.getDeclaredMethod("credsToTicket",
credentialsClass);

Object principal = principalConstructor.newInstance(serviceName,
KRB_NT_SRV_HST.get(principalClass));

serviceCred = acquireServiceCredsMeth.invoke(credentialsClass,
principal.toString(),
ticketToCredsMeth.invoke(krb5utilClass, getTgtFromSubject()));
} catch (Exception e) {
throw new IOException("Can't get service ticket for: "
+ serviceName, e);
}
if (serviceCred == null) {
throw new IOException("Can't get service ticket for " + serviceName);
}
try {
Subject.getSubject(AccessController.getContext()).getPrivateCredentials()
.add(credsToTicketMeth.invoke(krb5utilClass, serviceCred));
} catch (Exception e) {
throw new IOException("Can't get service ticket for: "
+ serviceName, e);
}
}

/**
* Convert Kerberos principal name pattern to valid Kerberos principal
* names. It replaces hostname pattern with hostname, which should be

@ -513,6 +439,30 @@ public class SecurityUtil {
}
}

/**
* Open a (if need be) secure connection to a URL in a secure environment
* that is using SPNEGO to authenticate its URLs. All Namenode and Secondary
* Namenode URLs that are protected via SPNEGO should be accessed via this
* method.
*
* @param url to authenticate via SPNEGO.
* @return A connection that has been authenticated via SPNEGO
* @throws IOException If unable to authenticate via SPNEGO
*/
public static URLConnection openSecureHttpConnection(URL url) throws IOException {
if(!UserGroupInformation.isSecurityEnabled()) {
return url.openConnection();
}

AuthenticatedURL.Token token = new AuthenticatedURL.Token();
try {
return new AuthenticatedURL().openConnection(url, token);
} catch (AuthenticationException e) {
throw new IOException("Exception trying to open authenticated connection to "
+ url, e);
}
}

/**
* Resolves a host subject to the security requirements determined by
* hadoop.security.token.service.use_ip.

@ -664,10 +614,4 @@ public class SecurityUtil {
}
}

public static void initKrb5CipherSuites() {
if (UserGroupInformation.isSecurityEnabled()) {
System.setProperty("https.cipherSuites",
Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
}
}
}
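Illustrative sketch (not from the patch) of calling the new SPNEGO helper; the URL is a hypothetical NameNode servlet address.

    import java.io.InputStream;
    import java.net.URL;
    import java.net.URLConnection;
    import org.apache.hadoop.security.SecurityUtil;

    public class SpnegoFetchExample {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://namenode.example.com:50070/jmx"); // hypothetical
        // Plain connection when security is off, SPNEGO-authenticated otherwise.
        URLConnection conn = SecurityUtil.openSecureHttpConnection(url);
        InputStream in = conn.getInputStream();
        try {
          System.out.println("response bytes available: " + in.available());
        } finally {
          in.close();
        }
      }
    }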
@ -59,7 +59,7 @@ public class ServiceAuthorizationManager {
public static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+ServiceAuthorizationManager.class.getName());

private static final String AUTHZ_SUCCESSFULL_FOR = "Authorization successfull for ";
private static final String AUTHZ_SUCCESSFUL_FOR = "Authorization successful for ";
private static final String AUTHZ_FAILED_FOR = "Authorization failed for ";

@ -108,7 +108,7 @@ public class ServiceAuthorizationManager {
" is not authorized for protocol " + protocol +
", expected client Kerberos principal is " + clientPrincipal);
}
AUDITLOG.info(AUTHZ_SUCCESSFULL_FOR + user + " for protocol="+protocol);
AUDITLOG.info(AUTHZ_SUCCESSFUL_FOR + user + " for protocol="+protocol);
}

public synchronized void refresh(Configuration conf,
@ -18,10 +18,15 @@

package org.apache.hadoop.security.token;

import com.google.common.collect.Maps;

import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.ServiceLoader;

import org.apache.commons.codec.binary.Base64;

@ -37,6 +42,7 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.ReflectionUtils;

/**
* The client-side form of the token.

@ -45,6 +51,9 @@ import org.apache.hadoop.io.WritableUtils;
@InterfaceStability.Evolving
public class Token<T extends TokenIdentifier> implements Writable {
public static final Log LOG = LogFactory.getLog(Token.class);

private static Map<Text, Class<? extends TokenIdentifier>> tokenKindMap;

private byte[] identifier;
private byte[] password;
private Text kind;

@ -100,13 +109,49 @@ public class Token<T extends TokenIdentifier> implements Writable {
}

/**
* Get the token identifier
* @return the token identifier
* Get the token identifier's byte representation
* @return the token identifier's byte representation
*/
public byte[] getIdentifier() {
return identifier;
}

private static synchronized Class<? extends TokenIdentifier>
getClassForIdentifier(Text kind) {
if (tokenKindMap == null) {
tokenKindMap = Maps.newHashMap();
for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
tokenKindMap.put(id.getKind(), id.getClass());
}
}
Class<? extends TokenIdentifier> cls = tokenKindMap.get(kind);
if (cls == null) {
LOG.warn("Cannot find class for token kind " + kind);
return null;
}
return cls;
}

/**
* Get the token identifier object, or null if it could not be constructed
* (because the class could not be loaded, for example).
* @return the token identifier, or null
* @throws IOException
*/
@SuppressWarnings("unchecked")
public T decodeIdentifier() throws IOException {
Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
if (cls == null) {
return null;
}
TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
DataInputStream in = new DataInputStream(buf);
tokenIdentifier.readFields(in);
in.close();
return (T) tokenIdentifier;
}

/**
* Get the token password/secret
* @return the token password/secret

@ -261,15 +306,30 @@ public class Token<T extends TokenIdentifier> implements Writable {
}
}

private void identifierToString(StringBuilder buffer) {
T id = null;
try {
id = decodeIdentifier();
} catch (IOException e) {
// handle in the finally block
} finally {
if (id != null) {
buffer.append("(").append(id).append(")");
} else {
addBinaryBuffer(buffer, identifier);
}
}
}

@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append("Ident: ");
addBinaryBuffer(buffer, identifier);
buffer.append(", Kind: ");
buffer.append("Kind: ");
buffer.append(kind.toString());
buffer.append(", Service: ");
buffer.append(service.toString());
buffer.append(", Ident: ");
identifierToString(buffer);
return buffer.toString();
}
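A small sketch (illustration only) of the new decodeIdentifier call; identifier classes are looked up through the ServiceLoader registration shown above, so an unregistered kind simply yields null.

    import java.io.IOException;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class TokenDescribeExample {
      // Prints a readable form of a token's identifier when its kind is known.
      static void describe(Token<? extends TokenIdentifier> token) throws IOException {
        TokenIdentifier id = token.decodeIdentifier();
        if (id != null) {
          System.out.println("Token identifier: " + id);
        } else {
          System.out.println("Unrecognized token kind: " + token.getKind());
        }
      }
    }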
@ -22,11 +22,20 @@ import java.util.regex.Pattern;

import org.apache.hadoop.classification.InterfaceAudience;

import com.google.common.collect.ComparisonChain;

@InterfaceAudience.Private
public abstract class VersionUtil {

private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)");

/**
* Suffix added by maven for nightly builds and other snapshot releases.
* These releases are considered to precede the non-SNAPSHOT version
* with the same version number.
*/
private static final String SNAPSHOT_SUFFIX = "-SNAPSHOT";

/**
* This function splits the two versions on "." and performs a
* naturally-ordered comparison of the resulting components. For example, the

@ -48,6 +57,11 @@ public abstract class VersionUtil {
* between the two versions, then the version with fewer components is
* considered to precede the version with more components.
*
* In addition to the above rules, there is one special case: maven SNAPSHOT
* releases are considered to precede a non-SNAPSHOT release with an
* otherwise identical version number. For example, 2.0-SNAPSHOT precedes
* 2.0.
*
* This function returns a negative integer if version1 precedes version2, a
* positive integer if version2 precedes version1, and 0 if and only if the
* two versions' components are identical in value and cardinality.

@ -61,6 +75,11 @@ public abstract class VersionUtil {
* versions are equal.
*/
public static int compareVersions(String version1, String version2) {
boolean isSnapshot1 = version1.endsWith(SNAPSHOT_SUFFIX);
boolean isSnapshot2 = version2.endsWith(SNAPSHOT_SUFFIX);
version1 = stripSnapshotSuffix(version1);
version2 = stripSnapshotSuffix(version2);

String[] version1Parts = version1.split("\\.");
String[] version2Parts = version2.split("\\.");

@ -87,7 +106,19 @@ public abstract class VersionUtil {
return component1.length() - component2.length();
}
}
return version1Parts.length - version2Parts.length;

return ComparisonChain.start()
.compare(version1Parts.length, version2Parts.length)
.compare(isSnapshot2, isSnapshot1)
.result();
}

private static String stripSnapshotSuffix(String version) {
if (version.endsWith(SNAPSHOT_SUFFIX)) {
return version.substring(0, version.length() - SNAPSHOT_SUFFIX.length());
} else {
return version;
}
}

private static boolean isNumeric(String s) {
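Worked examples (illustrative; assumes the class lives in org.apache.hadoop.util) of the ordering rules described in the javadoc above, including the new SNAPSHOT special case.

    import org.apache.hadoop.util.VersionUtil;

    public class VersionCompareExample {
      public static void main(String[] args) {
        // Negative: a SNAPSHOT precedes the matching release.
        System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0"));
        // Zero: identical components compare equal.
        System.out.println(VersionUtil.compareVersions("2.0.0", "2.0.0"));
        // Positive: the version with more components follows the shorter one.
        System.out.println(VersionUtil.compareVersions("2.0.1", "2.0"));
      }
    }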
@ -1,4 +1,4 @@
# AC_COMPUTE_NEEDED_DSO(LIBRARY, PREPROC_SYMBOL)
# AC_COMPUTE_NEEDED_DSO(LIBRARY, TEST_PROGRAM, PREPROC_SYMBOL)
# --------------------------------------------------
# Compute the 'actual' dynamic-library used
# for LIBRARY and set it to PREPROC_SYMBOL

@ -6,7 +6,7 @@ AC_DEFUN([AC_COMPUTE_NEEDED_DSO],
[
AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1,
[
echo 'int main(int argc, char **argv){return 0;}' > conftest.c
echo '$2' > conftest.c
if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then
dnl Try objdump and ldd in that order to get the dynamic library
if test ! -z "`which objdump | grep -v 'no objdump'`"; then

@ -24,5 +24,5 @@ AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_lib
rm -f conftest*
]
)
AC_DEFINE_UNQUOTED($2, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
AC_DEFINE_UNQUOTED($3, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
])# AC_COMPUTE_NEEDED_DSO
@ -87,10 +87,20 @@ CPPFLAGS=$cppflags_bak
AC_SUBST([JNI_CPPFLAGS])

dnl Check for zlib headers
AC_CHECK_HEADERS([zlib.h zconf.h], AC_COMPUTE_NEEDED_DSO(z,HADOOP_ZLIB_LIBRARY), AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
AC_CHECK_HEADERS([zlib.h zconf.h],
AC_COMPUTE_NEEDED_DSO(z,
[#include "zlib.h"
int main(int argc, char **argv){zlibVersion();return 0;}],
HADOOP_ZLIB_LIBRARY),
AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))

dnl Check for snappy headers
AC_CHECK_HEADERS([snappy-c.h], AC_COMPUTE_NEEDED_DSO(snappy,HADOOP_SNAPPY_LIBRARY), AC_MSG_WARN(Snappy headers were not found... building without snappy.))
AC_CHECK_HEADERS([snappy-c.h],
AC_COMPUTE_NEEDED_DSO(snappy,
[#include "snappy-c.h"
int main(int argc, char **argv){snappy_compress(0,0,0,0);return 0;}],
HADOOP_SNAPPY_LIBRARY),
AC_MSG_WARN(Snappy headers were not found... building without snappy.))

dnl Check for headers needed by the native Group resolution implementation
AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
@ -70,7 +70,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet

// set the name of the group for subsequent calls to getnetgrent
// note that we want to end group lokup regardless whether setnetgrent
// was successfull or not (as long as it was called we need to call
// was successful or not (as long as it was called we need to call
// endnetgrent)
setnetgrentCalledFlag = 1;
if(setnetgrent(cgroup) == 1) {
@ -48,10 +48,10 @@ done
export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS"
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"

export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"

# The ZKFC does not need a large heap, and keeping it small avoids
# any potential for long GC pauses
@ -128,13 +128,6 @@
</description>
</property>

<property>
<name>dfs.secondary.https.port</name>
<value>50490</value>
<description>The https port where secondary-namenode binds</description>

</property>

<property>
<name>dfs.datanode.kerberos.principal</name>
<value>dn/_HOST@${local.realm}</value>
@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
#Security appender
#
hadoop.security.logger=INFO,console
hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}

@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
#
# hdfs audit logging
#
hdfs.audit.logger=INFO,console
hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}

@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# mapred audit logging
#
mapred.audit.logger=INFO,console
mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
@ -19,7 +19,6 @@ option java_package = "org.apache.hadoop.ipc.protobuf";
option java_outer_classname = "RpcPayloadHeaderProtos";
option java_generate_equals_and_hash = true;

/**
* This is the rpc payload header. It is sent with every rpc call.
*

@ -34,8 +33,6 @@ option java_generate_equals_and_hash = true;
*
*/

/**
* RpcKind determine the rpcEngine and the serialization of the rpc payload
*/

@ -54,5 +51,27 @@ enum RpcPayloadOperationProto {
message RpcPayloadHeaderProto { // the header for the RpcRequest
optional RpcKindProto rpcKind = 1;
optional RpcPayloadOperationProto rpcOp = 2;
optional uint32 callId = 3; // each rpc has a callId that is also used in response
required uint32 callId = 3; // each rpc has a callId that is also used in response
}

enum RpcStatusProto {
SUCCESS = 0; // RPC succeeded
ERROR = 1; // RPC Failed
FATAL = 2; // Fatal error - connection is closed
}

/**
* Rpc Response Header
* - If successfull then the Respose follows after this header
* - length (4 byte int), followed by the response
* - If error or fatal - the exception info follow
* - length (4 byte int) Class name of exception - UTF-8 string
* - length (4 byte int) Stacktrace - UTF-8 string
* - if the strings are null then the length is -1
* In case of Fatal error then the respose contains the Serverside's IPC version
*/
message RpcResponseHeaderProto {
required uint32 callId = 1; // callId used in Request
required RpcStatusProto status = 2;
optional uint32 serverIpcVersionNum = 3; // in case of an fatal IPC error
}
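A hedged sketch of what the callId change means for the generated Java API (standard protobuf builder code, assuming the usual protoc output under org.apache.hadoop.ipc.protobuf):

    import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;

    public class RpcHeaderBuilderExample {
      public static void main(String[] args) {
        // callId is now required, so build() fails if it is never set.
        RpcPayloadHeaderProto header = RpcPayloadHeaderProto.newBuilder()
            .setCallId(42)   // omitting this line makes build() throw
            .build();
        System.out.println("callId=" + header.getCallId());
      }
    }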
@ -62,6 +62,15 @@
<description>Is service-level authorization enabled?</description>
</property>

<property>
<name>hadoop.security.instrumentation.requires.admin</name>
<value>false</value>
<description>
Indicates if administrator ACLs are required to access
instrumentation servlets (JMX, METRICS, CONF, STACKS).
</description>
</property>

<property>
<name>hadoop.security.authentication</name>
<value>simple</value>
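For illustration, a minimal sketch of enabling the new key programmatically, mirroring the conf.setBoolean calls used by the HttpServer tests later in this patch:

    import org.apache.hadoop.conf.Configuration;

    public class InstrumentationAclExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Require the admin ACL for /jmx, /metrics, /conf and /stacks;
        // the default declared above in core-default.xml is false.
        conf.setBoolean("hadoop.security.instrumentation.requires.admin", true);
        conf.setBoolean("hadoop.security.authorization", true);
        System.out.println(conf.getBoolean(
            "hadoop.security.instrumentation.requires.admin", false));
      }
    }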
@ -1,78 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.DataInput;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.IOException;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
import org.apache.hadoop.io.DataOutputBuffer;
|
||||
|
||||
public class TestBlockLocation extends TestCase {
|
||||
// Verify fix of bug identified in HADOOP-6004
|
||||
public void testDeserialization() throws IOException {
|
||||
// Create a test BlockLocation
|
||||
String[] names = {"one", "two" };
|
||||
String[] hosts = {"three", "four" };
|
||||
String[] topologyPaths = {"five", "six"};
|
||||
long offset = 25l;
|
||||
long length = 55l;
|
||||
|
||||
BlockLocation bl = new BlockLocation(names, hosts, topologyPaths,
|
||||
offset, length);
|
||||
|
||||
DataOutputBuffer dob = new DataOutputBuffer();
|
||||
|
||||
// Serialize it
|
||||
try {
|
||||
bl.write(dob);
|
||||
} catch (IOException e) {
|
||||
fail("Unable to serialize data: " + e.getMessage());
|
||||
}
|
||||
|
||||
byte[] bytes = dob.getData();
|
||||
DataInput da = new DataInputStream(new ByteArrayInputStream(bytes));
|
||||
|
||||
// Try to re-create the BlockLocation the same way as is done during
|
||||
// deserialization
|
||||
BlockLocation bl2 = new BlockLocation();
|
||||
|
||||
try {
|
||||
bl2.readFields(da);
|
||||
} catch (IOException e) {
|
||||
fail("Unable to deserialize BlockLocation: " + e.getMessage());
|
||||
}
|
||||
|
||||
// Check that we got back what we started with
|
||||
verifyDeserialization(bl2.getHosts(), hosts);
|
||||
verifyDeserialization(bl2.getNames(), names);
|
||||
verifyDeserialization(bl2.getTopologyPaths(), topologyPaths);
|
||||
assertEquals(bl2.getOffset(), offset);
|
||||
assertEquals(bl2.getLength(), length);
|
||||
}
|
||||
|
||||
private void verifyDeserialization(String[] ar1, String[] ar2) {
|
||||
assertEquals(ar1.length, ar2.length);
|
||||
|
||||
for(int i = 0; i < ar1.length; i++)
|
||||
assertEquals(ar1[i], ar2[i]);
|
||||
}
|
||||
}
|
|
@ -18,11 +18,14 @@
|
|||
package org.apache.hadoop.fs;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem.Statistics;
|
||||
|
||||
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
|
||||
|
||||
import java.io.*;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
|
@ -233,4 +236,16 @@ public class TestLocalFileSystem {
|
|||
assertTrue("Did not delete file", fs.delete(file1));
|
||||
assertTrue("Did not delete non-empty dir", fs.delete(dir1));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStatistics() throws Exception {
|
||||
FileSystem.getLocal(new Configuration());
|
||||
int fileSchemeCount = 0;
|
||||
for (Statistics stats : FileSystem.getAllStatistics()) {
|
||||
if (stats.getScheme().equals("file")) {
|
||||
fileSchemeCount++;
|
||||
}
|
||||
}
|
||||
assertEquals(1, fileSchemeCount);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -71,11 +71,8 @@ public class ViewFileSystemBaseTest {
|
|||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
|
||||
// In case previous test was killed before cleanup
|
||||
fsTarget.delete(targetTestRoot, true);
|
||||
initializeTargetTestRoot();
|
||||
|
||||
fsTarget.mkdirs(targetTestRoot);
|
||||
// Make user and data dirs - we creates links to them in the mount table
|
||||
fsTarget.mkdirs(new Path(targetTestRoot,"user"));
|
||||
fsTarget.mkdirs(new Path(targetTestRoot,"data"));
|
||||
|
@ -99,7 +96,16 @@ public class ViewFileSystemBaseTest {
|
|||
fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
|
||||
}
|
||||
|
||||
void initializeTargetTestRoot() throws IOException {
|
||||
targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
|
||||
// In case previous test was killed before cleanup
|
||||
fsTarget.delete(targetTestRoot, true);
|
||||
|
||||
fsTarget.mkdirs(targetTestRoot);
|
||||
}
|
||||
|
||||
void setupMountPoints() {
|
||||
ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
|
||||
ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri());
|
||||
ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri());
|
||||
ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri());
|
||||
|
@ -121,7 +127,7 @@ public class ViewFileSystemBaseTest {
|
|||
}
|
||||
|
||||
int getExpectedMountPoints() {
|
||||
return 7;
|
||||
return 8;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -166,7 +172,7 @@ public class ViewFileSystemBaseTest {
|
|||
}
|
||||
}
|
||||
}
|
||||
Assert.assertEquals(expectedTokenCount / 2, delTokens.size());
|
||||
Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens.size());
|
||||
}
|
||||
|
||||
int getExpectedDelegationTokenCountWithCredentials() {
|
||||
|
@ -309,6 +315,16 @@ public class ViewFileSystemBaseTest {
|
|||
Assert.assertTrue("Renamed dest should exist as dir in target",
|
||||
fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
|
||||
|
||||
// Make a directory under a directory that's mounted from the root of another FS
|
||||
fsView.mkdirs(new Path("/targetRoot/dirFoo"));
|
||||
Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
|
||||
boolean dirFooPresent = false;
|
||||
for (FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) {
|
||||
if (fileStatus.getPath().getName().equals("dirFoo")) {
|
||||
dirFooPresent = true;
|
||||
}
|
||||
}
|
||||
Assert.assertTrue(dirFooPresent);
|
||||
}
|
||||
|
||||
// rename across mount points that point to same target also fail
|
||||
|
@ -418,7 +434,7 @@ public class ViewFileSystemBaseTest {
|
|||
}
|
||||
|
||||
int getExpectedDirPaths() {
|
||||
return 6;
|
||||
return 7;
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -33,6 +33,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
|
|||
import org.apache.hadoop.fs.BlockLocation;
|
||||
import org.apache.hadoop.fs.FileContext;
|
||||
import org.apache.hadoop.fs.FileContextTestHelper;
|
||||
import org.apache.hadoop.fs.RemoteIterator;
|
||||
import org.apache.hadoop.fs.FileContextTestHelper.fileType;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FsConstants;
|
||||
|
@ -77,12 +78,8 @@ public class ViewFsBaseTest {
|
|||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
initializeTargetTestRoot();
|
||||
|
||||
targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
|
||||
// In case previous test was killed before cleanup
|
||||
fcTarget.delete(targetTestRoot, true);
|
||||
|
||||
fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
|
||||
// Make user and data dirs - we creates links to them in the mount table
|
||||
fcTarget.mkdir(new Path(targetTestRoot,"user"),
|
||||
FileContext.DEFAULT_PERM, true);
|
||||
|
@ -100,6 +97,7 @@ public class ViewFsBaseTest {
|
|||
|
||||
// Set up the defaultMT in the config with our mount point links
|
||||
conf = new Configuration();
|
||||
ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
|
||||
ConfigUtil.addLink(conf, "/user",
|
||||
new Path(targetTestRoot,"user").toUri());
|
||||
ConfigUtil.addLink(conf, "/user2",
|
||||
|
@ -119,6 +117,14 @@ public class ViewFsBaseTest {
|
|||
// Also try viewfs://default/ - note authority is name of mount table
|
||||
}
|
||||
|
||||
void initializeTargetTestRoot() throws IOException {
|
||||
targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
|
||||
// In case previous test was killed before cleanup
|
||||
fcTarget.delete(targetTestRoot, true);
|
||||
|
||||
fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
fcTarget.delete(FileContextTestHelper.getTestRootPath(fcTarget), true);
|
||||
|
@ -128,7 +134,11 @@ public class ViewFsBaseTest {
|
|||
public void testGetMountPoints() {
|
||||
ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem();
|
||||
MountPoint[] mountPoints = viewfs.getMountPoints();
|
||||
Assert.assertEquals(7, mountPoints.length);
|
||||
Assert.assertEquals(8, mountPoints.length);
|
||||
}
|
||||
|
||||
int getExpectedDelegationTokenCount() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -140,7 +150,7 @@ public class ViewFsBaseTest {
|
|||
public void testGetDelegationTokens() throws IOException {
|
||||
List<Token<?>> delTokens =
|
||||
fcView.getDelegationTokens(new Path("/"), "sanjay");
|
||||
Assert.assertEquals(0, delTokens.size());
|
||||
Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
|
||||
}
|
||||
|
||||
|
||||
|
@ -281,6 +291,19 @@ public class ViewFsBaseTest {
|
|||
Assert.assertTrue("Renamed dest should exist as dir in target",
|
||||
isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
|
||||
|
||||
// Make a directory under a directory that's mounted from the root of another FS
|
||||
fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
|
||||
Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
|
||||
boolean dirFooPresent = false;
|
||||
RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path(
|
||||
"/targetRoot/"));
|
||||
while (dirContents.hasNext()) {
|
||||
FileStatus fileStatus = dirContents.next();
|
||||
if (fileStatus.getPath().getName().equals("dirFoo")) {
|
||||
dirFooPresent = true;
|
||||
}
|
||||
}
|
||||
Assert.assertTrue(dirFooPresent);
|
||||
}
|
||||
|
||||
// rename across mount points that point to same target also fail
|
||||
|
@ -358,7 +381,7 @@ public class ViewFsBaseTest {
|
|||
|
||||
FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
|
||||
FileStatus fs;
|
||||
Assert.assertEquals(6, dirPaths.length);
|
||||
Assert.assertEquals(7, dirPaths.length);
|
||||
fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths);
|
||||
Assert.assertNotNull(fs);
|
||||
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.apache.hadoop.http;
|
||||
|
||||
import org.apache.hadoop.security.authorize.AccessControlList;
|
||||
import org.junit.Assert;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
||||
|
@ -70,6 +71,12 @@ public class HttpServerFunctionalTest extends Assert {
|
|||
return createServer(TEST, conf);
|
||||
}
|
||||
|
||||
public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl)
|
||||
throws IOException {
|
||||
prepareTestWebapp();
|
||||
return createServer(TEST, conf, adminsAcl);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create but do not start the test webapp server. The test webapp dir is
|
||||
* prepared/checked in advance.
|
||||
|
@ -132,6 +139,11 @@ public class HttpServerFunctionalTest extends Assert {
|
|||
throws IOException {
|
||||
return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
|
||||
}
|
||||
|
||||
public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
|
||||
throws IOException {
|
||||
return new HttpServer(webapp, "0.0.0.0", 0, true, conf, adminsAcl);
|
||||
}
|
||||
/**
|
||||
* Create an HttpServer instance for the given webapp
|
||||
* @param webapp the webapp to work with
|
||||
|
|
|
@ -60,7 +60,6 @@ import org.apache.hadoop.security.authorize.AccessControlList;
|
|||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mortbay.util.ajax.JSON;
|
||||
|
||||
|
@ -360,6 +359,8 @@ public class TestHttpServer extends HttpServerFunctionalTest {
|
|||
Configuration conf = new Configuration();
|
||||
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
|
||||
true);
|
||||
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
|
||||
true);
|
||||
conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
|
||||
DummyFilterInitializer.class.getName());
|
||||
|
||||
|
@ -468,6 +469,26 @@ public class TestHttpServer extends HttpServerFunctionalTest {
|
|||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRequiresAuthorizationAccess() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
ServletContext context = Mockito.mock(ServletContext.class);
|
||||
Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
|
||||
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
|
||||
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
|
||||
|
||||
//requires admin access to instrumentation, FALSE by default
|
||||
Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response));
|
||||
|
||||
//requires admin access to instrumentation, TRUE
|
||||
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
|
||||
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
|
||||
AccessControlList acls = Mockito.mock(AccessControlList.class);
|
||||
Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
|
||||
Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
|
||||
Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response));
|
||||
}
|
||||
|
||||
@Test public void testBindAddress() throws Exception {
|
||||
checkBindAddress("0.0.0.0", 0, false).stop();
|
||||
// hang onto this one for a bit more testing
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.io;
|
|||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.charset.CharacterCodingException;
|
||||
import java.util.Random;
|
||||
|
@ -107,7 +108,6 @@ public class TestText extends TestCase {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
public void testIO() throws Exception {
|
||||
DataOutputBuffer out = new DataOutputBuffer();
|
||||
DataInputBuffer in = new DataInputBuffer();
|
||||
|
@ -137,6 +137,40 @@ public class TestText extends TestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void doTestLimitedIO(String str, int strLen) throws IOException {
|
||||
DataOutputBuffer out = new DataOutputBuffer();
|
||||
DataInputBuffer in = new DataInputBuffer();
|
||||
|
||||
out.reset();
|
||||
try {
|
||||
Text.writeString(out, str, strLen);
|
||||
fail("expected writeString to fail when told to write a string " +
|
||||
"that was too long! The string was '" + str + "'");
|
||||
} catch (IOException e) {
|
||||
}
|
||||
Text.writeString(out, str, strLen + 1);
|
||||
|
||||
// test that it reads correctly
|
||||
in.reset(out.getData(), out.getLength());
|
||||
in.mark(strLen);
|
||||
String after;
|
||||
try {
|
||||
after = Text.readString(in, strLen);
|
||||
fail("expected readString to fail when told to read a string " +
|
||||
"that was too long! The string was '" + str + "'");
|
||||
} catch (IOException e) {
|
||||
}
|
||||
in.reset();
|
||||
after = Text.readString(in, strLen + 1);
|
||||
assertTrue(str.equals(after));
|
||||
}
|
||||
|
||||
public void testLimitedIO() throws Exception {
|
||||
doTestLimitedIO("abcd", 4);
|
||||
doTestLimitedIO("", 0);
|
||||
doTestLimitedIO("1", 1);
|
||||
}
|
||||
|
||||
public void testCompare() throws Exception {
|
||||
DataOutputBuffer out1 = new DataOutputBuffer();
|
||||
DataOutputBuffer out2 = new DataOutputBuffer();
|
||||
|
@ -192,16 +226,6 @@ public class TestText extends TestCase {
|
|||
assertTrue(text.find("\u20ac", 5)==11);
|
||||
}
|
||||
|
||||
public void testClear() {
|
||||
Text text = new Text();
|
||||
assertEquals("", text.toString());
|
||||
assertEquals(0, text.getBytes().length);
|
||||
text = new Text("abcd\u20acbdcd\u20ac");
|
||||
text.clear();
|
||||
assertEquals("", text.toString());
|
||||
assertEquals(0, text.getBytes().length);
|
||||
}
|
||||
|
||||
public void testFindAfterUpdatingContents() throws Exception {
|
||||
Text text = new Text("abcd");
|
||||
text.set("a".getBytes());
|
||||
|
|
|
@ -322,6 +322,29 @@ public class TestRPC {
|
|||
server.stop();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testProxyAddress() throws Exception {
|
||||
Server server = RPC.getServer(TestProtocol.class,
|
||||
new TestImpl(), ADDRESS, 0, conf);
|
||||
TestProtocol proxy = null;
|
||||
|
||||
try {
|
||||
server.start();
|
||||
InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
// create a client
|
||||
proxy = (TestProtocol)RPC.getProxy(
|
||||
TestProtocol.class, TestProtocol.versionID, addr, conf);
|
||||
|
||||
assertEquals(addr, RPC.getServerAddress(proxy));
|
||||
} finally {
|
||||
server.stop();
|
||||
if (proxy != null) {
|
||||
RPC.stopProxy(proxy);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSlowRpc() throws Exception {
|
||||
System.out.println("Testing Slow RPC");
|
||||
|
|
|
@ -25,11 +25,16 @@ import java.net.ConnectException;
|
|||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.NetworkInterface;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
import java.net.SocketException;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.net.URI;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Enumeration;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import junit.framework.AssertionFailedError;
|
||||
|
||||
|
@ -37,7 +42,9 @@ import org.apache.commons.lang.StringUtils;
|
|||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.security.NetUtilsTestResolver;
|
||||
import org.junit.Assume;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
@ -50,6 +57,13 @@ public class TestNetUtils {
|
|||
private static final int LOCAL_PORT = 8080;
|
||||
private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT);
|
||||
|
||||
/**
|
||||
* Some slop around expected times when making sure timeouts behave
|
||||
* as expected. We assume that they will be accurate to within
|
||||
* this threshold.
|
||||
*/
|
||||
static final long TIME_FUDGE_MILLIS = 200;
|
||||
|
||||
/**
|
||||
* Test that we can't accidentally connect back to the connecting socket due
|
||||
* to a quirk in the TCP spec.
|
||||
|
@ -81,6 +95,79 @@ public class TestNetUtils {
|
|||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSocketReadTimeoutWithChannel() throws Exception {
|
||||
doSocketReadTimeoutTest(true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSocketReadTimeoutWithoutChannel() throws Exception {
|
||||
doSocketReadTimeoutTest(false);
|
||||
}
|
||||
|
||||
|
||||
private void doSocketReadTimeoutTest(boolean withChannel)
|
||||
throws IOException {
|
||||
// Binding a ServerSocket is enough to accept connections.
|
||||
// Rely on the backlog to accept for us.
|
||||
ServerSocket ss = new ServerSocket(0);
|
||||
|
||||
Socket s;
|
||||
if (withChannel) {
|
||||
s = NetUtils.getDefaultSocketFactory(new Configuration())
|
||||
.createSocket();
|
||||
Assume.assumeNotNull(s.getChannel());
|
||||
} else {
|
||||
s = new Socket();
|
||||
assertNull(s.getChannel());
|
||||
}
|
||||
|
||||
SocketInputWrapper stm = null;
|
||||
try {
|
||||
NetUtils.connect(s, ss.getLocalSocketAddress(), 1000);
|
||||
|
||||
stm = NetUtils.getInputStream(s, 1000);
|
||||
assertReadTimeout(stm, 1000);
|
||||
|
||||
// Change timeout, make sure it applies.
|
||||
stm.setTimeout(1);
|
||||
assertReadTimeout(stm, 1);
|
||||
|
||||
// If there is a channel, then setting the socket timeout
|
||||
// should not matter. If there is not a channel, it will
|
||||
// take effect.
|
||||
s.setSoTimeout(1000);
|
||||
if (withChannel) {
|
||||
assertReadTimeout(stm, 1);
|
||||
} else {
|
||||
assertReadTimeout(stm, 1000);
|
||||
}
|
||||
} finally {
|
||||
IOUtils.closeStream(stm);
|
||||
IOUtils.closeSocket(s);
|
||||
ss.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void assertReadTimeout(SocketInputWrapper stm, int timeoutMillis)
|
||||
throws IOException {
|
||||
long st = System.nanoTime();
|
||||
try {
|
||||
stm.read();
|
||||
fail("Didn't time out");
|
||||
} catch (SocketTimeoutException ste) {
|
||||
assertTimeSince(st, timeoutMillis);
|
||||
}
|
||||
}
|
||||
|
||||
private void assertTimeSince(long startNanos, int expectedMillis) {
|
||||
long durationNano = System.nanoTime() - startNanos;
|
||||
long millis = TimeUnit.MILLISECONDS.convert(
|
||||
durationNano, TimeUnit.NANOSECONDS);
|
||||
assertTrue("Expected " + expectedMillis + "ms, but took " + millis,
|
||||
Math.abs(millis - expectedMillis) < TIME_FUDGE_MILLIS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test for {
|
||||
* @throws UnknownHostException @link NetUtils#getLocalInetAddress(String)
|
||||
|
@ -512,6 +599,26 @@ public class TestNetUtils {
|
|||
assertEquals("scheme://host.a.b/path", uri.toString());
|
||||
}
|
||||
|
||||
/**
 * Test for {@link NetUtils#normalizeHostNames}
 */
@Test
public void testNormalizeHostName() {
  List<String> hosts = Arrays.asList(new String[] {"127.0.0.1",
      "localhost", "3w.org", "UnknownHost"});
  List<String> normalizedHosts = NetUtils.normalizeHostNames(hosts);
  // normalizing an IP address should return the same address
  assertEquals(normalizedHosts.get(0), hosts.get(0));
  // normalizing a resolvable hostname should return its resolved IP address
  assertFalse(normalizedHosts.get(1).equals(hosts.get(1)));
  assertEquals(normalizedHosts.get(1), hosts.get(0));
  // HADOOP-8372: normalizing a resolvable hostname that starts with a numeric
  // character should also return its IP address
  assertFalse(normalizedHosts.get(2).equals(hosts.get(2)));
  // normalizing an unresolvable hostname should return the hostname unchanged
  assertEquals(normalizedHosts.get(3), hosts.get(3));
}
|
||||
@Test
|
||||
public void testGetHostNameOfIP() {
|
||||
assertNull(NetUtils.getHostNameOfIP(null));
|
||||
@ -19,6 +19,7 @@ package org.apache.hadoop.net;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.io.OutputStream;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.nio.channels.Pipe;
|
||||
|
@ -26,8 +27,13 @@ import java.util.Arrays;
|
|||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.hadoop.test.MultithreadedTestUtil;
|
||||
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
|
||||
import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
* This tests timeouts from SocketInputStream and
|
||||
|
@ -36,14 +42,17 @@ import junit.framework.TestCase;
|
|||
* Normal read and write using these streams are tested by pretty much
|
||||
* every DFS unit test.
|
||||
*/
|
||||
public class TestSocketIOWithTimeout extends TestCase {
|
||||
public class TestSocketIOWithTimeout {
|
||||
|
||||
static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class);
|
||||
|
||||
private static int TIMEOUT = 1*1000;
|
||||
private static String TEST_STRING = "1234567890";
|
||||
|
||||
private void doIO(InputStream in, OutputStream out) throws IOException {
|
||||
private MultithreadedTestUtil.TestContext ctx = new TestContext();
|
||||
|
||||
private void doIO(InputStream in, OutputStream out,
|
||||
int expectedTimeout) throws IOException {
|
||||
/* Keep on writing or reading until we get SocketTimeoutException.
|
||||
* It expects this exception to occur within TIME_FUDGE_MILLIS of the expected timeout.
|
||||
*/
|
||||
|
@ -61,34 +70,15 @@ public class TestSocketIOWithTimeout extends TestCase {
|
|||
long diff = System.currentTimeMillis() - start;
|
||||
LOG.info("Got SocketTimeoutException as expected after " +
|
||||
diff + " millis : " + e.getMessage());
|
||||
assertTrue(Math.abs(TIMEOUT - diff) <= 200);
|
||||
assertTrue(Math.abs(expectedTimeout - diff) <=
|
||||
TestNetUtils.TIME_FUDGE_MILLIS);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Just reads one byte from the input stream.
|
||||
*/
|
||||
static class ReadRunnable implements Runnable {
|
||||
private InputStream in;
|
||||
|
||||
public ReadRunnable(InputStream in) {
|
||||
this.in = in;
|
||||
}
|
||||
public void run() {
|
||||
try {
|
||||
in.read();
|
||||
} catch (IOException e) {
|
||||
LOG.info("Got expection while reading as expected : " +
|
||||
e.getMessage());
|
||||
return;
|
||||
}
|
||||
assertTrue(false);
|
||||
}
|
||||
}
|
||||
|
||||
public void testSocketIOWithTimeout() throws IOException {
|
||||
@Test
|
||||
public void testSocketIOWithTimeout() throws Exception {
|
||||
|
||||
// first open pipe:
|
||||
Pipe pipe = Pipe.open();
|
||||
|
@ -96,7 +86,7 @@ public class TestSocketIOWithTimeout extends TestCase {
|
|||
Pipe.SinkChannel sink = pipe.sink();
|
||||
|
||||
try {
|
||||
InputStream in = new SocketInputStream(source, TIMEOUT);
|
||||
final InputStream in = new SocketInputStream(source, TIMEOUT);
|
||||
OutputStream out = new SocketOutputStream(sink, TIMEOUT);
|
||||
|
||||
byte[] writeBytes = TEST_STRING.getBytes();
|
||||
|
@ -105,37 +95,62 @@ public class TestSocketIOWithTimeout extends TestCase {
|
|||
|
||||
out.write(writeBytes);
|
||||
out.write(byteWithHighBit);
|
||||
doIO(null, out);
|
||||
doIO(null, out, TIMEOUT);
|
||||
|
||||
in.read(readBytes);
|
||||
assertTrue(Arrays.equals(writeBytes, readBytes));
|
||||
assertEquals(byteWithHighBit & 0xff, in.read());
|
||||
doIO(in, null);
|
||||
doIO(in, null, TIMEOUT);
|
||||
|
||||
// Change timeout on the read side.
|
||||
((SocketInputStream)in).setTimeout(TIMEOUT * 2);
|
||||
doIO(in, null, TIMEOUT * 2);
|
||||
|
||||
|
||||
/*
|
||||
* Verify that it handles interrupted threads properly.
|
||||
* Use a large timeout and expect the thread to return quickly.
|
||||
* Use a large timeout and expect the thread to return quickly
|
||||
* upon interruption.
|
||||
*/
|
||||
in = new SocketInputStream(source, 0);
|
||||
Thread thread = new Thread(new ReadRunnable(in));
|
||||
thread.start();
|
||||
|
||||
try {
|
||||
Thread.sleep(1000);
|
||||
} catch (InterruptedException ignored) {}
|
||||
|
||||
((SocketInputStream)in).setTimeout(0);
|
||||
TestingThread thread = new TestingThread(ctx) {
|
||||
@Override
|
||||
public void doWork() throws Exception {
|
||||
try {
|
||||
in.read();
|
||||
fail("Did not fail with interrupt");
|
||||
} catch (InterruptedIOException ste) {
|
||||
LOG.info("Got expection while reading as expected : " +
|
||||
ste.getMessage());
|
||||
}
|
||||
}
|
||||
};
|
||||
ctx.addThread(thread);
|
||||
ctx.startThreads();
|
||||
// If the thread is interrupted before it calls read()
|
||||
// then it throws ClosedByInterruptException due to
|
||||
// some Java quirk. Waiting for it to call read()
|
||||
// gets it into select(), so we get the expected
|
||||
// InterruptedIOException.
|
||||
Thread.sleep(1000);
|
||||
thread.interrupt();
|
||||
|
||||
try {
|
||||
thread.join();
|
||||
} catch (InterruptedException e) {
|
||||
throw new IOException("Unexpected InterruptedException : " + e);
|
||||
}
|
||||
ctx.stop();
|
||||
|
||||
//make sure the channels are still open
|
||||
assertTrue(source.isOpen());
|
||||
assertTrue(sink.isOpen());
|
||||
|
||||
// Nevertheless, the output stream is closed, because
|
||||
// a partial write may have succeeded (see comment in
|
||||
// SocketOutputStream#write(byte[]), int, int)
|
||||
try {
|
||||
out.write(1);
|
||||
fail("Did not throw");
|
||||
} catch (IOException ioe) {
|
||||
GenericTestUtils.assertExceptionContains(
|
||||
"stream is closed", ioe);
|
||||
}
|
||||
|
||||
out.close();
|
||||
assertFalse(sink.isOpen());
|
||||
|
||||
@ -41,7 +41,7 @@ public class TestTableMapping {
|
|||
public void setUp() throws IOException {
|
||||
mappingFile = File.createTempFile(getClass().getSimpleName(), ".txt");
|
||||
Files.write("a.b.c /rack1\n" +
|
||||
"1.2.3\t/rack2\n", mappingFile, Charsets.UTF_8);
|
||||
"1.2.3.4\t/rack2\n", mappingFile, Charsets.UTF_8);
|
||||
mappingFile.deleteOnExit();
|
||||
}
|
||||
|
||||
|
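For context, the file written in setUp() drives a TableMapping resolver; a hedged sketch of wiring it up follows. The configuration key name here is an assumption for illustration only, and the tests below simply call setConf on a prepared Configuration.

    Configuration conf = new Configuration();
    conf.set("net.topology.table.file.name", mappingFile.getPath()); // assumed key name
    TableMapping mapping = new TableMapping();
    mapping.setConf(conf);
    List<String> racks = mapping.resolve(Arrays.asList("a.b.c", "1.2.3.4"));
    // expected, per the mapping file written above: ["/rack1", "/rack2"]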
@ -55,7 +55,7 @@ public class TestTableMapping {
|
|||
|
||||
List<String> names = new ArrayList<String>();
|
||||
names.add("a.b.c");
|
||||
names.add("1.2.3");
|
||||
names.add("1.2.3.4");
|
||||
|
||||
List<String> result = mapping.resolve(names);
|
||||
assertEquals(names.size(), result.size());
|
||||
|
@ -73,7 +73,7 @@ public class TestTableMapping {
|
|||
|
||||
List<String> names = new ArrayList<String>();
|
||||
names.add("a.b.c");
|
||||
names.add("1.2.3");
|
||||
names.add("1.2.3.4");
|
||||
|
||||
List<String> result1 = mapping.resolve(names);
|
||||
assertEquals(names.size(), result1.size());
|
||||
|
@ -96,7 +96,7 @@ public class TestTableMapping {
|
|||
|
||||
List<String> names = new ArrayList<String>();
|
||||
names.add("a.b.c");
|
||||
names.add("1.2.3");
|
||||
names.add("1.2.3.4");
|
||||
|
||||
List<String> result = mapping.resolve(names);
|
||||
assertEquals(names.size(), result.size());
|
||||
|
@ -114,7 +114,7 @@ public class TestTableMapping {
|
|||
|
||||
List<String> names = new ArrayList<String>();
|
||||
names.add("a.b.c");
|
||||
names.add("1.2.3");
|
||||
names.add("1.2.3.4");
|
||||
|
||||
List<String> result = mapping.resolve(names);
|
||||
assertEquals(names.size(), result.size());
|
||||
|
@ -134,7 +134,7 @@ public class TestTableMapping {
|
|||
|
||||
List<String> names = new ArrayList<String>();
|
||||
names.add("a.b.c");
|
||||
names.add("1.2.3");
|
||||
names.add("1.2.3.4");
|
||||
|
||||
List<String> result = mapping.resolve(names);
|
||||
assertEquals(names.size(), result.size());
|
||||
|
|
|
@ -18,11 +18,15 @@
|
|||
|
||||
package org.apache.hadoop.security.token;
|
||||
|
||||
import static junit.framework.Assert.assertEquals;
|
||||
|
||||
import java.io.*;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.hadoop.io.*;
|
||||
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
|
||||
import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
|
||||
import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
|
||||
|
||||
import junit.framework.TestCase;
|
||||
|
||||
|
@ -95,4 +99,19 @@ public class TestToken extends TestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testDecodeIdentifier() throws IOException {
|
||||
TestDelegationTokenSecretManager secretManager =
|
||||
new TestDelegationTokenSecretManager(0, 0, 0, 0);
|
||||
secretManager.startThreads();
|
||||
TestDelegationTokenIdentifier id = new TestDelegationTokenIdentifier(
|
||||
new Text("owner"), new Text("renewer"), new Text("realUser"));
|
||||
|
||||
Token<TestDelegationTokenIdentifier> token =
|
||||
new Token<TestDelegationTokenIdentifier>(id, secretManager);
|
||||
TokenIdentifier idCopy = token.decodeIdentifier();
|
||||
|
||||
assertNotSame(id, idCopy);
|
||||
assertEquals(id, idCopy);
|
||||
}
|
||||
|
||||
}
|
||||
@ -19,7 +19,6 @@ package org.apache.hadoop.util;
|
|||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestVersionUtil {
|
||||
|
@ -30,6 +29,8 @@ public class TestVersionUtil {
|
|||
assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0"));
|
||||
assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a"));
|
||||
assertEquals(0, VersionUtil.compareVersions("1", "1"));
|
||||
assertEquals(0, VersionUtil.compareVersions(
|
||||
"2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"));
|
||||
|
||||
// Assert that lower versions are lower, and higher versions are higher.
|
||||
assertExpectedValues("1", "2.0.0");
|
||||
|
@ -52,6 +53,13 @@ public class TestVersionUtil {
|
|||
assertExpectedValues("1.0.0a2", "1.0.0a10");
|
||||
assertExpectedValues("1.0", "1.a");
|
||||
assertExpectedValues("1.0", "1.a0");
|
||||
|
||||
    // Snapshot builds precede their eventual releases.
    assertExpectedValues("1.0-SNAPSHOT", "1.0");
    assertExpectedValues("1.0", "1.0.0-SNAPSHOT");
    assertExpectedValues("1.0.0-SNAPSHOT", "1.0.0");
    assertExpectedValues("1.0.0", "1.0.1-SNAPSHOT");
    assertExpectedValues("1.0.1-SNAPSHOT", "1.0.1");
  }

  private static void assertExpectedValues(String lower, String higher) {
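Restated as a tiny usage sketch of the comparator exercised above (the semantics are taken from these assertions; nothing new is implied):

    VersionUtil.compareVersions("1.0.0-SNAPSHOT", "1.0.0");  // < 0: a snapshot precedes its release
    VersionUtil.compareVersions("1.0.0", "1.0.1-SNAPSHOT");  // < 0: the next release's snapshot is higher
    VersionUtil.compareVersions("2.0.0", "2.0.0");           // == 0: equal versions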
@ -0,0 +1,2 @@
org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
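The two-line file added above is presumably a java.util.ServiceLoader descriptor (a META-INF/services entry for org.apache.hadoop.security.token.TokenIdentifier) registering the test identifier classes so that Token#decodeIdentifier can discover them. A hedged sketch of how such a registration is consumed; the loop is illustrative and the production lookup may differ in detail.

    ServiceLoader<TokenIdentifier> loader = ServiceLoader.load(TokenIdentifier.class);
    for (TokenIdentifier candidate : loader) {
      if (candidate.getKind().equals(token.getKind())) {
        // a registered identifier of the matching kind was found; the token's
        // identifier bytes can now be decoded into a fresh instance of that class
      }
    }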
@ -368,9 +368,6 @@ Release 2.0.0 - UNRELEASED
|
|||
HDFS-2505. Add a test to verify getFileChecksum(..) with ViewFS. (Ravi
|
||||
Prakash via szetszwo)
|
||||
|
||||
HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
|
||||
and epoch in JournalProtocol. (suresh via szetszwo)
|
||||
|
||||
HDFS-3240. Drop log level of "heartbeat: ..." in BPServiceActor to DEBUG
|
||||
(todd)
|
||||
|
||||
|
@ -419,6 +416,44 @@ Release 2.0.0 - UNRELEASED
|
|||
|
||||
HDFS-3339. Change INode to package private. (John George via szetszwo)
|
||||
|
||||
HDFS-3303. Remove Writable implementation from RemoteEditLogManifest.
|
||||
(Brandon Li via szetszwo)
|
||||
|
||||
HDFS-2617. Replaced Kerberized SSL for image transfer and fsck
|
||||
with SPNEGO-based solution. (jghoman, tucu, and atm via eli)
|
||||
|
||||
HDFS-3365. Enable users to disable socket caching in DFS client
|
||||
configuration (todd)
|
||||
|
||||
HDFS-3375. Put client name in DataXceiver thread name for readBlock
|
||||
and keepalive (todd)
|
||||
|
||||
HDFS-3363. Define BlockCollection and MutableBlockCollection interfaces
|
||||
so that INodeFile and INodeFileUnderConstruction do not have to be used in
|
||||
block management. (John George via szetszwo)
|
||||
|
||||
HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
|
||||
logging is enabled. (atm)
|
||||
|
||||
HDFS-3341. Change minimum RPC versions to respective SNAPSHOTs instead of
|
||||
final releases. (todd)
|
||||
|
||||
HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and
|
||||
BlocksMap to {get|set|add}BlockCollection(..). (John George via szetszwo)
|
||||
|
||||
HDFS-3134. harden edit log loader against malformed or malicious input.
|
||||
(Colin Patrick McCabe via eli)
|
||||
|
||||
HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
|
||||
|
||||
HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli)
|
||||
|
||||
HDFS-3400. DNs should be able start with jsvc even if security is disabled.
|
||||
(atm via eli)
|
||||
|
||||
HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
|
||||
and epoch in JournalProtocol. (suresh via szetszwo)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
|
||||
|
@ -432,6 +467,8 @@ Release 2.0.0 - UNRELEASED
|
|||
HDFS-2476. More CPU efficient data structure for under-replicated,
|
||||
over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
|
||||
|
||||
HDFS-3378. Remove DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY and DEFAULT. (eli)
|
||||
|
||||
BUG FIXES
|
||||
|
||||
HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol.
|
||||
|
@ -589,6 +626,33 @@ Release 2.0.0 - UNRELEASED
|
|||
HDFS-3330. If GetImageServlet throws an Error or RTE, response should not
|
||||
have HTTP "OK" status. (todd)
|
||||
|
||||
HDFS-3351. NameNode#initializeGenericKeys should always set fs.defaultFS
|
||||
regardless of whether HA or Federation is enabled. (atm)
|
||||
|
||||
HDFS-3359. DFSClient.close should close cached sockets. (todd)
|
||||
|
||||
HDFS-3350. In INode, add final to compareTo(..), equals(..) and hashCode(),
|
||||
and remove synchronized from updatePermissionStatus(..). (szetszwo)
|
||||
|
||||
HDFS-3357. DataXceiver reads from client socket with incorrect/no timeout
|
||||
(todd)
|
||||
|
||||
HDFS-3376. DFSClient fails to make connection to DN if there are many
|
||||
unusable cached sockets (todd)
|
||||
|
||||
HDFS-3328. NPE in DataNode.getIpcPort. (eli)
|
||||
|
||||
HDFS-3396. FUSE build fails on Ubuntu 12.04. (Colin Patrick McCabe via eli)
|
||||
|
||||
HDFS-3395. NN doesn't start with HA+security enabled and HTTP address
|
||||
set to 0.0.0.0. (atm)
|
||||
|
||||
HDFS-3385. The last block of INodeFileUnderConstruction is not
|
||||
necessarily a BlockInfoUnderConstruction, so do not cast it in
|
||||
FSNamesystem.recoverLeaseInternal(..). (szetszwo)
|
||||
|
||||
HDFS-3026. HA: Handle failure during HA state transition. (atm)
|
||||
|
||||
BREAKDOWN OF HDFS-1623 SUBTASKS
|
||||
|
||||
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
|
||||
@ -18,4 +18,5 @@ bin_PROGRAMS = fuse_dfs
|
|||
fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c fuse_impls_chown.c fuse_impls_create.c fuse_impls_flush.c fuse_impls_getattr.c fuse_impls_mkdir.c fuse_impls_mknod.c fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c fuse_impls_unlink.c fuse_impls_write.c
|
||||
AM_CFLAGS= -Wall -g
|
||||
AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/../../src/main/native -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
|
||||
AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm -lm
|
||||
AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib64 -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -L$(FUSE_HOME)/lib -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server
|
||||
fuse_dfs_LDADD=-lfuse -lhdfs -ljvm -lm
|
||||
@ -57,16 +57,21 @@ shift
|
|||
|
||||
# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
|
||||
if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
|
||||
if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
|
||||
HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
|
||||
fi
|
||||
if [ -n "$JSVC_HOME" ]; then
|
||||
if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
|
||||
HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
|
||||
fi
|
||||
|
||||
if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
|
||||
HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
|
||||
fi
|
||||
if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
|
||||
HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
|
||||
fi
|
||||
|
||||
HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
|
||||
starting_secure_dn="true"
|
||||
HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
|
||||
starting_secure_dn="true"
|
||||
else
|
||||
echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
|
||||
"isn't set. Falling back to starting insecure DN."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$COMMAND" = "namenode" ] ; then
|
||||
|
@ -129,12 +134,12 @@ if [ "$starting_secure_dn" = "true" ]; then
|
|||
if [ "$HADOOP_PID_DIR" = "" ]; then
|
||||
HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
|
||||
else
|
||||
HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
|
||||
HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
|
||||
fi
|
||||
|
||||
JSVC=$JSVC_HOME/jsvc
|
||||
if [ ! -f $JSVC ]; then
|
||||
echo "JSVC_HOME is not set correctly so jsvc can not be found. Jsvc is required to run secure datanodes. "
|
||||
echo "JSVC_HOME is not set correctly so jsvc cannot be found. Jsvc is required to run secure datanodes. "
|
||||
echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
|
||||
"and set JSVC_HOME to the directory containing the jsvc binary."
|
||||
exit
|
||||
@ -560,6 +560,7 @@ public class DFSClient implements java.io.Closeable {
|
|||
void abort() {
|
||||
clientRunning = false;
|
||||
closeAllFilesBeingWritten(true);
|
||||
socketCache.clear();
|
||||
closeConnectionToNamenode();
|
||||
}
|
||||
|
||||
|
@ -597,6 +598,7 @@ public class DFSClient implements java.io.Closeable {
|
|||
public synchronized void close() throws IOException {
|
||||
if(clientRunning) {
|
||||
closeAllFilesBeingWritten(false);
|
||||
socketCache.clear();
|
||||
clientRunning = false;
|
||||
leaserenewer.closeClient(this);
|
||||
// close connections to the namenode
|
||||
@ -99,8 +99,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
|||
public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
|
||||
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
|
||||
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
|
||||
public static final String DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY = "dfs.namenode.secondary.https-port";
|
||||
public static final int DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT = 50490;
|
||||
public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
|
||||
public static final long DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60;
|
||||
public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
|
||||
|
@ -147,7 +145,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
|||
public static final String DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained";
|
||||
public static final int DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M
|
||||
public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version";
|
||||
public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0";
|
||||
public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
|
||||
|
||||
public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
|
||||
public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
|
||||
|
@ -265,7 +263,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
|||
public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020;
|
||||
public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0" + DFS_DATANODE_IPC_DEFAULT_PORT;
|
||||
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
|
||||
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0";
|
||||
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
|
||||
|
||||
public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable";
|
||||
public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
|
||||
|
@ -319,10 +317,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
|||
public static final String DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.kerberos.principal";
|
||||
public static final String DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
|
||||
public static final String DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.kerberos.principal";
|
||||
public static final String DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.namenode.kerberos.https.principal";
|
||||
public static final String DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.namenode.kerberos.internal.spnego.principal";
|
||||
public static final String DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY = "dfs.secondary.namenode.keytab.file";
|
||||
public static final String DFS_SECONDARY_NAMENODE_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.principal";
|
||||
public static final String DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal";
|
||||
public static final String DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal";
|
||||
public static final String DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
|
||||
public static final int DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
|
||||
|
||||
@ -864,7 +864,13 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
|
|||
// Allow retry since there is no way of knowing whether the cached socket
|
||||
// is good until we actually use it.
|
||||
for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) {
|
||||
Socket sock = socketCache.get(dnAddr);
|
||||
Socket sock = null;
|
||||
// Don't use the cache on the last attempt - it's possible that there
|
||||
// are arbitrarily many unusable sockets in the cache, but we don't
|
||||
// want to fail the read.
|
||||
if (retries < nCachedConnRetry) {
|
||||
sock = socketCache.get(dnAddr);
|
||||
}
|
||||
if (sock == null) {
|
||||
fromCache = false;
|
||||
|
||||
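Because the hunk above is truncated, here is a hedged sketch of the full retry shape it introduces; the helper newSocketTo is hypothetical and stands in for the connection path that follows in the real method.

    for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) {
      Socket sock = null;
      if (retries < nCachedConnRetry) {  // never trust the cache on the final attempt
        sock = socketCache.get(dnAddr);
      }
      if (sock == null) {
        fromCache = false;
        // sock = newSocketTo(dnAddr);   // hypothetical: dial a fresh connection instead
      }
      // ... attempt the read on sock; a failure on a cached socket loops back here
    }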
@ -714,8 +714,11 @@ public class DFSUtil {
|
|||
public static String substituteForWildcardAddress(String configuredAddress,
|
||||
String defaultHost) throws IOException {
|
||||
InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
|
||||
InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
|
||||
+ ":0");
|
||||
if (sockAddr.getAddress().isAnyLocalAddress()) {
|
||||
if(UserGroupInformation.isSecurityEnabled()) {
|
||||
if (UserGroupInformation.isSecurityEnabled() &&
|
||||
defaultSockAddr.getAddress().isAnyLocalAddress()) {
|
||||
throw new IOException("Cannot use a wildcard address with security. " +
|
||||
"Must explicitly set bind address for Kerberos");
|
||||
}
|
||||
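A hedged usage sketch of the method patched above; the host names are illustrative, and the security behavior restates the new condition rather than adding anything.

    String addr = DFSUtil.substituteForWildcardAddress("0.0.0.0:8020", "nn1.example.com");
    // addr is expected to be "nn1.example.com:8020" since the configured address is a wildcard.
    // With security enabled, a default host that is itself a wildcard now raises IOException.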
@ -81,7 +81,6 @@ public class HdfsConfiguration extends Configuration {
|
|||
deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
|
||||
deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
|
||||
deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
|
||||
deprecate("dfs.secondary.https.port", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY);
|
||||
deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
|
||||
deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
|
||||
deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
|
||||
@ -144,7 +144,7 @@ public class HftpFileSystem extends FileSystem
|
|||
}
|
||||
|
||||
protected URI getNamenodeSecureUri(URI uri) {
|
||||
return DFSUtil.createUri("https", getNamenodeSecureAddr(uri));
|
||||
return DFSUtil.createUri("http", getNamenodeSecureAddr(uri));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -247,7 +247,7 @@ public class HftpFileSystem extends FileSystem
|
|||
c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer);
|
||||
} catch (Exception e) {
|
||||
LOG.info("Couldn't get a delegation token from " + nnHttpUrl +
|
||||
" using https.");
|
||||
" using http.");
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("error was ", e);
|
||||
}
|
||||
|
@ -686,11 +686,11 @@ public class HftpFileSystem extends FileSystem
|
|||
Configuration conf) throws IOException {
|
||||
// update the kerberos credentials, if they are coming from a keytab
|
||||
UserGroupInformation.getLoginUser().reloginFromKeytab();
|
||||
// use https to renew the token
|
||||
// use http to renew the token
|
||||
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
|
||||
return
|
||||
DelegationTokenFetcher.renewDelegationToken
|
||||
(DFSUtil.createUri("https", serviceAddr).toString(),
|
||||
(DFSUtil.createUri("http", serviceAddr).toString(),
|
||||
(Token<DelegationTokenIdentifier>) token);
|
||||
}
|
||||
|
||||
|
@ -700,10 +700,10 @@ public class HftpFileSystem extends FileSystem
|
|||
Configuration conf) throws IOException {
|
||||
// update the kerberos credentials, if they are coming from a keytab
|
||||
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
|
||||
// use https to cancel the token
|
||||
// use http to cancel the token
|
||||
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
|
||||
DelegationTokenFetcher.cancelDelegationToken
|
||||
(DFSUtil.createUri("https", serviceAddr).toString(),
|
||||
(DFSUtil.createUri("http", serviceAddr).toString(),
|
||||
(Token<DelegationTokenIdentifier>) token);
|
||||
}
|
||||
}
|
||||
@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
|
|||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.hdfs.util.DirectBufferPool;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.net.SocketInputStream;
|
||||
import org.apache.hadoop.net.SocketInputWrapper;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.util.DataChecksum;
|
||||
|
||||
|
@ -450,11 +450,8 @@ public class RemoteBlockReader2 implements BlockReader {
|
|||
//
|
||||
// Get bytes in block, set streams
|
||||
//
|
||||
Preconditions.checkArgument(sock.getChannel() != null,
|
||||
"Socket %s does not have an associated Channel.",
|
||||
sock);
|
||||
SocketInputStream sin =
|
||||
(SocketInputStream)NetUtils.getInputStream(sock);
|
||||
SocketInputWrapper sin = NetUtils.getInputStream(sock);
|
||||
ReadableByteChannel ch = sin.getReadableByteChannel();
|
||||
DataInputStream in = new DataInputStream(sin);
|
||||
|
||||
BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
|
||||
|
@ -477,7 +474,7 @@ public class RemoteBlockReader2 implements BlockReader {
|
|||
}
|
||||
|
||||
return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
|
||||
sin, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock);
|
||||
ch, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock);
|
||||
}
|
||||
|
||||
static void checkSuccess(
|
||||
@ -47,6 +47,9 @@ class SocketCache {
|
|||
public SocketCache(int capacity) {
|
||||
multimap = LinkedListMultimap.create();
|
||||
this.capacity = capacity;
|
||||
if (capacity <= 0) {
|
||||
LOG.debug("SocketCache disabled in configuration.");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -55,6 +58,10 @@ class SocketCache {
|
|||
* @return A socket with unknown state, possibly closed underneath. Or null.
|
||||
*/
|
||||
public synchronized Socket get(SocketAddress remote) {
|
||||
if (capacity <= 0) { // disabled
|
||||
return null;
|
||||
}
|
||||
|
||||
List<Socket> socklist = multimap.get(remote);
|
||||
if (socklist == null) {
|
||||
return null;
|
||||
|
@ -76,6 +83,12 @@ class SocketCache {
|
|||
* @param sock socket not used by anyone.
|
||||
*/
|
||||
public synchronized void put(Socket sock) {
|
||||
if (capacity <= 0) {
|
||||
// Cache disabled.
|
||||
IOUtils.closeSocket(sock);
|
||||
return;
|
||||
}
|
||||
|
||||
Preconditions.checkNotNull(sock);
|
||||
|
||||
SocketAddress remoteAddr = sock.getRemoteSocketAddress();
|
||||
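A minimal sketch of the disable path added above, assuming a SocketAddress remoteAddr and an idle Socket sock; a capacity of zero short-circuits both lookups and inserts, per the new guards (HDFS-3365).

    SocketCache cache = new SocketCache(0);   // caching disabled
    assert cache.get(remoteAddr) == null;     // nothing is ever served from the cache
    cache.put(sock);                          // put() simply closes the socket and returns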
@ -148,7 +148,8 @@ public class BlockTokenIdentifier extends TokenIdentifier {
|
|||
userId = WritableUtils.readString(in);
|
||||
blockPoolId = WritableUtils.readString(in);
|
||||
blockId = WritableUtils.readVLong(in);
|
||||
int length = WritableUtils.readVInt(in);
|
||||
int length = WritableUtils.readVIntInRange(in, 0,
|
||||
AccessMode.class.getEnumConstants().length);
|
||||
for (int i = 0; i < length; i++) {
|
||||
modes.add(WritableUtils.readEnum(in, AccessMode.class));
|
||||
}
|
||||
@ -0,0 +1,63 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.blockmanagement;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.fs.ContentSummary;
|
||||
|
||||
/**
|
||||
* This interface is used by the block manager to expose a
|
||||
* few characteristics of a collection of Block/BlockUnderConstruction.
|
||||
*/
|
||||
public interface BlockCollection {
|
||||
/**
|
||||
* Get the last block of the collection.
|
||||
* Make sure it has the right type.
|
||||
*/
|
||||
public <T extends BlockInfo> T getLastBlock() throws IOException;
|
||||
|
||||
/**
|
||||
* Get content summary.
|
||||
*/
|
||||
public ContentSummary computeContentSummary();
|
||||
|
||||
/** @return the number of blocks */
|
||||
public int numBlocks();
|
||||
|
||||
public BlockInfo[] getBlocks();
|
||||
/**
|
||||
* Get preferred block size for the collection
|
||||
* @return preferred block size in bytes
|
||||
*/
|
||||
public long getPreferredBlockSize();
|
||||
|
||||
/**
|
||||
* Get block replication for the collection
|
||||
* @return block replication value
|
||||
*/
|
||||
public short getReplication();
|
||||
|
||||
/**
|
||||
* Get name of collection.
|
||||
*/
|
||||
public String getName();
|
||||
}
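To make the intent of the new interface concrete, a hedged sketch of a caller that needs only this collection-level view and no INodeFile dependency; the helper method is illustrative and not part of the patch.

    static short expectedReplication(BlockCollection bc) {
      // works for any BlockCollection implementation, mutable or not
      return bc == null ? 0 : bc.getReplication();
    }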
@ -22,18 +22,17 @@ import java.util.LinkedList;
|
|||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
|
||||
import org.apache.hadoop.hdfs.util.LightWeightGSet;
|
||||
|
||||
/**
|
||||
* BlockInfo class maintains for a given block
|
||||
* the {@link INodeFile} it is part of and datanodes where the replicas of
|
||||
* the {@link BlockCollection} it is part of and datanodes where the replicas of
|
||||
* the block are stored.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class BlockInfo extends Block implements
|
||||
LightWeightGSet.LinkedElement {
|
||||
private INodeFile inode;
|
||||
private BlockCollection bc;
|
||||
|
||||
/** For implementing {@link LightWeightGSet.LinkedElement} interface */
|
||||
private LightWeightGSet.LinkedElement nextLinkedElement;
|
||||
|
@ -58,13 +57,13 @@ public class BlockInfo extends Block implements
|
|||
*/
|
||||
public BlockInfo(int replication) {
|
||||
this.triplets = new Object[3*replication];
|
||||
this.inode = null;
|
||||
this.bc = null;
|
||||
}
|
||||
|
||||
public BlockInfo(Block blk, int replication) {
|
||||
super(blk);
|
||||
this.triplets = new Object[3*replication];
|
||||
this.inode = null;
|
||||
this.bc = null;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -73,16 +72,16 @@ public class BlockInfo extends Block implements
|
|||
* @param from BlockInfo to copy from.
|
||||
*/
|
||||
protected BlockInfo(BlockInfo from) {
|
||||
this(from, from.inode.getReplication());
|
||||
this.inode = from.inode;
|
||||
this(from, from.bc.getReplication());
|
||||
this.bc = from.bc;
|
||||
}
|
||||
|
||||
public INodeFile getINode() {
|
||||
return inode;
|
||||
public BlockCollection getBlockCollection() {
|
||||
return bc;
|
||||
}
|
||||
|
||||
public void setINode(INodeFile inode) {
|
||||
this.inode = inode;
|
||||
public void setBlockCollection(BlockCollection bc) {
|
||||
this.bc = bc;
|
||||
}
|
||||
|
||||
DatanodeDescriptor getDatanode(int index) {
|
||||
|
@ -335,7 +334,7 @@ public class BlockInfo extends Block implements
|
|||
BlockUCState s, DatanodeDescriptor[] targets) {
|
||||
if(isComplete()) {
|
||||
return new BlockInfoUnderConstruction(
|
||||
this, getINode().getReplication(), s, targets);
|
||||
this, getBlockCollection().getReplication(), s, targets);
|
||||
}
|
||||
// the block is already under construction
|
||||
BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;
|
||||
@ -234,7 +234,7 @@ public class BlockInfoUnderConstruction extends BlockInfo {
|
|||
blockRecoveryId = recoveryId;
|
||||
if (replicas.size() == 0) {
|
||||
NameNode.stateChangeLog.warn("BLOCK*"
|
||||
+ " INodeFileUnderConstruction.initLeaseRecovery:"
|
||||
+ " BlockInfoUnderConstruction.initLeaseRecovery:"
|
||||
+ " No blocks found, lease removed.");
|
||||
}
|
||||
|
||||
@ -55,8 +55,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
|
|||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
|
||||
import org.apache.hadoop.hdfs.server.common.Util;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
|
||||
|
@ -142,7 +140,7 @@ public class BlockManager {
|
|||
private final long replicationRecheckInterval;
|
||||
|
||||
/**
|
||||
* Mapping: Block -> { INode, datanodes, self ref }
|
||||
* Mapping: Block -> { BlockCollection, datanodes, self ref }
|
||||
* Updated only in response to client-sent information.
|
||||
*/
|
||||
final BlocksMap blocksMap;
|
||||
|
@ -192,7 +190,7 @@ public class BlockManager {
|
|||
public final short minReplication;
|
||||
/** Default number of replicas */
|
||||
public final int defaultReplication;
|
||||
/** The maximum number of entries returned by getCorruptInodes() */
|
||||
/** value returned by MAX_CORRUPT_FILES_RETURNED */
|
||||
final int maxCorruptFilesReturned;
|
||||
|
||||
/** variable to enable check for enough racks */
|
||||
|
@ -384,7 +382,7 @@ public class BlockManager {
|
|||
numReplicas.decommissionedReplicas();
|
||||
|
||||
if (block instanceof BlockInfo) {
|
||||
String fileName = ((BlockInfo)block).getINode().getFullPathName();
|
||||
String fileName = ((BlockInfo)block).getBlockCollection().getName();
|
||||
out.print(fileName + ": ");
|
||||
}
|
||||
// l: == live:, d: == decommissioned c: == corrupt e: == excess
|
||||
|
@ -454,17 +452,17 @@ public class BlockManager {
|
|||
 * Commit the last block of the file and mark it as complete if it
 * meets the minimum replication requirement
|
||||
*
|
||||
* @param fileINode file inode
|
||||
* @param bc block collection
|
||||
* @param commitBlock - contains client reported block length and generation
|
||||
* @return true if the last block is changed to committed state.
|
||||
* @throws IOException if the block does not have at least a minimal number
|
||||
* of replicas reported from data-nodes.
|
||||
*/
|
||||
public boolean commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode,
|
||||
public boolean commitOrCompleteLastBlock(MutableBlockCollection bc,
|
||||
Block commitBlock) throws IOException {
|
||||
if(commitBlock == null)
|
||||
return false; // not committing, this is a block allocation retry
|
||||
BlockInfo lastBlock = fileINode.getLastBlock();
|
||||
BlockInfo lastBlock = bc.getLastBlock();
|
||||
if(lastBlock == null)
|
||||
return false; // no blocks in file yet
|
||||
if(lastBlock.isComplete())
|
||||
|
@ -472,22 +470,22 @@ public class BlockManager {
|
|||
|
||||
final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock);
|
||||
if(countNodes(lastBlock).liveReplicas() >= minReplication)
|
||||
completeBlock(fileINode,fileINode.numBlocks()-1, false);
|
||||
completeBlock(bc, bc.numBlocks()-1, false);
|
||||
return b;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a specified block of the file to a complete block.
|
||||
* @param fileINode file
|
||||
* @param bc file
|
||||
* @param blkIndex block index in the file
|
||||
* @throws IOException if the block does not have at least a minimal number
|
||||
* of replicas reported from data-nodes.
|
||||
*/
|
||||
private BlockInfo completeBlock(final INodeFile fileINode,
|
||||
private BlockInfo completeBlock(final MutableBlockCollection bc,
|
||||
final int blkIndex, boolean force) throws IOException {
|
||||
if(blkIndex < 0)
|
||||
return null;
|
||||
BlockInfo curBlock = fileINode.getBlocks()[blkIndex];
|
||||
BlockInfo curBlock = bc.getBlocks()[blkIndex];
|
||||
if(curBlock.isComplete())
|
||||
return curBlock;
|
||||
BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock;
|
||||
|
@ -500,7 +498,7 @@ public class BlockManager {
|
|||
"Cannot complete block: block has not been COMMITTED by the client");
|
||||
BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
|
||||
// replace penultimate block in file
|
||||
fileINode.setBlock(blkIndex, completeBlock);
|
||||
bc.setBlock(blkIndex, completeBlock);
|
||||
|
||||
// Since safe-mode only counts complete blocks, and we now have
|
||||
// one more complete block, we need to adjust the total up, and
|
||||
|
@ -516,12 +514,12 @@ public class BlockManager {
|
|||
return blocksMap.replaceBlock(completeBlock);
|
||||
}
|
||||
|
||||
private BlockInfo completeBlock(final INodeFile fileINode,
|
||||
private BlockInfo completeBlock(final MutableBlockCollection bc,
|
||||
final BlockInfo block, boolean force) throws IOException {
|
||||
BlockInfo[] fileBlocks = fileINode.getBlocks();
|
||||
BlockInfo[] fileBlocks = bc.getBlocks();
|
||||
for(int idx = 0; idx < fileBlocks.length; idx++)
|
||||
if(fileBlocks[idx] == block) {
|
||||
return completeBlock(fileINode, idx, force);
|
||||
return completeBlock(bc, idx, force);
|
||||
}
|
||||
return block;
|
||||
}
|
||||
|
@ -531,10 +529,10 @@ public class BlockManager {
|
|||
* regardless of whether enough replicas are present. This is necessary
|
||||
* when tailing edit logs as a Standby.
|
||||
*/
|
||||
public BlockInfo forceCompleteBlock(final INodeFile fileINode,
|
||||
public BlockInfo forceCompleteBlock(final MutableBlockCollection bc,
|
||||
final BlockInfoUnderConstruction block) throws IOException {
|
||||
block.commitBlock(block);
|
||||
return completeBlock(fileINode, block, true);
|
||||
return completeBlock(bc, block, true);
|
||||
}
|
||||
|
||||
|
||||
|
@ -548,14 +546,14 @@ public class BlockManager {
|
|||
* The method returns null if there is no partial block at the end.
|
||||
* The client is supposed to allocate a new block with the next call.
|
||||
*
|
||||
* @param fileINode file
|
||||
* @param bc file
|
||||
* @return the last block locations if the block is partial or null otherwise
|
||||
*/
|
||||
public LocatedBlock convertLastBlockToUnderConstruction(
|
||||
INodeFileUnderConstruction fileINode) throws IOException {
|
||||
BlockInfo oldBlock = fileINode.getLastBlock();
|
||||
MutableBlockCollection bc) throws IOException {
|
||||
BlockInfo oldBlock = bc.getLastBlock();
|
||||
if(oldBlock == null ||
|
||||
fileINode.getPreferredBlockSize() == oldBlock.getNumBytes())
|
||||
bc.getPreferredBlockSize() == oldBlock.getNumBytes())
|
||||
return null;
|
||||
assert oldBlock == getStoredBlock(oldBlock) :
|
||||
"last block of the file is not in blocksMap";
|
||||
|
@ -563,7 +561,7 @@ public class BlockManager {
|
|||
DatanodeDescriptor[] targets = getNodes(oldBlock);
|
||||
|
||||
BlockInfoUnderConstruction ucBlock =
|
||||
fileINode.setLastBlock(oldBlock, targets);
|
||||
bc.setLastBlock(oldBlock, targets);
|
||||
blocksMap.replaceBlock(ucBlock);
|
||||
|
||||
// Remove block from replication queue.
|
||||
|
@ -583,7 +581,7 @@ public class BlockManager {
|
|||
// always decrement total blocks
|
||||
-1);
|
||||
|
||||
final long fileLength = fileINode.computeContentSummary().getLength();
|
||||
final long fileLength = bc.computeContentSummary().getLength();
|
||||
final long pos = fileLength - ucBlock.getNumBytes();
|
||||
return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
|
||||
}
|
||||
|
@ -923,8 +921,8 @@ public class BlockManager {
|
|||
" does not exist. ");
|
||||
}
|
||||
|
||||
INodeFile inode = storedBlock.getINode();
|
||||
if (inode == null) {
|
||||
BlockCollection bc = storedBlock.getBlockCollection();
|
||||
if (bc == null) {
|
||||
NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " +
|
||||
"block " + storedBlock +
|
||||
" could not be marked as corrupt as it" +
|
||||
|
@ -938,7 +936,7 @@ public class BlockManager {
|
|||
|
||||
// Add this replica to corruptReplicas Map
|
||||
corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason);
|
||||
if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
|
||||
if (countNodes(storedBlock).liveReplicas() >= bc.getReplication()) {
|
||||
// the block is over-replicated so invalidate the replicas immediately
|
||||
invalidateBlock(storedBlock, node);
|
||||
} else if (namesystem.isPopulatingReplQueues()) {
|
||||
|
@ -1051,7 +1049,7 @@ public class BlockManager {
|
|||
int requiredReplication, numEffectiveReplicas;
|
||||
List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
|
||||
DatanodeDescriptor srcNode;
|
||||
INodeFile fileINode = null;
|
||||
BlockCollection bc = null;
|
||||
int additionalReplRequired;
|
||||
|
||||
int scheduledWork = 0;
|
||||
|
@ -1063,15 +1061,15 @@ public class BlockManager {
|
|||
for (int priority = 0; priority < blocksToReplicate.size(); priority++) {
|
||||
for (Block block : blocksToReplicate.get(priority)) {
|
||||
// block should belong to a file
|
||||
fileINode = blocksMap.getINode(block);
|
||||
bc = blocksMap.getBlockCollection(block);
|
||||
// abandoned block or block reopened for append
|
||||
if(fileINode == null || fileINode.isUnderConstruction()) {
|
||||
if(bc == null || bc instanceof MutableBlockCollection) {
|
||||
neededReplications.remove(block, priority); // remove from neededReplications
|
||||
neededReplications.decrementReplicationIndex(priority);
|
||||
continue;
|
||||
}
|
||||
|
||||
requiredReplication = fileINode.getReplication();
|
||||
requiredReplication = bc.getReplication();
|
||||
|
||||
// get a source data-node
|
||||
containingNodes = new ArrayList<DatanodeDescriptor>();
|
||||
|
@ -1107,7 +1105,7 @@ public class BlockManager {
|
|||
} else {
|
||||
additionalReplRequired = 1; // Needed on a new rack
|
||||
}
|
||||
work.add(new ReplicationWork(block, fileINode, srcNode,
|
||||
work.add(new ReplicationWork(block, bc, srcNode,
|
||||
containingNodes, liveReplicaNodes, additionalReplRequired,
|
||||
priority));
|
||||
}
|
||||
|
@ -1129,8 +1127,8 @@ public class BlockManager {
|
|||
|
||||
// choose replication targets: NOT HOLDING THE GLOBAL LOCK
|
||||
// It is costly to extract the filename for which chooseTargets is called,
|
||||
// so for now we pass in the Inode itself.
|
||||
rw.targets = blockplacement.chooseTarget(rw.fileINode,
|
||||
// so for now we pass in the block collection itself.
|
||||
rw.targets = blockplacement.chooseTarget(rw.bc,
|
||||
rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes,
|
||||
excludedNodes, rw.block.getNumBytes());
|
||||
}
|
||||
|
@ -1149,15 +1147,15 @@ public class BlockManager {
|
|||
int priority = rw.priority;
|
||||
// Recheck since global lock was released
|
||||
// block should belong to a file
|
||||
fileINode = blocksMap.getINode(block);
|
||||
bc = blocksMap.getBlockCollection(block);
|
||||
// abandoned block or block reopened for append
|
||||
if(fileINode == null || fileINode.isUnderConstruction()) {
|
||||
if(bc == null || bc instanceof MutableBlockCollection) {
|
||||
neededReplications.remove(block, priority); // remove from neededReplications
|
||||
rw.targets = null;
|
||||
neededReplications.decrementReplicationIndex(priority);
|
||||
continue;
|
||||
}
|
||||
requiredReplication = fileINode.getReplication();
|
||||
requiredReplication = bc.getReplication();
|
||||
|
||||
// do not schedule more if enough replicas are already pending
|
||||
NumberReplicas numReplicas = countNodes(block);
|
||||
|
@ -1916,7 +1914,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
int numCurrentReplica = countLiveNodes(storedBlock);
|
||||
if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
|
||||
&& numCurrentReplica >= minReplication) {
|
||||
completeBlock(storedBlock.getINode(), storedBlock, false);
|
||||
completeBlock((MutableBlockCollection)storedBlock.getBlockCollection(), storedBlock, false);
|
||||
} else if (storedBlock.isComplete()) {
|
||||
// check whether safe replication is reached for the block
|
||||
// only complete blocks are counted towards that.
|
||||
|
@ -1944,7 +1942,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
} else {
|
||||
storedBlock = block;
|
||||
}
|
||||
if (storedBlock == null || storedBlock.getINode() == null) {
|
||||
if (storedBlock == null || storedBlock.getBlockCollection() == null) {
|
||||
// If this block does not belong to any file, then we are done.
|
||||
NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
|
||||
+ node + " size " + block.getNumBytes()
|
||||
|
@ -1954,8 +1952,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
return block;
|
||||
}
|
||||
assert storedBlock != null : "Block must be stored by now";
|
||||
INodeFile fileINode = storedBlock.getINode();
|
||||
assert fileINode != null : "Block must belong to a file";
|
||||
BlockCollection bc = storedBlock.getBlockCollection();
|
||||
assert bc != null : "Block must belong to a file";
|
||||
|
||||
// add block to the datanode
|
||||
boolean added = node.addBlock(storedBlock);
|
||||
|
@ -1981,7 +1979,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
|
||||
if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
|
||||
numLiveReplicas >= minReplication) {
|
||||
storedBlock = completeBlock(fileINode, storedBlock, false);
|
||||
storedBlock = completeBlock((MutableBlockCollection)bc, storedBlock, false);
|
||||
} else if (storedBlock.isComplete()) {
|
||||
// check whether safe replication is reached for the block
|
||||
// only complete blocks are counted towards that
|
||||
|
@ -1992,7 +1990,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
}
|
||||
|
||||
// if file is under construction, then done for now
|
||||
if (fileINode.isUnderConstruction()) {
|
||||
if (bc instanceof MutableBlockCollection) {
|
||||
return storedBlock;
|
||||
}
|
||||
|
||||
|
@ -2002,7 +2000,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
}
|
||||
|
||||
// handle underReplication/overReplication
|
||||
short fileReplication = fileINode.getReplication();
|
||||
short fileReplication = bc.getReplication();
|
||||
if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
|
||||
neededReplications.remove(storedBlock, numCurrentReplica,
|
||||
num.decommissionedReplicas(), fileReplication);
|
||||
|
@ -2129,8 +2127,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
* what happened with it.
|
||||
*/
|
||||
private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
|
||||
INodeFile fileINode = block.getINode();
|
||||
if (fileINode == null) {
|
||||
BlockCollection bc = block.getBlockCollection();
|
||||
if (bc == null) {
|
||||
// block does not belong to any file
|
||||
addToInvalidates(block);
|
||||
return MisReplicationResult.INVALID;
|
||||
|
@ -2141,7 +2139,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
return MisReplicationResult.UNDER_CONSTRUCTION;
|
||||
}
|
||||
// calculate current replication
|
||||
short expectedReplication = fileINode.getReplication();
|
||||
short expectedReplication = bc.getReplication();
|
||||
NumberReplicas num = countNodes(block);
|
||||
int numCurrentReplica = num.liveReplicas();
|
||||
// add to under-replicated queue if need to be
|
||||
|
@ -2258,7 +2256,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
BlockPlacementPolicy replicator) {
|
||||
assert namesystem.hasWriteLock();
|
||||
// first form a rack to datanodes map and
|
||||
INodeFile inode = getINode(b);
|
||||
BlockCollection bc = getBlockCollection(b);
|
||||
final Map<String, List<DatanodeDescriptor>> rackMap
|
||||
= new HashMap<String, List<DatanodeDescriptor>>();
|
||||
for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
|
||||
|
@ -2298,7 +2296,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
|| (addedNode != null && !priSet.contains(addedNode))) ) {
|
||||
cur = delNodeHint;
|
||||
} else { // regular excessive replica removal
|
||||
cur = replicator.chooseReplicaToDelete(inode, b, replication,
|
||||
cur = replicator.chooseReplicaToDelete(bc, b, replication,
|
||||
priSet, remains);
|
||||
}
|
||||
firstOne = false;
|
||||
|
@ -2379,8 +2377,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
// necessary. In that case, put block on a possibly-will-
|
||||
// be-replicated list.
|
||||
//
|
||||
INodeFile fileINode = blocksMap.getINode(block);
|
||||
if (fileINode != null) {
|
||||
BlockCollection bc = blocksMap.getBlockCollection(block);
|
||||
if (bc != null) {
|
||||
namesystem.decrementSafeBlockCount(block);
|
||||
updateNeededReplications(block, -1, 0);
|
||||
}
|
||||
|
@ -2611,7 +2609,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
NumberReplicas num) {
|
||||
int curReplicas = num.liveReplicas();
|
||||
int curExpectedReplicas = getReplication(block);
|
||||
INodeFile fileINode = blocksMap.getINode(block);
|
||||
BlockCollection bc = blocksMap.getBlockCollection(block);
|
||||
Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
|
||||
StringBuilder nodeList = new StringBuilder();
|
||||
while (nodeIter.hasNext()) {
|
||||
|
@ -2624,7 +2622,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
+ ", corrupt replicas: " + num.corruptReplicas()
|
||||
+ ", decommissioned replicas: " + num.decommissionedReplicas()
|
||||
+ ", excess replicas: " + num.excessReplicas()
|
||||
+ ", Is Open File: " + fileINode.isUnderConstruction()
|
||||
+ ", Is Open File: " + (bc instanceof MutableBlockCollection)
|
||||
+ ", Datanodes having this block: " + nodeList + ", Current Datanode: "
|
||||
+ srcNode + ", Is current datanode decommissioning: "
|
||||
+ srcNode.isDecommissionInProgress());
|
||||
|
@ -2639,8 +2637,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
final Iterator<? extends Block> it = srcNode.getBlockIterator();
|
||||
while(it.hasNext()) {
|
||||
final Block block = it.next();
|
||||
INodeFile fileINode = blocksMap.getINode(block);
|
||||
short expectedReplication = fileINode.getReplication();
|
||||
BlockCollection bc = blocksMap.getBlockCollection(block);
|
||||
short expectedReplication = bc.getReplication();
|
||||
NumberReplicas num = countNodes(block);
|
||||
int numCurrentReplica = num.liveReplicas();
|
||||
if (numCurrentReplica > expectedReplication) {
|
||||
|
@ -2662,9 +2660,9 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
final Iterator<? extends Block> it = srcNode.getBlockIterator();
|
||||
while(it.hasNext()) {
|
||||
final Block block = it.next();
|
||||
INodeFile fileINode = blocksMap.getINode(block);
|
||||
BlockCollection bc = blocksMap.getBlockCollection(block);
|
||||
|
||||
if (fileINode != null) {
|
||||
if (bc != null) {
|
||||
NumberReplicas num = countNodes(block);
|
||||
int curReplicas = num.liveReplicas();
|
||||
int curExpectedReplicas = getReplication(block);
|
||||
|
@ -2679,7 +2677,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
|
||||
decommissionOnlyReplicas++;
|
||||
}
|
||||
if (fileINode.isUnderConstruction()) {
|
||||
if (bc instanceof MutableBlockCollection) {
|
||||
underReplicatedInOpenFiles++;
|
||||
}
|
||||
}
|
||||
|
@ -2782,12 +2780,11 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
|
||||
/* get replication factor of a block */
|
||||
private int getReplication(Block block) {
|
||||
INodeFile fileINode = blocksMap.getINode(block);
|
||||
if (fileINode == null) { // block does not belong to any file
|
||||
BlockCollection bc = blocksMap.getBlockCollection(block);
|
||||
if (bc == null) { // block does not belong to any file
|
||||
return 0;
|
||||
}
|
||||
assert !fileINode.isDirectory() : "Block cannot belong to a directory.";
|
||||
return fileINode.getReplication();
|
||||
return bc.getReplication();
|
||||
}
|
||||
|
||||
|
||||
|
@ -2859,12 +2856,12 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
return this.neededReplications.getCorruptBlockSize();
|
||||
}
|
||||
|
||||
public BlockInfo addINode(BlockInfo block, INodeFile iNode) {
|
||||
return blocksMap.addINode(block, iNode);
|
||||
public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) {
|
||||
return blocksMap.addBlockCollection(block, bc);
|
||||
}
|
||||
|
||||
public INodeFile getINode(Block b) {
|
||||
return blocksMap.getINode(b);
|
||||
public BlockCollection getBlockCollection(Block b) {
|
||||
return blocksMap.getBlockCollection(b);
|
||||
}
|
||||
|
||||
/** @return an iterator of the datanodes. */
|
||||
|
@ -3003,7 +3000,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
private static class ReplicationWork {
|
||||
|
||||
private Block block;
|
||||
private INodeFile fileINode;
|
||||
private BlockCollection bc;
|
||||
|
||||
private DatanodeDescriptor srcNode;
|
||||
private List<DatanodeDescriptor> containingNodes;
|
||||
|
@ -3014,14 +3011,14 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|
|||
private int priority;
|
||||
|
||||
public ReplicationWork(Block block,
|
||||
INodeFile fileINode,
|
||||
BlockCollection bc,
|
||||
DatanodeDescriptor srcNode,
|
||||
List<DatanodeDescriptor> containingNodes,
|
||||
List<DatanodeDescriptor> liveReplicaNodes,
|
||||
int additionalReplRequired,
|
||||
int priority) {
|
||||
this.block = block;
|
||||
this.fileINode = fileINode;
|
||||
this.bc = bc;
|
||||
this.srcNode = srcNode;
|
||||
this.containingNodes = containingNodes;
|
||||
this.liveReplicaNodes = liveReplicaNodes;
|
||||
|
|
|
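The hunks above swap BlockManager's direct INodeFile lookups for the new BlockCollection abstraction; the replication arithmetic itself is unchanged. As a minimal sketch of that check, using simplified stand-in types (SimpleBlockCollection and SimpleNumberReplicas are hypothetical, not the real HDFS classes):

// Illustrative sketch only: simplified stand-ins for the HDFS types touched above.
interface SimpleBlockCollection {
  short getReplication();                      // expected replication factor
}

class SimpleNumberReplicas {
  private final int live;
  SimpleNumberReplicas(int live) { this.live = live; }
  int liveReplicas() { return live; }
}

class ReplicationCheckSketch {
  // Mirrors the comparison in the hunks above: a block is over-replicated when the
  // number of live replicas exceeds the replication factor of its block collection.
  static boolean isOverReplicated(SimpleBlockCollection bc, SimpleNumberReplicas num) {
    return num.liveReplicas() > bc.getReplication();
  }
}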
@@ -29,7 +29,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.ReflectionUtils;

@@ -111,11 +110,11 @@ public abstract class BlockPlacementPolicy {
* choose <i>numOfReplicas</i> data nodes for <i>writer</i>
* If not, return as many as we can.
* The base implemenatation extracts the pathname of the file from the
* specified srcInode, but this could be a costly operation depending on the
* specified srcBC, but this could be a costly operation depending on the
* file system implementation. Concrete implementations of this class should
* override this method to avoid this overhead.
*
* @param srcInode The inode of the file for which chooseTarget is being invoked.
* @param srcBC block collection of file for which chooseTarget is invoked.
* @param numOfReplicas additional number of replicas wanted.
* @param writer the writer's machine, null if not in the cluster.
* @param chosenNodes datanodes that have been chosen as targets.

@@ -123,13 +122,13 @@ public abstract class BlockPlacementPolicy {
* @return array of DatanodeDescriptor instances chosen as target
* and sorted as a pipeline.
*/
DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode,
DatanodeDescriptor[] chooseTarget(BlockCollection srcBC,
int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes,
HashMap<Node, Node> excludedNodes,
long blocksize) {
return chooseTarget(srcInode.getFullPathName(), numOfReplicas, writer,
return chooseTarget(srcBC.getName(), numOfReplicas, writer,
chosenNodes, excludedNodes, blocksize);
}

@@ -150,7 +149,7 @@ public abstract class BlockPlacementPolicy {
* Decide whether deleting the specified replica of the block still makes
* the block conform to the configured block placement policy.
*
* @param srcInode The inode of the file to which the block-to-be-deleted belongs
* @param srcBC block collection of file to which block-to-be-deleted belongs
* @param block The block to be deleted
* @param replicationFactor The required number of replicas for this block
* @param existingReplicas The replica locations of this block that are present

@@ -159,7 +158,7 @@ public abstract class BlockPlacementPolicy {
listed in the previous parameter.
* @return the replica that is the best candidate for deletion
*/
abstract public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo srcInode,
abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcBC,
Block block,
short replicationFactor,
Collection<DatanodeDescriptor> existingReplicas,
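The javadoc above notes that the default chooseTarget derives a name from the block collection, which can be costly, and that concrete policies should override it. A hedged sketch of that override pattern with simplified placeholder types (BlockCollectionLike, PlacementPolicySketch and CheapPolicySketch are illustrative, not the Hadoop API):

import java.util.List;

interface BlockCollectionLike {
  String getName();                 // may be expensive, e.g. builds a full path
  short getReplication();
}

abstract class PlacementPolicySketch {
  // Base behaviour: fall back to the name-based variant, as in the hunk above.
  List<String> chooseTarget(BlockCollectionLike srcBC, int numOfReplicas) {
    return chooseTarget(srcBC.getName(), numOfReplicas);
  }
  abstract List<String> chooseTarget(String srcPath, int numOfReplicas);
}

class CheapPolicySketch extends PlacementPolicySketch {
  @Override
  List<String> chooseTarget(BlockCollectionLike srcBC, int numOfReplicas) {
    // A concrete policy that does not need the path can skip getName() entirely.
    return chooseTarget((String) null, numOfReplicas);
  }
  @Override
  List<String> chooseTarget(String srcPath, int numOfReplicas) {
    return java.util.Collections.nCopies(numOfReplicas, "datanode");
  }
}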
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

@@ -547,7 +546,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
}

@Override
public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
Block block,
short replicationFactor,
Collection<DatanodeDescriptor> first,
@@ -20,13 +20,12 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.Iterator;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.GSet;
import org.apache.hadoop.hdfs.util.LightWeightGSet;

/**
* This class maintains the map from a block to its metadata.
* block's metadata currently includes INode it belongs to and
* block's metadata currently includes blockCollection it belongs to and
* the datanodes that store the block.
*/
class BlocksMap {

@@ -93,21 +92,21 @@ class BlocksMap {
blocks = null;
}

INodeFile getINode(Block b) {
BlockCollection getBlockCollection(Block b) {
BlockInfo info = blocks.get(b);
return (info != null) ? info.getINode() : null;
return (info != null) ? info.getBlockCollection() : null;
}

/**
* Add block b belonging to the specified file inode to the map.
* Add block b belonging to the specified block collection to the map.
*/
BlockInfo addINode(BlockInfo b, INodeFile iNode) {
BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) {
BlockInfo info = blocks.get(b);
if (info != b) {
info = b;
blocks.put(info);
}
info.setINode(iNode);
info.setBlockCollection(bc);
return info;
}

@@ -121,7 +120,7 @@ class BlocksMap {
if (blockInfo == null)
return;

blockInfo.setINode(null);
blockInfo.setBlockCollection(null);
for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
DatanodeDescriptor dn = blockInfo.getDatanode(idx);
dn.removeBlock(blockInfo); // remove from the list and wipe the location

@@ -169,7 +168,7 @@ class BlocksMap {
boolean removed = node.removeBlock(info);

if (info.getDatanode(0) == null // no datanodes left
&& info.getINode() == null) { // does not belong to a file
&& info.getBlockCollection() == null) { // does not belong to a file
blocks.remove(b); // remove block from the map
}
return removed;
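BlocksMap above keeps one canonical BlockInfo per block and links it to its owning collection via addBlockCollection/getBlockCollection. A rough sketch of that bookkeeping, assuming a plain HashMap in place of the LightWeightGSet the real class uses; all type names here are stand-ins:

import java.util.HashMap;
import java.util.Map;

class BlocksMapSketch {
  static class BlockKey {
    final long id;
    BlockKey(long id) { this.id = id; }
    @Override public boolean equals(Object o) { return o instanceof BlockKey && ((BlockKey) o).id == id; }
    @Override public int hashCode() { return Long.hashCode(id); }
  }
  static class BlockInfoSketch {
    final BlockKey key;
    Object blockCollection;                     // owning collection, or null
    BlockInfoSketch(BlockKey key) { this.key = key; }
  }

  private final Map<BlockKey, BlockInfoSketch> blocks = new HashMap<>();

  // Mirrors addBlockCollection(): insert the BlockInfo if absent, then link it to its collection.
  BlockInfoSketch addBlockCollection(BlockInfoSketch b, Object bc) {
    BlockInfoSketch info = blocks.get(b.key);
    if (info != b) {
      info = b;
      blocks.put(info.key, info);
    }
    info.blockCollection = bc;
    return info;
  }

  // Mirrors getBlockCollection(): null when the block is not in the map.
  Object getBlockCollection(BlockKey key) {
    BlockInfoSketch info = blocks.get(key);
    return (info != null) ? info.blockCollection : null;
  }
}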
@@ -15,24 +15,30 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
package org.apache.hadoop.hdfs.server.blockmanagement;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.fs.ContentSummary;

/**
* This interface is used used the pluggable block placement policy
* to expose a few characteristics of an Inode.
* This interface is used by the block manager to expose a
* few characteristics of a collection of Block/BlockUnderConstruction.
*/
@InterfaceAudience.Private
public interface FSInodeInfo {
public interface MutableBlockCollection extends BlockCollection {
/**
* Set block
*/
public void setBlock(int idx, BlockInfo blk);

/**
* a string representation of an inode
*
* @return the full pathname (from root) that this inode represents
* Convert the last block of the collection to an under-construction block.
* Set its locations.
*/

public String getFullPathName() ;
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
DatanodeDescriptor[] targets) throws IOException;
}
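The new interface splits read-only block-collection access (BlockCollection) from the mutations only an open file needs (MutableBlockCollection). A small sketch of that split with simplified types; only the shape of the interfaces is taken from the patch, everything else is illustrative:

interface ReadableCollectionSketch {
  String getName();
  short getReplication();
}

interface MutableCollectionSketch extends ReadableCollectionSketch {
  void setBlock(int idx, String blk);                       // replace one block
  String setLastBlock(String lastBlock, String[] targets);  // convert last block to under-construction
}

// Only files still being written implement the mutable side, mirroring how
// INodeFileUnderConstruction (but not INodeFile) implements MutableBlockCollection.
class OpenFileSketch implements MutableCollectionSketch {
  private final String[] blocks = new String[8];
  public String getName() { return "/sketch/file"; }
  public short getReplication() { return 3; }
  public void setBlock(int idx, String blk) { blocks[idx] = blk; }
  public String setLastBlock(String lastBlock, String[] targets) {
    blocks[blocks.length - 1] = lastBlock + " (under construction)";
    return blocks[blocks.length - 1];
  }
}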
@@ -235,6 +235,9 @@ class BPServiceActor implements Runnable {
}

void reportBadBlocks(ExtendedBlock block) {
if (bpRegistration == null) {
return;
}
DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) };
@@ -860,7 +860,7 @@ public class DataNode extends Configured
*/
public String getDisplayName() {
// NB: our DatanodeID may not be set yet
return hostName + ":" + getIpcPort();
return hostName + ":" + getXferPort();
}

/**

@@ -877,7 +877,6 @@ public class DataNode extends Configured
/**
* @return the datanode's IPC port
*/
@VisibleForTesting
public int getIpcPort() {
return ipcServer.getListenerAddress().getPort();
}
@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
|||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.io.MD5Hash;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.net.SocketInputWrapper;
|
||||
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.util.DataChecksum;
|
||||
|
@ -83,13 +84,30 @@ class DataXceiver extends Receiver implements Runnable {
|
|||
private final DataXceiverServer dataXceiverServer;
|
||||
|
||||
private long opStartTime; //the start time of receiving an Op
|
||||
private final SocketInputWrapper socketInputWrapper;
|
||||
|
||||
public DataXceiver(Socket s, DataNode datanode,
|
||||
/**
|
||||
* Client Name used in previous operation. Not available on first request
|
||||
* on the socket.
|
||||
*/
|
||||
private String previousOpClientName;
|
||||
|
||||
public static DataXceiver create(Socket s, DataNode dn,
|
||||
DataXceiverServer dataXceiverServer) throws IOException {
|
||||
|
||||
SocketInputWrapper iw = NetUtils.getInputStream(s);
|
||||
return new DataXceiver(s, iw, dn, dataXceiverServer);
|
||||
}
|
||||
|
||||
private DataXceiver(Socket s,
|
||||
SocketInputWrapper socketInput,
|
||||
DataNode datanode,
|
||||
DataXceiverServer dataXceiverServer) throws IOException {
|
||||
super(new DataInputStream(new BufferedInputStream(
|
||||
NetUtils.getInputStream(s), HdfsConstants.SMALL_BUFFER_SIZE)));
|
||||
socketInput, HdfsConstants.SMALL_BUFFER_SIZE)));
|
||||
|
||||
this.s = s;
|
||||
this.socketInputWrapper = socketInput;
|
||||
this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
|
||||
this.datanode = datanode;
|
||||
this.dnConf = datanode.getDnConf();
|
||||
|
@ -110,7 +128,11 @@ class DataXceiver extends Receiver implements Runnable {
|
|||
*/
|
||||
private void updateCurrentThreadName(String status) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("DataXceiver for client ").append(remoteAddress);
|
||||
sb.append("DataXceiver for client ");
|
||||
if (previousOpClientName != null) {
|
||||
sb.append(previousOpClientName).append(" at ");
|
||||
}
|
||||
sb.append(remoteAddress);
|
||||
if (status != null) {
|
||||
sb.append(" [").append(status).append("]");
|
||||
}
|
||||
|
@ -128,8 +150,6 @@ class DataXceiver extends Receiver implements Runnable {
|
|||
Op op = null;
|
||||
dataXceiverServer.childSockets.add(s);
|
||||
try {
|
||||
int stdTimeout = s.getSoTimeout();
|
||||
|
||||
// We process requests in a loop, and stay around for a short timeout.
|
||||
// This optimistic behaviour allows the other end to reuse connections.
|
||||
// Setting keepalive timeout to 0 disable this behavior.
|
||||
|
@ -139,7 +159,9 @@ class DataXceiver extends Receiver implements Runnable {
|
|||
try {
|
||||
if (opsProcessed != 0) {
|
||||
assert dnConf.socketKeepaliveTimeout > 0;
|
||||
s.setSoTimeout(dnConf.socketKeepaliveTimeout);
|
||||
socketInputWrapper.setTimeout(dnConf.socketKeepaliveTimeout);
|
||||
} else {
|
||||
socketInputWrapper.setTimeout(dnConf.socketTimeout);
|
||||
}
|
||||
op = readOp();
|
||||
} catch (InterruptedIOException ignored) {
|
||||
|
@ -160,7 +182,7 @@ class DataXceiver extends Receiver implements Runnable {
|
|||
|
||||
// restore normal timeout
|
||||
if (opsProcessed != 0) {
|
||||
s.setSoTimeout(stdTimeout);
|
||||
s.setSoTimeout(dnConf.socketTimeout);
|
||||
}
|
||||
|
||||
opStartTime = now();
|
||||
|
@ -190,6 +212,8 @@ class DataXceiver extends Receiver implements Runnable {
|
|||
final String clientName,
|
||||
final long blockOffset,
|
||||
final long length) throws IOException {
|
||||
previousOpClientName = clientName;
|
||||
|
||||
OutputStream baseStream = NetUtils.getOutputStream(s,
|
||||
dnConf.socketWriteTimeout);
|
||||
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
|
||||
|
@ -283,7 +307,8 @@ class DataXceiver extends Receiver implements Runnable {
|
|||
final long maxBytesRcvd,
|
||||
final long latestGenerationStamp,
|
||||
DataChecksum requestedChecksum) throws IOException {
|
||||
updateCurrentThreadName("Receiving block " + block + " client=" + clientname);
|
||||
previousOpClientName = clientname;
|
||||
updateCurrentThreadName("Receiving block " + block);
|
||||
final boolean isDatanode = clientname.length() == 0;
|
||||
final boolean isClient = !isDatanode;
|
||||
final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW
|
||||
|
@ -490,7 +515,7 @@ class DataXceiver extends Receiver implements Runnable {
|
|||
final DatanodeInfo[] targets) throws IOException {
|
||||
checkAccess(null, true, blk, blockToken,
|
||||
Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
|
||||
|
||||
previousOpClientName = clientName;
|
||||
updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
|
||||
|
||||
final DataOutputStream out = new DataOutputStream(
|
||||
@@ -135,6 +135,7 @@ class DataXceiverServer implements Runnable {
try {
s = ss.accept();
s.setTcpNoDelay(true);
// Timeouts are set within DataXceiver.run()

// Make sure the xceiver count is not exceeded
int curXceiverCount = datanode.getXceiverCount();

@@ -144,7 +145,8 @@ class DataXceiverServer implements Runnable {
+ maxXceiverCount);
}

new Daemon(datanode.threadGroup, new DataXceiver(s, datanode, this))
new Daemon(datanode.threadGroup,
DataXceiver.create(s, datanode, this))
.start();
} catch (SocketTimeoutException ignored) {
// wake up to see if should continue to run
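The new DataXceiver.create() call above hands construction to a static factory so the socket input stream is wrapped once before the constructor runs. A sketch of that factory shape with illustrative names (XceiverSketch is not the real class, and the real code wraps the stream in a SocketInputWrapper rather than using it raw):

import java.io.IOException;
import java.io.InputStream;
import java.net.Socket;

class XceiverSketch implements Runnable {
  private final Socket socket;
  private final InputStream wrappedIn;

  // Factory: obtain and wrap the input once, then hand it to the private constructor.
  static XceiverSketch create(Socket s) throws IOException {
    InputStream in = s.getInputStream();
    return new XceiverSketch(s, in);
  }

  private XceiverSketch(Socket s, InputStream in) {
    this.socket = s;
    this.wrappedIn = in;
  }

  @Override public void run() {
    // Per-connection work would go here; read timeouts are adjusted on the wrapper
    // between operations, as the keepalive handling in the diff does.
  }
}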
@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.http.HttpServer;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.mortbay.jetty.nio.SelectChannelConnector;
|
||||
|
||||
/**
|
||||
|
@ -60,10 +61,7 @@ public class SecureDataNodeStarter implements Daemon {
|
|||
@Override
|
||||
public void init(DaemonContext context) throws Exception {
|
||||
System.err.println("Initializing secure datanode resources");
|
||||
// We should only start up a secure datanode in a Kerberos-secured cluster
|
||||
Configuration conf = new Configuration(); // Skip UGI method to not log in
|
||||
if(!conf.get(HADOOP_SECURITY_AUTHENTICATION).equals("kerberos"))
|
||||
throw new RuntimeException("Cannot start secure datanode in unsecure cluster");
|
||||
Configuration conf = new Configuration();
|
||||
|
||||
// Stash command-line arguments for regular datanode
|
||||
args = context.getArguments();
|
||||
|
@ -98,7 +96,8 @@ public class SecureDataNodeStarter implements Daemon {
|
|||
System.err.println("Successfully obtained privileged resources (streaming port = "
|
||||
+ ss + " ) (http listener port = " + listener.getConnection() +")");
|
||||
|
||||
if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) {
|
||||
if ((ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) &&
|
||||
UserGroupInformation.isSecurityEnabled()) {
|
||||
throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
|
||||
}
|
||||
System.err.println("Opened streaming server at " + streamingAddr);
|
||||
@@ -309,7 +309,7 @@ public class FSDirectory implements Closeable {
INodeFile newF = (INodeFile)newNode;
BlockInfo[] blocks = newF.getBlocks();
for (int i = 0; i < blocks.length; i++) {
newF.setBlock(i, getBlockManager().addINode(blocks[i], newF));
newF.setBlock(i, getBlockManager().addBlockCollection(blocks[i], newF));
}
}
} finally {

@@ -346,7 +346,7 @@ public class FSDirectory implements Closeable {
fileINode.getReplication(),
BlockUCState.UNDER_CONSTRUCTION,
targets);
getBlockManager().addINode(blockInfo, fileINode);
getBlockManager().addBlockCollection(blockInfo, fileINode);
fileINode.addBlock(blockInfo);

if(NameNode.stateChangeLog.isDebugEnabled()) {

@@ -1127,7 +1127,7 @@ public class FSDirectory implements Closeable {

int index = 0;
for (BlockInfo b : newnode.getBlocks()) {
BlockInfo info = getBlockManager().addINode(b, newnode);
BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
newnode.setBlock(index, info); // inode refers to the block in BlocksMap
index++;
}
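Each FSDirectory hunk above follows the same pattern: register a block with the block manager and keep whichever BlockInfo instance comes back as the canonical one. A compact sketch of that re-linking loop, with hypothetical types (RelinkSketch, Info) standing in for the real classes:

import java.util.HashMap;
import java.util.Map;

class RelinkSketch {
  static final class Info {
    final long id;
    Object owner;
    Info(long id) { this.id = id; }
  }

  private final Map<Long, Info> blockMap = new HashMap<>();

  // Insert the block if it is not yet known, then link it to its owning collection.
  Info addBlockCollection(Info b, Object owner) {
    Info info = blockMap.get(b.id);
    if (info == null) {
      info = b;
      blockMap.put(info.id, info);
    }
    info.owner = owner;
    return info;
  }

  // Mirror of the diff's loop: the file keeps the canonical Info the map hands back.
  void relink(Info[] fileBlocks, Object file) {
    for (int i = 0; i < fileBlocks.length; i++) {
      fileBlocks[i] = addBlockCollection(fileBlocks[i], file);
    }
  }
}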
@@ -601,7 +601,7 @@ public class FSEditLogLoader {
// OP_ADD operations as each block is allocated.
newBI = new BlockInfo(newBlock, file.getReplication());
}
fsNamesys.getBlockManager().addINode(newBI, file);
fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);
fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
@ -203,6 +203,10 @@ public abstract class FSEditLogOp {
|
|||
}
|
||||
|
||||
<T extends AddCloseOp> T setBlocks(Block[] blocks) {
|
||||
if (blocks.length > MAX_BLOCKS) {
|
||||
throw new RuntimeException("Can't have more than " + MAX_BLOCKS +
|
||||
" in an AddCloseOp.");
|
||||
}
|
||||
this.blocks = blocks;
|
||||
return (T)this;
|
||||
}
|
||||
|
@ -296,10 +300,18 @@ public abstract class FSEditLogOp {
|
|||
}
|
||||
}
|
||||
|
||||
static final public int MAX_BLOCKS = 1024 * 1024 * 64;
|
||||
|
||||
private static Block[] readBlocks(
|
||||
DataInputStream in,
|
||||
int logVersion) throws IOException {
|
||||
int numBlocks = in.readInt();
|
||||
if (numBlocks < 0) {
|
||||
throw new IOException("invalid negative number of blocks");
|
||||
} else if (numBlocks > MAX_BLOCKS) {
|
||||
throw new IOException("invalid number of blocks: " + numBlocks +
|
||||
". The maximum number of blocks per file is " + MAX_BLOCKS);
|
||||
}
|
||||
Block[] blocks = new Block[numBlocks];
|
||||
for (int i = 0; i < numBlocks; i++) {
|
||||
Block blk = new Block();
|
||||
|
@ -579,6 +591,7 @@ public abstract class FSEditLogOp {
|
|||
String trg;
|
||||
String[] srcs;
|
||||
long timestamp;
|
||||
final static public int MAX_CONCAT_SRC = 1024 * 1024;
|
||||
|
||||
private ConcatDeleteOp() {
|
||||
super(OP_CONCAT_DELETE);
|
||||
|
@ -594,7 +607,12 @@ public abstract class FSEditLogOp {
|
|||
}
|
||||
|
||||
ConcatDeleteOp setSources(String[] srcs) {
|
||||
if (srcs.length > MAX_CONCAT_SRC) {
|
||||
throw new RuntimeException("ConcatDeleteOp can only have " +
|
||||
MAX_CONCAT_SRC + " sources at most.");
|
||||
}
|
||||
this.srcs = srcs;
|
||||
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -624,8 +642,8 @@ public abstract class FSEditLogOp {
|
|||
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
|
||||
this.length = in.readInt();
|
||||
if (length < 3) { // trg, srcs.., timestamp
|
||||
throw new IOException("Incorrect data format. "
|
||||
+ "Concat delete operation.");
|
||||
throw new IOException("Incorrect data format " +
|
||||
"for ConcatDeleteOp.");
|
||||
}
|
||||
}
|
||||
this.trg = FSImageSerialization.readString(in);
|
||||
|
@ -635,6 +653,15 @@ public abstract class FSEditLogOp {
|
|||
} else {
|
||||
srcSize = this.length - 1 - 1; // trg and timestamp
|
||||
}
|
||||
if (srcSize < 0) {
|
||||
throw new IOException("Incorrect data format. "
|
||||
+ "ConcatDeleteOp cannot have a negative number of data " +
|
||||
" sources.");
|
||||
} else if (srcSize > MAX_CONCAT_SRC) {
|
||||
throw new IOException("Incorrect data format. "
|
||||
+ "ConcatDeleteOp can have at most " + MAX_CONCAT_SRC +
|
||||
" sources, but we tried to have " + (length - 3) + " sources.");
|
||||
}
|
||||
this.srcs = new String [srcSize];
|
||||
for(int i=0; i<srcSize;i++) {
|
||||
srcs[i]= FSImageSerialization.readString(in);
|
||||
|
|
|
@ -1783,24 +1783,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
"Failed to close file " + src +
|
||||
". Lease recovery is in progress. Try again later.");
|
||||
} else {
|
||||
BlockInfoUnderConstruction lastBlock=pendingFile.getLastBlock();
|
||||
if(lastBlock != null && lastBlock.getBlockUCState() ==
|
||||
BlockUCState.UNDER_RECOVERY) {
|
||||
throw new RecoveryInProgressException(
|
||||
"Recovery in progress, file [" + src + "], " +
|
||||
"lease owner [" + lease.getHolder() + "]");
|
||||
} else {
|
||||
throw new AlreadyBeingCreatedException(
|
||||
"Failed to create file [" + src + "] for [" + holder +
|
||||
"] on client [" + clientMachine +
|
||||
"], because this file is already being created by [" +
|
||||
pendingFile.getClientName() + "] on [" +
|
||||
pendingFile.getClientMachine() + "]");
|
||||
}
|
||||
}
|
||||
final BlockInfo lastBlock = pendingFile.getLastBlock();
|
||||
if (lastBlock != null
|
||||
&& lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
|
||||
throw new RecoveryInProgressException("Recovery in progress, file ["
|
||||
+ src + "], " + "lease owner [" + lease.getHolder() + "]");
|
||||
} else {
|
||||
throw new AlreadyBeingCreatedException("Failed to create file ["
|
||||
+ src + "] for [" + holder + "] on client [" + clientMachine
|
||||
+ "], because this file is already being created by ["
|
||||
+ pendingFile.getClientName() + "] on ["
|
||||
+ pendingFile.getClientMachine() + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2840,7 +2837,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
if (storedBlock == null) {
|
||||
throw new IOException("Block (=" + lastblock + ") not found");
|
||||
}
|
||||
INodeFile iFile = storedBlock.getINode();
|
||||
INodeFile iFile = (INodeFile) storedBlock.getBlockCollection();
|
||||
if (!iFile.isUnderConstruction() || storedBlock.isComplete()) {
|
||||
throw new IOException("Unexpected block (=" + lastblock
|
||||
+ ") since the file (=" + iFile.getLocalName()
|
||||
|
@ -4135,7 +4132,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
* Returns whether the given block is one pointed-to by a file.
|
||||
*/
|
||||
private boolean isValidBlock(Block b) {
|
||||
return (blockManager.getINode(b) != null);
|
||||
return (blockManager.getBlockCollection(b) != null);
|
||||
}
|
||||
|
||||
// Distributed upgrade manager
|
||||
|
@ -4394,7 +4391,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
}
|
||||
|
||||
// check file inode
|
||||
INodeFile file = storedBlock.getINode();
|
||||
INodeFile file = (INodeFile) storedBlock.getBlockCollection();
|
||||
if (file==null || !file.isUnderConstruction()) {
|
||||
throw new IOException("The file " + storedBlock +
|
||||
" belonged to does not exist or it is not under construction.");
|
||||
|
@ -4556,7 +4553,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
if (destinationExisted && dinfo.isDir()) {
|
||||
Path spath = new Path(src);
|
||||
Path parent = spath.getParent();
|
||||
if (isRoot(parent)) {
|
||||
if (parent.isRoot()) {
|
||||
overwrite = parent.toString();
|
||||
} else {
|
||||
overwrite = parent.toString() + Path.SEPARATOR;
|
||||
|
@ -4570,10 +4567,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
leaseManager.changeLease(src, dst, overwrite, replaceBy);
|
||||
}
|
||||
|
||||
private boolean isRoot(Path path) {
|
||||
return path.getParent() == null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializes leases.
|
||||
*/
|
||||
|
@ -4710,7 +4703,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
|
|||
|
||||
while (blkIterator.hasNext()) {
|
||||
Block blk = blkIterator.next();
|
||||
INode inode = blockManager.getINode(blk);
|
||||
INode inode = (INodeFile) blockManager.getBlockCollection(blk);
|
||||
skip++;
|
||||
if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
|
||||
String src = FSDirectory.getFullPathName(inode);
|
||||
|
|
|
@ -27,6 +27,8 @@ import javax.servlet.ServletException;
|
|||
import javax.servlet.http.HttpServlet;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
|
@ -34,7 +36,6 @@ import org.apache.commons.logging.LogFactory;
|
|||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.HAUtil;
|
||||
import org.apache.hadoop.hdfs.server.common.JspHelper;
|
||||
|
@ -83,11 +84,11 @@ public class GetImageServlet extends HttpServlet {
|
|||
(Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF);
|
||||
|
||||
if(UserGroupInformation.isSecurityEnabled() &&
|
||||
!isValidRequestor(request.getRemoteUser(), conf)) {
|
||||
!isValidRequestor(request.getUserPrincipal().getName(), conf)) {
|
||||
response.sendError(HttpServletResponse.SC_FORBIDDEN,
|
||||
"Only Namenode and Secondary Namenode may access this servlet");
|
||||
LOG.warn("Received non-NN/SNN request for image or edits from "
|
||||
+ request.getRemoteHost());
|
||||
+ request.getUserPrincipal().getName() + " at " + request.getRemoteHost());
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -156,15 +157,10 @@ public class GetImageServlet extends HttpServlet {
|
|||
}
|
||||
|
||||
// issue a HTTP get request to download the new fsimage
|
||||
MD5Hash downloadImageDigest = reloginIfNecessary().doAs(
|
||||
new PrivilegedExceptionAction<MD5Hash>() {
|
||||
@Override
|
||||
public MD5Hash run() throws Exception {
|
||||
return TransferFsImage.downloadImageToStorage(
|
||||
MD5Hash downloadImageDigest =
|
||||
TransferFsImage.downloadImageToStorage(
|
||||
parsedParams.getInfoServer(), txid,
|
||||
nnImage.getStorage(), true);
|
||||
}
|
||||
});
|
||||
nnImage.saveDigestAndRenameCheckpointImage(txid, downloadImageDigest);
|
||||
|
||||
// Now that we have a new checkpoint, we might be able to
|
||||
|
@ -176,18 +172,6 @@ public class GetImageServlet extends HttpServlet {
|
|||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
// We may have lost our ticket since the last time we tried to open
|
||||
// an http connection, so log in just in case.
|
||||
private UserGroupInformation reloginIfNecessary() throws IOException {
|
||||
// This method is only called on the NN, therefore it is safe to
|
||||
// use these key values.
|
||||
return UserGroupInformation.loginUserFromKeytabAndReturnUGI(
|
||||
SecurityUtil.getServerPrincipal(conf
|
||||
.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
|
||||
NameNode.getAddress(conf).getHostName()),
|
||||
conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
|
||||
}
|
||||
});
|
||||
|
||||
} catch (Throwable t) {
|
||||
|
@ -232,18 +216,10 @@ public class GetImageServlet extends HttpServlet {
|
|||
|
||||
Set<String> validRequestors = new HashSet<String>();
|
||||
|
||||
validRequestors.add(
|
||||
SecurityUtil.getServerPrincipal(conf
|
||||
.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), NameNode
|
||||
.getAddress(conf).getHostName()));
|
||||
validRequestors.add(
|
||||
SecurityUtil.getServerPrincipal(conf
|
||||
.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode
|
||||
.getAddress(conf).getHostName()));
|
||||
validRequestors.add(
|
||||
SecurityUtil.getServerPrincipal(conf
|
||||
.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
|
||||
SecondaryNameNode.getHttpAddress(conf).getHostName()));
|
||||
validRequestors.add(
|
||||
SecurityUtil.getServerPrincipal(conf
|
||||
.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY),
|
||||
|
@ -251,10 +227,6 @@ public class GetImageServlet extends HttpServlet {
|
|||
|
||||
if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
|
||||
Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
|
||||
validRequestors.add(
|
||||
SecurityUtil.getServerPrincipal(otherNnConf
|
||||
.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
|
||||
NameNode.getAddress(otherNnConf).getHostName()));
|
||||
validRequestors.add(
|
||||
SecurityUtil.getServerPrincipal(otherNnConf
|
||||
.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
|
||||
|
@ -263,11 +235,11 @@ public class GetImageServlet extends HttpServlet {
|
|||
|
||||
for(String v : validRequestors) {
|
||||
if(v != null && v.equals(remoteUser)) {
|
||||
if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is allowing: " + remoteUser);
|
||||
if(LOG.isInfoEnabled()) LOG.info("GetImageServlet allowing: " + remoteUser);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is rejecting: " + remoteUser);
|
||||
if(LOG.isInfoEnabled()) LOG.info("GetImageServlet rejecting: " + remoteUser);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -30,13 +30,15 @@ import org.apache.hadoop.hdfs.protocol.Block;
|
|||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
||||
import com.google.common.primitives.SignedBytes;
|
||||
|
||||
/**
|
||||
* We keep an in-memory representation of the file/block hierarchy.
|
||||
* This is a base INode class containing common fields for file and
|
||||
* directory inodes.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
abstract class INode implements Comparable<byte[]>, FSInodeInfo {
|
||||
abstract class INode implements Comparable<byte[]> {
|
||||
/*
|
||||
* The inode name is in java UTF8 encoding;
|
||||
* The name in HdfsFileStatus should keep the same encoding as this.
|
||||
|
@ -143,8 +145,7 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
|
|||
protected PermissionStatus getPermissionStatus() {
|
||||
return new PermissionStatus(getUserName(),getGroupName(),getFsPermission());
|
||||
}
|
||||
private synchronized void updatePermissionStatus(
|
||||
PermissionStatusFormat f, long n) {
|
||||
private void updatePermissionStatus(PermissionStatusFormat f, long n) {
|
||||
permission = f.combine(n, permission);
|
||||
}
|
||||
/** Get user name */
|
||||
|
@ -263,7 +264,6 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
|
|||
this.name = name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getFullPathName() {
|
||||
// Get the full path name of this inode.
|
||||
return FSDirectory.getFullPathName(this);
|
||||
|
@ -400,49 +400,31 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
|
|||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Comparable interface
|
||||
//
|
||||
public int compareTo(byte[] o) {
|
||||
return compareBytes(name, o);
|
||||
private static final byte[] EMPTY_BYTES = {};
|
||||
|
||||
@Override
|
||||
public final int compareTo(byte[] bytes) {
|
||||
final byte[] left = name == null? EMPTY_BYTES: name;
|
||||
final byte[] right = bytes == null? EMPTY_BYTES: bytes;
|
||||
return SignedBytes.lexicographicalComparator().compare(left, right);
|
||||
}
|
||||
|
||||
public boolean equals(Object o) {
|
||||
if (!(o instanceof INode)) {
|
||||
@Override
|
||||
public final boolean equals(Object that) {
|
||||
if (this == that) {
|
||||
return true;
|
||||
}
|
||||
if (that == null || !(that instanceof INode)) {
|
||||
return false;
|
||||
}
|
||||
return Arrays.equals(this.name, ((INode)o).name);
|
||||
return Arrays.equals(this.name, ((INode)that).name);
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
@Override
|
||||
public final int hashCode() {
|
||||
return Arrays.hashCode(this.name);
|
||||
}
|
||||
|
||||
//
|
||||
// static methods
|
||||
//
|
||||
/**
|
||||
* Compare two byte arrays.
|
||||
*
|
||||
* @return a negative integer, zero, or a positive integer
|
||||
* as defined by {@link #compareTo(byte[])}.
|
||||
*/
|
||||
static int compareBytes(byte[] a1, byte[] a2) {
|
||||
if (a1==a2)
|
||||
return 0;
|
||||
int len1 = (a1==null ? 0 : a1.length);
|
||||
int len2 = (a2==null ? 0 : a2.length);
|
||||
int n = Math.min(len1, len2);
|
||||
byte b1, b2;
|
||||
for (int i=0; i<n; i++) {
|
||||
b1 = a1[i];
|
||||
b2 = a2[i];
|
||||
if (b1 != b2)
|
||||
return b1 - b2;
|
||||
}
|
||||
return len1 - len2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an INode; the inode's name is not set yet
|
||||
*
|
||||
|
|
|
@ -173,9 +173,9 @@ class INodeDirectory extends INode {
|
|||
*/
|
||||
int getExistingPathINodes(byte[][] components, INode[] existing,
|
||||
boolean resolveLink) throws UnresolvedLinkException {
|
||||
assert compareBytes(this.name, components[0]) == 0 :
|
||||
"Incorrect name " + getLocalName() + " expected " +
|
||||
DFSUtil.bytes2String(components[0]);
|
||||
assert this.compareTo(components[0]) == 0 :
|
||||
"Incorrect name " + getLocalName() + " expected "
|
||||
+ (components[0] == null? null: DFSUtil.bytes2String(components[0]));
|
||||
|
||||
INode curNode = this;
|
||||
int count = 0;
|
||||
|
@ -317,8 +317,7 @@ class INodeDirectory extends INode {
|
|||
INode newNode,
|
||||
INodeDirectory parent,
|
||||
boolean propagateModTime
|
||||
) throws FileNotFoundException,
|
||||
UnresolvedLinkException {
|
||||
) throws FileNotFoundException {
|
||||
// insert into the parent children list
|
||||
newNode.name = localname;
|
||||
if(parent.addChild(newNode, propagateModTime) == null)
|
||||
|
|
|
@ -20,15 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode;
|
|||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.fs.permission.FsAction;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
|
||||
|
||||
/** I-node for closed file. */
|
||||
public class INodeFile extends INode {
|
||||
@InterfaceAudience.Private
|
||||
public class INodeFile extends INode implements BlockCollection {
|
||||
static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
|
||||
|
||||
//Number of bits for Block size
|
||||
|
@ -128,7 +131,7 @@ public class INodeFile extends INode {
|
|||
}
|
||||
|
||||
for(BlockInfo bi: newlist) {
|
||||
bi.setINode(this);
|
||||
bi.setBlockCollection(this);
|
||||
}
|
||||
this.blocks = newlist;
|
||||
}
|
||||
|
@ -161,13 +164,19 @@ public class INodeFile extends INode {
|
|||
if(blocks != null && v != null) {
|
||||
for (BlockInfo blk : blocks) {
|
||||
v.add(blk);
|
||||
blk.setINode(null);
|
||||
blk.setBlockCollection(null);
|
||||
}
|
||||
}
|
||||
blocks = null;
|
||||
return 1;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
// Get the full path name of this inode.
|
||||
return getFullPathName();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
long[] computeContentSummary(long[] summary) {
|
||||
summary[0] += computeFileSize(true);
|
||||
|
|
|
@ -25,13 +25,15 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
|
|||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
|
||||
/**
|
||||
* I-node for file being written.
|
||||
*/
|
||||
public class INodeFileUnderConstruction extends INodeFile {
|
||||
public class INodeFileUnderConstruction extends INodeFile
|
||||
implements MutableBlockCollection {
|
||||
private String clientName; // lease holder
|
||||
private final String clientMachine;
|
||||
private final DatanodeDescriptor clientNode; // if client is a cluster node too.
|
||||
|
@ -154,7 +156,7 @@ public class INodeFileUnderConstruction extends INodeFile {
|
|||
BlockInfoUnderConstruction ucBlock =
|
||||
lastBlock.convertToBlockUnderConstruction(
|
||||
BlockUCState.UNDER_CONSTRUCTION, targets);
|
||||
ucBlock.setINode(this);
|
||||
ucBlock.setBlockCollection(this);
|
||||
setBlock(numBlocks()-1, ucBlock);
|
||||
return ucBlock;
|
||||
}
|
||||
|
|
|
@ -174,10 +174,8 @@ public class NameNode {
|
|||
DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
|
||||
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
|
||||
DFS_NAMENODE_HTTP_ADDRESS_KEY,
|
||||
DFS_NAMENODE_HTTPS_ADDRESS_KEY,
|
||||
DFS_NAMENODE_KEYTAB_FILE_KEY,
|
||||
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
|
||||
DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
|
||||
DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
|
||||
DFS_NAMENODE_BACKUP_ADDRESS_KEY,
|
||||
DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
|
||||
|
@ -229,6 +227,7 @@ public class NameNode {
|
|||
private final boolean haEnabled;
|
||||
private final HAContext haContext;
|
||||
protected boolean allowStaleStandbyReads;
|
||||
private Runtime runtime = Runtime.getRuntime();
|
||||
|
||||
|
||||
/** httpServer */
|
||||
|
@ -382,8 +381,9 @@ public class NameNode {
|
|||
}
|
||||
|
||||
protected void setHttpServerAddress(Configuration conf) {
|
||||
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
|
||||
NetUtils.getHostPortString(getHttpAddress()));
|
||||
String hostPort = NetUtils.getHostPortString(getHttpAddress());
|
||||
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, hostPort);
|
||||
LOG.info("Web-server up at: " + hostPort);
|
||||
}
|
||||
|
||||
protected void loadNamesystem(Configuration conf) throws IOException {
|
||||
|
@ -503,11 +503,16 @@ public class NameNode {
|
|||
}
|
||||
|
||||
private void startTrashEmptier(Configuration conf) throws IOException {
|
||||
long trashInterval
|
||||
= conf.getLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
|
||||
CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
|
||||
if(trashInterval == 0)
|
||||
long trashInterval = conf.getLong(
|
||||
CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
|
||||
CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
|
||||
if (trashInterval == 0) {
|
||||
return;
|
||||
} else if (trashInterval < 0) {
|
||||
throw new IOException("Cannot start tresh emptier with negative interval."
|
||||
+ " Set " + CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY + " to a"
|
||||
+ " positive value.");
|
||||
}
|
||||
this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
|
||||
this.emptier.setDaemon(true);
|
||||
this.emptier.start();
|
||||
|
@ -1151,22 +1156,20 @@ public class NameNode {
|
|||
*/
|
||||
public static void initializeGenericKeys(Configuration conf,
|
||||
String nameserviceId, String namenodeId) {
|
||||
if ((nameserviceId == null || nameserviceId.isEmpty()) &&
|
||||
(namenodeId == null || namenodeId.isEmpty())) {
|
||||
return;
|
||||
}
|
||||
if ((nameserviceId != null && !nameserviceId.isEmpty()) ||
|
||||
(namenodeId != null && !namenodeId.isEmpty())) {
|
||||
if (nameserviceId != null) {
|
||||
conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
|
||||
}
|
||||
if (namenodeId != null) {
|
||||
conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
|
||||
}
|
||||
|
||||
if (nameserviceId != null) {
|
||||
conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
|
||||
DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
|
||||
NAMENODE_SPECIFIC_KEYS);
|
||||
DFSUtil.setGenericConf(conf, nameserviceId, null,
|
||||
NAMESERVICE_SPECIFIC_KEYS);
|
||||
}
|
||||
if (namenodeId != null) {
|
||||
conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
|
||||
}
|
||||
|
||||
DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
|
||||
NAMENODE_SPECIFIC_KEYS);
|
||||
DFSUtil.setGenericConf(conf, nameserviceId, null,
|
||||
NAMESERVICE_SPECIFIC_KEYS);
|
||||
|
||||
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
|
||||
URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
|
||||
|
@ -1263,13 +1266,36 @@ public class NameNode {
|
|||
return state.getServiceState();
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public synchronized void setRuntimeForTesting(Runtime runtime) {
|
||||
this.runtime = runtime;
|
||||
}
|
||||
|
||||
/**
|
||||
* Class used as expose {@link NameNode} as context to {@link HAState}
|
||||
* Shutdown the NN immediately in an ungraceful way. Used when it would be
|
||||
* unsafe for the NN to continue operating, e.g. during a failed HA state
|
||||
* transition.
|
||||
*
|
||||
* TODO(HA):
|
||||
* When entering and exiting state, on failing to start services,
|
||||
* appropriate action is needed todo either shutdown the node or recover
|
||||
* from failure.
|
||||
* @param t exception which warrants the shutdown. Printed to the NN log
|
||||
* before exit.
|
||||
* @throws ServiceFailedException thrown only for testing.
|
||||
*/
|
||||
private synchronized void doImmediateShutdown(Throwable t)
|
||||
throws ServiceFailedException {
|
||||
String message = "Error encountered requiring NN shutdown. " +
|
||||
"Shutting down immediately.";
|
||||
try {
|
||||
LOG.fatal(message, t);
|
||||
} catch (Throwable ignored) {
|
||||
// This is unlikely to happen, but there's nothing we can do if it does.
|
||||
}
|
||||
runtime.exit(1);
|
||||
// This code is only reached during testing, when runtime is stubbed out.
|
||||
throw new ServiceFailedException(message, t);
|
||||
}
|
||||
|
||||
/**
|
||||
* Class used to expose {@link NameNode} as context to {@link HAState}
|
||||
*/
|
||||
protected class NameNodeHAContext implements HAContext {
|
||||
@Override
|
||||
|
@ -1284,32 +1310,52 @@ public class NameNode {
|
|||
|
||||
@Override
|
||||
public void startActiveServices() throws IOException {
|
||||
namesystem.startActiveServices();
|
||||
startTrashEmptier(conf);
|
||||
try {
|
||||
namesystem.startActiveServices();
|
||||
startTrashEmptier(conf);
|
||||
} catch (Throwable t) {
|
||||
doImmediateShutdown(t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stopActiveServices() throws IOException {
|
||||
if (namesystem != null) {
|
||||
namesystem.stopActiveServices();
|
||||
try {
|
||||
if (namesystem != null) {
|
||||
namesystem.stopActiveServices();
|
||||
}
|
||||
stopTrashEmptier();
|
||||
} catch (Throwable t) {
|
||||
doImmediateShutdown(t);
|
||||
}
|
||||
stopTrashEmptier();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void startStandbyServices() throws IOException {
|
||||
namesystem.startStandbyServices(conf);
|
||||
try {
|
||||
namesystem.startStandbyServices(conf);
|
||||
} catch (Throwable t) {
|
||||
doImmediateShutdown(t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepareToStopStandbyServices() throws ServiceFailedException {
|
||||
namesystem.prepareToStopStandbyServices();
|
||||
try {
|
||||
namesystem.prepareToStopStandbyServices();
|
||||
} catch (Throwable t) {
|
||||
doImmediateShutdown(t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stopStandbyServices() throws IOException {
|
||||
if (namesystem != null) {
|
||||
namesystem.stopStandbyServices();
|
||||
try {
|
||||
if (namesystem != null) {
|
||||
namesystem.stopStandbyServices();
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
doImmediateShutdown(t);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.namenode;
|
||||
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;
|
||||
|
@ -43,6 +44,7 @@ import org.apache.hadoop.http.HttpServer;
|
|||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
|
||||
import org.apache.hadoop.security.authorize.AccessControlList;
|
||||
|
||||
/**
|
||||
|
@ -81,124 +83,98 @@ public class NameNodeHttpServer {
|
|||
|
||||
public void start() throws IOException {
|
||||
final String infoHost = bindAddress.getHostName();
|
||||
int infoPort = bindAddress.getPort();
|
||||
|
||||
if(UserGroupInformation.isSecurityEnabled()) {
|
||||
String httpsUser = SecurityUtil.getServerPrincipal(conf
|
||||
.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoHost);
|
||||
if (httpsUser == null) {
|
||||
LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY
|
||||
+ " not defined in config. Starting http server as "
|
||||
+ getDefaultServerPrincipal()
|
||||
+ ": Kerberized SSL may be not function correctly.");
|
||||
} else {
|
||||
// Kerberized SSL servers must be run from the host principal...
|
||||
LOG.info("Logging in as " + httpsUser + " to start http server.");
|
||||
SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoHost);
|
||||
}
|
||||
}
|
||||
|
||||
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
|
||||
try {
|
||||
this.httpServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() {
|
||||
@Override
|
||||
public HttpServer run() throws IOException, InterruptedException {
|
||||
int infoPort = bindAddress.getPort();
|
||||
httpServer = new HttpServer("hdfs", infoHost, infoPort,
|
||||
infoPort == 0, conf,
|
||||
new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " "))) {
|
||||
{
|
||||
if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
|
||||
//add SPNEGO authentication filter for webhdfs
|
||||
final String name = "SPNEGO";
|
||||
final String classname = AuthFilter.class.getName();
|
||||
final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
|
||||
Map<String, String> params = getAuthFilterParams(conf);
|
||||
defineFilter(webAppContext, name, classname, params,
|
||||
new String[]{pathSpec});
|
||||
LOG.info("Added filter '" + name + "' (class=" + classname + ")");
|
||||
|
||||
// add webhdfs packages
|
||||
addJerseyResourcePackage(
|
||||
NamenodeWebHdfsMethods.class.getPackage().getName()
|
||||
+ ";" + Param.class.getPackage().getName(), pathSpec);
|
||||
}
|
||||
httpServer = new HttpServer("hdfs", infoHost, infoPort,
|
||||
infoPort == 0, conf,
|
||||
new AccessControlList(conf.get(DFS_ADMIN, " "))) {
|
||||
{
|
||||
// Add SPNEGO support to NameNode
|
||||
if (UserGroupInformation.isSecurityEnabled()) {
|
||||
Map<String, String> params = new HashMap<String, String>();
|
||||
String principalInConf = conf.get(
|
||||
DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
|
||||
if (principalInConf != null && !principalInConf.isEmpty()) {
|
||||
params.put("kerberos.principal",
|
||||
SecurityUtil.getServerPrincipal(principalInConf, infoHost));
|
||||
String httpKeytab = conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
|
||||
if (httpKeytab != null && !httpKeytab.isEmpty()) {
|
||||
params.put("kerberos.keytab", httpKeytab);
|
||||
}
|
||||
|
||||
private Map<String, String> getAuthFilterParams(Configuration conf)
|
||||
throws IOException {
|
||||
Map<String, String> params = new HashMap<String, String>();
|
||||
String principalInConf = conf
|
||||
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
|
||||
if (principalInConf != null && !principalInConf.isEmpty()) {
|
||||
params
|
||||
.put(
|
||||
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
|
||||
SecurityUtil.getServerPrincipal(principalInConf,
|
||||
infoHost));
|
||||
}
|
||||
String httpKeytab = conf
|
||||
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
|
||||
if (httpKeytab != null && !httpKeytab.isEmpty()) {
|
||||
params.put(
|
||||
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
|
||||
httpKeytab);
|
||||
}
|
||||
return params;
|
||||
}
|
||||
};
|
||||
params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
|
||||
|
||||
boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
|
||||
boolean useKrb = UserGroupInformation.isSecurityEnabled();
|
||||
if (certSSL || useKrb) {
|
||||
boolean needClientAuth = conf.getBoolean(
|
||||
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
|
||||
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
|
||||
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf
|
||||
.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
|
||||
Configuration sslConf = new HdfsConfiguration(false);
|
||||
if (certSSL) {
|
||||
sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
|
||||
DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
|
||||
}
|
||||
httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
|
||||
useKrb);
|
||||
// assume same ssl port for all datanodes
|
||||
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(
|
||||
conf.get(DFS_DATANODE_HTTPS_ADDRESS_KEY,
|
||||
infoHost + ":" + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
|
||||
httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
|
||||
datanodeSslPort.getPort());
|
||||
defineFilter(webAppContext, SPNEGO_FILTER,
|
||||
AuthenticationFilter.class.getName(), params, null);
|
||||
}
|
||||
httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
|
||||
httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
|
||||
nn.getNameNodeAddress());
|
||||
httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, nn.getFSImage());
|
||||
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
|
||||
setupServlets(httpServer, conf);
|
||||
httpServer.start();
|
||||
|
||||
// The web-server port can be ephemeral... ensure we have the correct
|
||||
// info
|
||||
infoPort = httpServer.getPort();
|
||||
httpAddress = new InetSocketAddress(infoHost, infoPort);
|
||||
LOG.info(nn.getRole() + " Web-server up at: " + httpAddress);
|
||||
return httpServer;
|
||||
}
|
||||
});
|
||||
} catch (InterruptedException e) {
|
||||
throw new IOException(e);
|
||||
} finally {
|
||||
if(UserGroupInformation.isSecurityEnabled() &&
|
||||
conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) {
|
||||
// Go back to being the correct Namenode principal
|
||||
LOG.info("Logging back in as NameNode user following http server start");
|
||||
nn.loginAsNameNodeUser(conf);
|
||||
if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
|
||||
//add SPNEGO authentication filter for webhdfs
|
||||
final String name = "SPNEGO";
|
||||
final String classname = AuthFilter.class.getName();
|
||||
final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
|
||||
Map<String, String> params = getAuthFilterParams(conf);
|
||||
defineFilter(webAppContext, name, classname, params,
|
||||
new String[]{pathSpec});
|
||||
LOG.info("Added filter '" + name + "' (class=" + classname + ")");
|
||||
|
||||
// add webhdfs packages
|
||||
addJerseyResourcePackage(
|
||||
NamenodeWebHdfsMethods.class.getPackage().getName()
|
||||
+ ";" + Param.class.getPackage().getName(), pathSpec);
|
||||
}
|
||||
}
|
||||
|
||||
private Map<String, String> getAuthFilterParams(Configuration conf)
|
||||
throws IOException {
|
||||
Map<String, String> params = new HashMap<String, String>();
|
||||
String principalInConf = conf
|
||||
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
|
||||
if (principalInConf != null && !principalInConf.isEmpty()) {
|
||||
params
|
||||
.put(
|
||||
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
|
||||
SecurityUtil.getServerPrincipal(principalInConf,
|
||||
bindAddress.getHostName()));
|
||||
}
|
||||
String httpKeytab = conf
|
||||
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
|
||||
if (httpKeytab != null && !httpKeytab.isEmpty()) {
|
||||
params.put(
|
||||
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
|
||||
httpKeytab);
|
||||
}
|
||||
return params;
|
||||
}
|
||||
};
|
||||
|
||||
boolean certSSL = conf.getBoolean("dfs.https.enable", false);
|
||||
if (certSSL) {
|
||||
boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
|
||||
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(infoHost + ":" + conf.get(
|
||||
"dfs.https.port", infoHost + ":" + 0));
|
||||
Configuration sslConf = new Configuration(false);
|
||||
if (certSSL) {
|
||||
sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
|
||||
"ssl-server.xml"));
|
||||
}
|
||||
httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
|
||||
// assume same ssl port for all datanodes
|
||||
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
|
||||
"dfs.datanode.https.address", infoHost + ":" + 50475));
|
||||
httpServer.setAttribute("datanode.https.port", datanodeSslPort
|
||||
.getPort());
|
||||
}
|
||||
httpServer.setAttribute("name.node", nn);
|
||||
httpServer.setAttribute("name.node.address", bindAddress);
|
||||
httpServer.setAttribute("name.system.image", nn.getFSImage());
|
||||
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
|
||||
setupServlets(httpServer, conf);
|
||||
httpServer.start();
|
||||
httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort());
|
||||
}
|
||||
|
||||
|
||||
public void stop() throws Exception {
|
||||
if (httpServer != null) {
|
||||
httpServer.stop();
|
||||
|
@@ -734,7 +734,7 @@ class NamenodeJspHelper {
this.inode = null;
} else {
this.block = new Block(blockId);
this.inode = blockManager.getINode(block);
this.inode = (INodeFile) blockManager.getBlockCollection(block);
}
}
@@ -25,8 +25,10 @@ import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
@@ -44,6 +46,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
@@ -63,9 +66,9 @@ import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AccessControlList;

import org.apache.hadoop.util.Daemon;
@@ -108,7 +111,6 @@ public class SecondaryNameNode implements Runnable {
  private volatile boolean shouldRun;
  private HttpServer infoServer;
  private int infoPort;
  private int imagePort;
  private String infoBindAddress;

  private Collection<URI> checkpointDirs;
@@ -231,61 +233,45 @@ public class SecondaryNameNode implements Runnable {
    checkpointConf = new CheckpointConf(conf);

    // initialize the webserver for uploading files.
    // Kerberized SSL servers must be run from the host principal...
    UserGroupInformation httpUGI =
        UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            SecurityUtil.getServerPrincipal(conf
                .get(DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
                infoBindAddress),
            conf.get(DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
    try {
      infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {
        @Override
        public HttpServer run() throws IOException, InterruptedException {
          LOG.info("Starting web server as: " +
              UserGroupInformation.getCurrentUser().getUserName());

          int tmpInfoPort = infoSocAddr.getPort();
          infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
              tmpInfoPort == 0, conf,
              new AccessControlList(conf.get(DFS_ADMIN, " ")));

          if(UserGroupInformation.isSecurityEnabled()) {
            SecurityUtil.initKrb5CipherSuites();
            InetSocketAddress secInfoSocAddr =
                NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.getInt(
                    DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
                    DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT));
            imagePort = secInfoSocAddr.getPort();
            infoServer.addSslListener(secInfoSocAddr, conf, false, true);
    int tmpInfoPort = infoSocAddr.getPort();
    infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
        tmpInfoPort == 0, conf,
        new AccessControlList(conf.get(DFS_ADMIN, " "))) {
      {
        if (UserGroupInformation.isSecurityEnabled()) {
          Map<String, String> params = new HashMap<String, String>();
          String principalInConf = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
          if (principalInConf != null && !principalInConf.isEmpty()) {
            params.put("kerberos.principal",
                SecurityUtil.getServerPrincipal(principalInConf, infoSocAddr.getHostName()));
          }
          String httpKeytab = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
          if (httpKeytab != null && !httpKeytab.isEmpty()) {
            params.put("kerberos.keytab", httpKeytab);
          }
          params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");

          infoServer.setAttribute("secondary.name.node", SecondaryNameNode.this);
          infoServer.setAttribute("name.system.image", checkpointImage);
          infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
          infoServer.addInternalServlet("getimage", "/getimage",
              GetImageServlet.class, true);
          infoServer.start();
          return infoServer;
          defineFilter(webAppContext, SPNEGO_FILTER, AuthenticationFilter.class.getName(),
              params, null);
        }
      });
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    }
      }
    };
    infoServer.setAttribute("secondary.name.node", this);
    infoServer.setAttribute("name.system.image", checkpointImage);
    infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
    infoServer.addInternalServlet("getimage", "/getimage",
        GetImageServlet.class, true);
    infoServer.start();

    LOG.info("Web server init done");

    // The web-server port can be ephemeral... ensure we have the correct info
    infoPort = infoServer.getPort();
    if (!UserGroupInformation.isSecurityEnabled()) {
      imagePort = infoPort;
    }

    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort);
    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
    LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
    LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
    LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " +
        "(" + checkpointConf.getPeriod()/60 + " min)");
        "(" + checkpointConf.getPeriod() / 60 + " min)");
    LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
  }
@@ -434,7 +420,7 @@ public class SecondaryNameNode implements Runnable {
      throw new IOException("This is not a DFS");
    }

    String configuredAddress = DFSUtil.getInfoServer(null, conf, true);
    String configuredAddress = DFSUtil.getInfoServer(null, conf, false);
    String address = DFSUtil.substituteForWildcardAddress(configuredAddress,
        fsName.getHost());
    LOG.debug("Will connect to NameNode at HTTP address: " + address);
@@ -446,7 +432,7 @@ public class SecondaryNameNode implements Runnable {
   * for image transfers
   */
  private InetSocketAddress getImageListenAddress() {
    return new InetSocketAddress(infoBindAddress, imagePort);
    return new InetSocketAddress(infoBindAddress, infoPort);
  }

  /**
@@ -507,7 +493,7 @@ public class SecondaryNameNode implements Runnable {


  /**
   * @param argv The parameters passed to this program.
   * @param opts The parameters passed to this program.
   * @exception Exception if the filesystem does not exist.
   * @return 0 on success, non zero on error.
   */
@@ -709,7 +695,7 @@ public class SecondaryNameNode implements Runnable {
   * Construct a checkpoint image.
   * @param conf Node configuration.
   * @param imageDirs URIs of storage for image.
   * @param editDirs URIs of storage for edit logs.
   * @param editsDirs URIs of storage for edit logs.
   * @throws IOException If storage cannot be access.
   */
  CheckpointStorage(Configuration conf,
@@ -201,18 +201,16 @@ public class TransferFsImage {
      String queryString, List<File> localPaths,
      NNStorage dstStorage, boolean getChecksum) throws IOException {
    byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
    String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
    StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?");
    str.append(queryString);

    String str = "http://" + nnHostPort + "/getimage?" + queryString;
    LOG.info("Opening connection to " + str);
    //
    // open connection to remote server
    //
    URL url = new URL(str.toString());
    URL url = new URL(str);

    // Avoid Krb bug with cross-realm hosts
    SecurityUtil.fetchServiceTicket(url);
    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
    HttpURLConnection connection = (HttpURLConnection)
        SecurityUtil.openSecureHttpConnection(url);

    if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
      throw new HttpGetFailedException(
@@ -97,7 +97,6 @@ public class BootstrapStandby implements Tool, Configurable {
  static final int ERR_CODE_LOGS_UNAVAILABLE = 6;

  public int run(String[] args) throws Exception {
    SecurityUtil.initKrb5CipherSuites();
    parseArgs(args);
    parseConfAndFindOtherNN();
    NameNode.checkAllowFormat(conf);
@@ -325,7 +324,7 @@ public class BootstrapStandby implements Tool, Configurable {
        "Could not determine valid IPC address for other NameNode (%s)" +
        ", got: %s", otherNNId, otherIpcAddr);

    otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, true);
    otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
    otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
        otherIpcAddr.getHostName());
Some files were not shown because too many files have changed in this diff.