Merge trunk into auto-HA branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3042@1337645 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2012-05-12 20:52:34 +00:00
commit 4cd70e87be
232 changed files with 3406 additions and 2066 deletions

View File

@@ -423,8 +423,8 @@ checkJavacWarnings () {
 if [[ $? != 0 ]] ; then
 JIRA_COMMENT="$JIRA_COMMENT
--1 javac. The patch appears to cause tar ant target to fail."
+-1 javac. The patch appears to cause the build to fail."
-return 1
+return 2
 fi
 ### Compare trunk and patch javac warning numbers
 if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then
@@ -528,6 +528,24 @@ $JIRA_COMMENT_FOOTER"
 return 0
 }
+###############################################################################
+### Install the new jars so tests and findbugs can find all of the updated jars
+buildAndInstall () {
+echo ""
+echo ""
+echo "======================================================================"
+echo "======================================================================"
+echo " Installing all of the jars"
+echo "======================================================================"
+echo "======================================================================"
+echo ""
+echo ""
+echo "$MVN install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess"
+$MVN install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess
+return $?
+}
 ###############################################################################
 ### Check there are no changes in the number of Findbugs warnings
 checkFindbugsWarnings () {
@@ -882,15 +900,22 @@ if [[ $? != 0 ]] ; then
 submitJiraComment 1
 cleanupAndExit 1
 fi
-checkJavadocWarnings
-(( RESULT = RESULT + $? ))
 checkJavacWarnings
+JAVAC_RET=$?
+#2 is returned if the code could not compile
+if [[ $JAVAC_RET == 2 ]] ; then
+submitJiraComment 1
+cleanupAndExit 1
+fi
+(( RESULT = RESULT + $JAVAC_RET ))
+checkJavadocWarnings
 (( RESULT = RESULT + $? ))
 checkEclipseGeneration
 (( RESULT = RESULT + $? ))
 ### Checkstyle not implemented yet
 #checkStyle
 #(( RESULT = RESULT + $? ))
+buildAndInstall
 checkFindbugsWarnings
 (( RESULT = RESULT + $? ))
 checkReleaseAuditWarnings

View File

@@ -26,7 +26,6 @@ import javax.security.auth.login.Configuration;
 import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginException;
 import java.io.IOException;
-import java.lang.reflect.Field;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.security.AccessControlContext;
@@ -196,11 +195,10 @@ public class KerberosAuthenticator implements Authenticator {
 try {
 GSSManager gssManager = GSSManager.getInstance();
 String servicePrincipal = "HTTP/" + KerberosAuthenticator.this.url.getHost();
+Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
 GSSName serviceName = gssManager.createName(servicePrincipal,
-GSSName.NT_HOSTBASED_SERVICE);
-Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal,
-gssManager);
+oid);
+oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
 gssContext = gssManager.createContext(serviceName, oid, null,
 GSSContext.DEFAULT_LIFETIME);
 gssContext.requestCredDeleg(true);

View File

@@ -327,6 +327,8 @@ public class AuthenticationFilter implements Filter {
 @Override
 public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
 throws IOException, ServletException {
+boolean unauthorizedResponse = true;
+String unauthorizedMsg = "";
 HttpServletRequest httpRequest = (HttpServletRequest) request;
 HttpServletResponse httpResponse = (HttpServletResponse) response;
 try {
@@ -350,6 +352,7 @@
 newToken = true;
 }
 if (token != null) {
+unauthorizedResponse = false;
 if (LOG.isDebugEnabled()) {
 LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
 }
@@ -378,17 +381,17 @@
 }
 filterChain.doFilter(httpRequest, httpResponse);
 }
-else {
-throw new AuthenticationException("Missing AuthenticationToken");
-}
 } catch (AuthenticationException ex) {
+unauthorizedMsg = ex.toString();
+LOG.warn("Authentication exception: " + ex.getMessage(), ex);
+}
+if (unauthorizedResponse) {
 if (!httpResponse.isCommitted()) {
 Cookie cookie = createCookie("");
 cookie.setMaxAge(0);
 httpResponse.addCookie(cookie);
-httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, ex.getMessage());
+httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, unauthorizedMsg);
 }
-LOG.warn("Authentication exception: " + ex.getMessage(), ex);
 }
 }

View File

@@ -22,7 +22,6 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import org.ietf.jgss.GSSException;
-import org.ietf.jgss.GSSManager;
 import org.ietf.jgss.Oid;
 public class KerberosUtil {
@@ -34,8 +33,7 @@
 : "com.sun.security.auth.module.Krb5LoginModule";
 }
-public static Oid getOidClassInstance(String servicePrincipal,
-GSSManager gssManager)
+public static Oid getOidInstance(String oidName)
 throws ClassNotFoundException, GSSException, NoSuchFieldException,
 IllegalAccessException {
 Class<?> oidClass;
@@ -44,7 +42,7 @@
 } else {
 oidClass = Class.forName("sun.security.jgss.GSSUtil");
 }
-Field oidField = oidClass.getDeclaredField("GSS_KRB5_MECH_OID");
+Field oidField = oidClass.getDeclaredField(oidName);
 return (Oid)oidField.get(oidClass);
 }

View File

@@ -145,10 +145,10 @@ public class TestKerberosAuthenticationHandler extends TestCase {
 GSSContext gssContext = null;
 try {
 String servicePrincipal = KerberosTestUtils.getServerPrincipal();
+Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
 GSSName serviceName = gssManager.createName(servicePrincipal,
-GSSName.NT_HOSTBASED_SERVICE);
-Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal,
-gssManager);
+oid);
+oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
 gssContext = gssManager.createContext(serviceName, oid, null,
 GSSContext.DEFAULT_LIFETIME);
 gssContext.requestCredDeleg(true);

View File

@@ -63,8 +63,6 @@ Trunk (unreleased changes)
 HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh)
-HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
 HADOOP-8308. Support cross-project Jenkins builds. (tomwhite)
 BUG FIXES
@@ -129,6 +127,15 @@ Trunk (unreleased changes)
 HADOOP-8339. jenkins complaining about 16 javadoc warnings
 (Tom White and Robert Evans via tgraves)
+HADOOP-8354. test-patch findbugs may fail if a dependent module is changed
+(Tom White and Robert Evans)
+HADOOP-8375. test-patch should stop immediately once it has found
+compilation errors (bobby)
+HADOOP-8395. Text shell command unnecessarily demands that a
+SequenceFile's key class be WritableComparable (harsh)
 OPTIMIZATIONS
 HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -139,6 +146,9 @@ Release 2.0.0 - UNRELEASED
 HADOOP-7920. Remove Avro Rpc. (suresh)
+HADOOP-8388. Remove unused BlockLocation serialization.
+(Colin Patrick McCabe via eli)
 NEW FEATURES
 HADOOP-7773. Add support for protocol buffer based RPC engine.
@@ -163,6 +173,9 @@ Release 2.0.0 - UNRELEASED
 HADOOP-8210. Common side of HDFS-3148: The client should be able
 to use multiple local interfaces for data transfer. (eli)
+HADOOP-8343. Allow configuration of authorization for JmxJsonServlet and
+MetricsServlet (tucu)
 IMPROVEMENTS
 HADOOP-7524. Change RPC to allow multiple protocols including multuple
@@ -284,6 +297,34 @@ Release 2.0.0 - UNRELEASED
 HADOOP-8214. make hadoop script recognize a full set of deprecated commands (rvs via tucu)
+HADOOP-8347. Hadoop Common logs misspell 'successful'.
+(Philip Zeyliger via eli)
+HADOOP-8350. Improve NetUtils.getInputStream to return a stream which has
+a tunable timeout. (todd)
+HADOOP-8356. FileSystem service loading mechanism should print the FileSystem
+impl it is failing to load (tucu)
+HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
+final release. (todd)
+HADOOP-8361. Avoid out-of-memory problems when deserializing strings.
+(Colin Patrick McCabe via eli)
+HADOOP-8353. hadoop-daemon.sh and yarn-daemon.sh can be misleading on stop.
+(Roman Shaposhnik via atm)
+HADOOP-8224. Don't hardcode hdfs.audit.logger in the scripts.
+(Tomohiko Kinebuchi via eli)
+HADOOP-8113. Correction to BUILDING.txt: HDFS needs ProtocolBuffer, too
+(not just MapReduce). Contributed by Eugene Koontz.
+HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
+HADOOP-8366 Use ProtoBuf for RpcResponseHeader (sanjay radia)
 OPTIMIZATIONS
 BUG FIXES
@@ -314,8 +355,6 @@ Release 2.0.0 - UNRELEASED
 HADOOP-8104. Inconsistent Jackson versions (tucu)
-HADOOP-7940. The Text.clear() method does not clear the bytes as intended. (Csaba Miklos via harsh)
 HADOOP-8119. Fix javac warnings in TestAuthenticationFilter in hadoop-auth.
 (szetszwo)
@@ -406,6 +445,22 @@ Release 2.0.0 - UNRELEASED
 HADOOP-8342. HDFS command fails with exception following merge of
 HADOOP-8325 (tucu)
+HADOOP-8346. Makes oid changes to make SPNEGO work. Was broken due
+to fixes introduced by the IBM JDK compatibility patch. (ddas)
+HADOOP-8355. SPNEGO filter throws/logs exception when authentication fails (tucu)
+HADOOP-8349. ViewFS doesn't work when the root of a file system is mounted. (atm)
+HADOOP-8328. Duplicate FileSystem Statistics object for 'file' scheme.
+(tomwhite)
+HADOOP-8359. Fix javadoc warnings in Configuration. (Anupam Seth via
+szetszwo)
+HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
+starting with a numeric character. (Junping Du via suresh)
 BREAKDOWN OF HADOOP-7454 SUBTASKS
 HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -464,6 +519,11 @@ Release 2.0.0 - UNRELEASED
 HADOOP-8172. Configuration no longer sets all keys in a deprecated key
 list. (Anupam Seth via bobby)
+HADOOP-7868. Hadoop native fails to compile when default linker
+option is -Wl,--as-needed. (Trevor Robinson via eli)
+HADOOP-8316. Audit logging should be disabled by default. (eli)
 Release 0.23.3 - UNRELEASED
 INCOMPATIBLE CHANGES
@@ -519,6 +579,13 @@ Release 0.23.3 - UNRELEASED
 HADOOP-8335. Improve Configuration's address handling (Daryn Sharp via
 bobby)
+HADOOP-8327. distcpv2 and distcpv1 jars should not coexist (Dave Thompson
+via bobby)
+HADOOP-8341. Fix or filter findbugs issues in hadoop-tools (bobby)
+HADOOP-8373. Port RPC.getServerAddress to 0.23 (Daryn Sharp via bobby)
 Release 0.23.2 - UNRELEASED
 INCOMPATIBLE CHANGES

View File

@@ -109,8 +109,10 @@ fi
 export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
 export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
 export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
+export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
 log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
 pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
+HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
 # Set default scheduling priority
 if [ "$HADOOP_NICENESS" = "" ]; then
@@ -162,9 +164,15 @@ case $startStop in
 (stop)
 if [ -f $pid ]; then
-if kill -0 `cat $pid` > /dev/null 2>&1; then
+TARGET_PID=`cat $pid`
+if kill -0 $TARGET_PID > /dev/null 2>&1; then
 echo stopping $command
-kill `cat $pid`
+kill $TARGET_PID
+sleep $HADOOP_STOP_TIMEOUT
+if kill -0 $TARGET_PID > /dev/null 2>&1; then
+echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
+kill -9 $TARGET_PID
+fi
 else
 echo no $command to stop
 fi

View File

@@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 #
 #Security appender
 #
-hadoop.security.logger=INFO,console
+hadoop.security.logger=INFO,NullAppender
 hadoop.security.log.maxfilesize=256MB
 hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
@@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
 #
 # hdfs audit logging
 #
-hdfs.audit.logger=INFO,console
+hdfs.audit.logger=INFO,NullAppender
 hdfs.audit.log.maxfilesize=256MB
 hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
 #
 # mapred audit logging
 #
-mapred.audit.logger=INFO,console
+mapred.audit.logger=INFO,NullAppender
 mapred.audit.log.maxfilesize=256MB
 mapred.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}

View File

@@ -18,7 +18,6 @@
 package org.apache.hadoop.conf;
 import java.io.IOException;
-import java.io.OutputStreamWriter;
 import java.io.Writer;
 import javax.servlet.ServletException;
@@ -57,9 +56,8 @@ public class ConfServlet extends HttpServlet {
 public void doGet(HttpServletRequest request, HttpServletResponse response)
 throws ServletException, IOException {
-// Do the authorization
-if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-response)) {
+if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+request, response)) {
 return;
 }

View File

@@ -278,7 +278,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 * @param key
 * @param newKeys
 * @param customMessage
-* @deprecated use {@link addDeprecation(String key, String newKey,
+* @deprecated use {@link #addDeprecation(String key, String newKey,
 String customMessage)} instead
 */
 @Deprecated
@@ -328,7 +328,7 @@
 *
 * @param key Key that is to be deprecated
 * @param newKeys list of keys that take up the values of deprecated key
-* @deprecated use {@link addDeprecation(String key, String newKey)} instead
+* @deprecated use {@link #addDeprecation(String key, String newKey)} instead
 */
 @Deprecated
 public synchronized static void addDeprecation(String key, String[] newKeys) {

View File

@@ -346,7 +346,7 @@ public abstract class AbstractFileSystem {
 path);
 } else {
 throw new InvalidPathException(
-"Path without scheme with non-null autorhrity:" + path);
+"Path without scheme with non-null authority:" + path);
 }
 }
 String thisScheme = this.getUri().getScheme();

View File

@@ -35,16 +35,7 @@ import org.apache.hadoop.io.WritableFactory;
 */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class BlockLocation implements Writable {
-static { // register a ctor
-WritableFactories.setFactory
-(BlockLocation.class,
-new WritableFactory() {
-public Writable newInstance() { return new BlockLocation(); }
-});
-}
+public class BlockLocation {
 private String[] hosts; //hostnames of datanodes
 private String[] names; //hostname:portNumber of datanodes
 private String[] topologyPaths; // full path name in network topology
@@ -219,62 +210,6 @@ public class BlockLocation implements Writable {
 }
 }
-/**
-* Implement write of Writable
-*/
-public void write(DataOutput out) throws IOException {
-out.writeLong(offset);
-out.writeLong(length);
-out.writeBoolean(corrupt);
-out.writeInt(names.length);
-for (int i=0; i < names.length; i++) {
-Text name = new Text(names[i]);
-name.write(out);
-}
-out.writeInt(hosts.length);
-for (int i=0; i < hosts.length; i++) {
-Text host = new Text(hosts[i]);
-host.write(out);
-}
-out.writeInt(topologyPaths.length);
-for (int i=0; i < topologyPaths.length; i++) {
-Text host = new Text(topologyPaths[i]);
-host.write(out);
-}
-}
-/**
-* Implement readFields of Writable
-*/
-public void readFields(DataInput in) throws IOException {
-this.offset = in.readLong();
-this.length = in.readLong();
-this.corrupt = in.readBoolean();
-int numNames = in.readInt();
-this.names = new String[numNames];
-for (int i = 0; i < numNames; i++) {
-Text name = new Text();
-name.readFields(in);
-names[i] = name.toString();
-}
-int numHosts = in.readInt();
-this.hosts = new String[numHosts];
-for (int i = 0; i < numHosts; i++) {
-Text host = new Text();
-host.readFields(in);
-hosts[i] = host.toString();
-}
-int numTops = in.readInt();
-topologyPaths = new String[numTops];
-for (int i = 0; i < numTops; i++) {
-Text path = new Text();
-path.readFields(in);
-topologyPaths[i] = path.toString();
-}
-}
 public String toString() {
 StringBuilder result = new StringBuilder();
 result.append(offset);
@@ -289,4 +224,4 @@ public class BlockLocation implements Writable {
 }
 return result.toString();
 }
 }

View File

@@ -228,6 +228,9 @@ public class CommonConfigurationKeysPublic {
 public static final String HADOOP_SECURITY_AUTHORIZATION =
 "hadoop.security.authorization";
 /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+public static final String HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN =
+"hadoop.security.instrumentation.requires.admin";
+/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
 public static final String HADOOP_SECURITY_SERVICE_USER_NAME_KEY =
 "hadoop.security.service.user.name.key";
 }

View File

@@ -254,7 +254,7 @@ public class FileStatus implements Writable, Comparable {
 // Writable
 //////////////////////////////////////////////////
 public void write(DataOutput out) throws IOException {
-Text.writeString(out, getPath().toString());
+Text.writeString(out, getPath().toString(), Text.ONE_MEGABYTE);
 out.writeLong(getLen());
 out.writeBoolean(isDirectory());
 out.writeShort(getReplication());
@@ -262,16 +262,16 @@
 out.writeLong(getModificationTime());
 out.writeLong(getAccessTime());
 getPermission().write(out);
-Text.writeString(out, getOwner());
-Text.writeString(out, getGroup());
+Text.writeString(out, getOwner(), Text.ONE_MEGABYTE);
+Text.writeString(out, getGroup(), Text.ONE_MEGABYTE);
 out.writeBoolean(isSymlink());
 if (isSymlink()) {
-Text.writeString(out, getSymlink().toString());
+Text.writeString(out, getSymlink().toString(), Text.ONE_MEGABYTE);
 }
 }
 public void readFields(DataInput in) throws IOException {
-String strPath = Text.readString(in);
+String strPath = Text.readString(in, Text.ONE_MEGABYTE);
 this.path = new Path(strPath);
 this.length = in.readLong();
 this.isdir = in.readBoolean();
@@ -280,10 +280,10 @@
 modification_time = in.readLong();
 access_time = in.readLong();
 permission.readFields(in);
-owner = Text.readString(in);
-group = Text.readString(in);
+owner = Text.readString(in, Text.ONE_MEGABYTE);
+group = Text.readString(in, Text.ONE_MEGABYTE);
 if (in.readBoolean()) {
-this.symlink = new Path(Text.readString(in));
+this.symlink = new Path(Text.readString(in, Text.ONE_MEGABYTE));
 } else {
 this.symlink = null;
 }

View File

@@ -199,7 +199,7 @@ public abstract class FileSystem extends Configured implements Closeable {
 * @return the protocol scheme for the FileSystem.
 */
 public String getScheme() {
-throw new UnsupportedOperationException("Not implemented by the FileSystem implementation");
+throw new UnsupportedOperationException("Not implemented by the " + getClass().getSimpleName() + " FileSystem implementation");
 }
 /** Returns a URI whose scheme and authority identify this FileSystem.*/
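The only change above is the exception text, but it illustrates a small diagnostic pattern: getClass().getSimpleName() makes the message name the concrete FileSystem subclass that failed to override getScheme(). A standalone sketch of the same idea, using hypothetical Base/Derived classes rather than Hadoop's own hierarchy:

// Hypothetical classes, for illustration only; not part of this commit.
abstract class Base {
    public String scheme() {
        // Same pattern as FileSystem.getScheme(): report which subclass is missing the override.
        throw new UnsupportedOperationException(
            "Not implemented by the " + getClass().getSimpleName() + " implementation");
    }
}

class Derived extends Base { /* does not override scheme() */ }

public class SchemeMessageDemo {
    public static void main(String[] args) {
        try {
            new Derived().scheme();
        } catch (UnsupportedOperationException e) {
            // Prints: Not implemented by the Derived implementation
            System.out.println(e.getMessage());
        }
    }
}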

View File

@@ -53,7 +53,7 @@ import org.apache.hadoop.util.Progressable;
 public class FilterFileSystem extends FileSystem {
 protected FileSystem fs;
-private String swapScheme;
+protected String swapScheme;
 /*
 * so that extending classes can define it

View File

@@ -39,6 +39,17 @@ public class LocalFileSystem extends ChecksumFileSystem {
 public LocalFileSystem() {
 this(new RawLocalFileSystem());
 }
+@Override
+public void initialize(URI name, Configuration conf) throws IOException {
+if (fs.getConf() == null) {
+fs.initialize(name, conf);
+}
+String scheme = name.getScheme();
+if (!scheme.equals(fs.getUri().getScheme())) {
+swapScheme = scheme;
+}
+}
 /**
 * Return the protocol scheme for the FileSystem.

View File

@@ -223,6 +223,13 @@ public class Path implements Comparable {
 return isUriPathAbsolute();
 }
+/**
+* @return true if and only if this path represents the root of a file system
+*/
+public boolean isRoot() {
+return getParent() == null;
+}
 /** Returns the final component of this path.*/
 public String getName() {
 String path = uri.getPath();
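A minimal sketch of the new Path.isRoot() helper in use; per the javadoc above it is simply getParent() == null, and the hdfs://nn:8020 URI below is only an illustrative address:

import org.apache.hadoop.fs.Path;

public class PathRootDemo {
    public static void main(String[] args) {
        System.out.println(new Path("/").isRoot());               // true: root of a scheme-less path
        System.out.println(new Path("hdfs://nn:8020/").isRoot()); // true: root of an HDFS file system
        System.out.println(new Path("/user/todd").isRoot());      // false: has a parent ("/user")
    }
}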

View File

@@ -84,8 +84,8 @@ public class PermissionStatus implements Writable {
 /** {@inheritDoc} */
 public void readFields(DataInput in) throws IOException {
-username = Text.readString(in);
-groupname = Text.readString(in);
+username = Text.readString(in, Text.ONE_MEGABYTE);
+groupname = Text.readString(in, Text.ONE_MEGABYTE);
 permission = FsPermission.read(in);
 }
@@ -110,8 +110,8 @@
 String username,
 String groupname,
 FsPermission permission) throws IOException {
-Text.writeString(out, username);
-Text.writeString(out, groupname);
+Text.writeString(out, username, Text.ONE_MEGABYTE);
+Text.writeString(out, groupname, Text.ONE_MEGABYTE);
 permission.write(out);
 }

View File

@@ -34,7 +34,6 @@ import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -136,7 +135,7 @@ class Display extends FsCommand {
 protected class TextRecordInputStream extends InputStream {
 SequenceFile.Reader r;
-WritableComparable<?> key;
+Writable key;
 Writable val;
 DataInputBuffer inbuf;
@@ -148,7 +147,7 @@
 r = new SequenceFile.Reader(lconf,
 SequenceFile.Reader.file(fpath));
 key = ReflectionUtils.newInstance(
-r.getKeyClass().asSubclass(WritableComparable.class), lconf);
+r.getKeyClass().asSubclass(Writable.class), lconf);
 val = ReflectionUtils.newInstance(
 r.getValueClass().asSubclass(Writable.class), lconf);
 inbuf = new DataInputBuffer();

View File

@@ -75,7 +75,8 @@ class ChRootedFileSystem extends FilterFileSystem {
 protected Path fullPath(final Path path) {
 super.checkPath(path);
 return path.isAbsolute() ?
-new Path(chRootPathPartString + path.toUri().getPath()) :
+new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
++ path.toUri().getPath()) :
 new Path(chRootPathPartString + workingDir.toUri().getPath(), path);
 }
@@ -127,7 +128,7 @@
 }
 String pathPart = p.toUri().getPath();
 return (pathPart.length() == chRootPathPartString.length()) ? "" : pathPart
-.substring(chRootPathPartString.length() + 1);
+.substring(chRootPathPartString.length() + (chRootPathPart.isRoot() ? 0 : 1));
 }
 @Override

View File

@@ -79,7 +79,8 @@ class ChRootedFs extends AbstractFileSystem {
 */
 protected Path fullPath(final Path path) {
 super.checkPath(path);
-return new Path(chRootPathPartString + path.toUri().getPath());
+return new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
++ path.toUri().getPath());
 }
 public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
@@ -127,7 +128,8 @@
 }
 String pathPart = p.toUri().getPath();
 return (pathPart.length() == chRootPathPartString.length()) ?
-"" : pathPart.substring(chRootPathPartString.length() + 1);
+"" : pathPart.substring(chRootPathPartString.length() +
+(chRootPathPart.isRoot() ? 0 : 1));
 }

View File

@@ -52,8 +52,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.metrics.MetricsServlet;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -99,6 +97,7 @@ public class HttpServer implements FilterContainer {
 // gets stored.
 public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
 static final String ADMINS_ACL = "admins.acl";
+public static final String SPNEGO_FILTER = "SpnegoFilter";
 public static final String BIND_ADDRESS = "bind.address";
@@ -237,11 +236,7 @@
 webServer.addHandler(webAppContext);
 addDefaultApps(contexts, appDir, conf);
-defineFilter(webAppContext, "krb5Filter",
-Krb5AndCertsSslSocketConnector.Krb5SslFilter.class.getName(),
-null, null);
 addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
 final FilterInitializer[] initializers = getFilterInitializers(conf);
 if (initializers != null) {
@@ -424,12 +419,13 @@
 * protect with Kerberos authentication.
 * Note: This method is to be used for adding servlets that facilitate
 * internal communication and not for user facing functionality. For
-* servlets added using this method, filters (except internal Kerberized
+* servlets added using this method, filters (except internal Kerberos
 * filters) are not enabled.
 *
 * @param name The name of the servlet (can be passed as null)
 * @param pathSpec The path spec for the servlet
 * @param clazz The servlet class
+* @param requireAuth Require Kerberos authenticate to access servlet
 */
 public void addInternalServlet(String name, String pathSpec,
 Class<? extends HttpServlet> clazz, boolean requireAuth) {
@@ -440,11 +436,11 @@
 webAppContext.addServlet(holder, pathSpec);
 if(requireAuth && UserGroupInformation.isSecurityEnabled()) {
-LOG.info("Adding Kerberos filter to " + name);
+LOG.info("Adding Kerberos (SPNEGO) filter to " + name);
 ServletHandler handler = webAppContext.getServletHandler();
 FilterMapping fmap = new FilterMapping();
 fmap.setPathSpec(pathSpec);
-fmap.setFilterName("krb5Filter");
+fmap.setFilterName(SPNEGO_FILTER);
 fmap.setDispatches(Handler.ALL);
 handler.addFilterMapping(fmap);
 }
@@ -580,26 +576,14 @@
 webServer.addConnector(sslListener);
 }
-/**
-* Configure an ssl listener on the server.
-* @param addr address to listen on
-* @param sslConf conf to retrieve ssl options
-* @param needClientAuth whether client authentication is required
-*/
-public void addSslListener(InetSocketAddress addr, Configuration sslConf,
-boolean needClientAuth) throws IOException {
-addSslListener(addr, sslConf, needClientAuth, false);
-}
 /**
 * Configure an ssl listener on the server.
 * @param addr address to listen on
 * @param sslConf conf to retrieve ssl options
 * @param needCertsAuth whether x509 certificate authentication is required
-* @param needKrbAuth whether to allow kerberos auth
 */
 public void addSslListener(InetSocketAddress addr, Configuration sslConf,
-boolean needCertsAuth, boolean needKrbAuth) throws IOException {
+boolean needCertsAuth) throws IOException {
 if (webServer.isStarted()) {
 throw new IOException("Failed to add ssl listener");
 }
@@ -612,15 +596,7 @@
 System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
 "ssl.server.truststore.type", "jks"));
 }
-Krb5AndCertsSslSocketConnector.MODE mode;
-if(needCertsAuth && needKrbAuth)
-mode = MODE.BOTH;
-else if (!needCertsAuth && needKrbAuth)
-mode = MODE.KRB;
-else // Default to certificates
-mode = MODE.CERTS;
-SslSocketConnector sslListener = new Krb5AndCertsSslSocketConnector(mode);
+SslSocketConnector sslListener = new SslSocketConnector();
 sslListener.setHost(addr.getHostName());
 sslListener.setPort(addr.getPort());
 sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));
@@ -779,6 +755,37 @@
 : "Inactive HttpServer";
 }
+/**
+* Checks the user has privileges to access to instrumentation servlets.
+* <p/>
+* If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
+* (default value) it always returns TRUE.
+* <p/>
+* If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
+* it will check that if the current user is in the admin ACLS. If the user is
+* in the admin ACLs it returns TRUE, otherwise it returns FALSE.
+*
+* @param servletContext the servlet context.
+* @param request the servlet request.
+* @param response the servlet response.
+* @return TRUE/FALSE based on the logic decribed above.
+*/
+public static boolean isInstrumentationAccessAllowed(
+ServletContext servletContext, HttpServletRequest request,
+HttpServletResponse response) throws IOException {
+Configuration conf =
+(Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+boolean access = true;
+boolean adminAccess = conf.getBoolean(
+CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+false);
+if (adminAccess) {
+access = hasAdministratorAccess(servletContext, request, response);
+}
+return access;
+}
 /**
 * Does the user sending the HttpServletRequest has the administrator ACLs? If
 * it isn't the case, response will be modified to send an error to the user.
@@ -794,7 +801,6 @@
 HttpServletResponse response) throws IOException {
 Configuration conf =
 (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
 // If there is no authorization, anybody has administrator access.
 if (!conf.getBoolean(
 CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
@@ -834,12 +840,11 @@
 @Override
 public void doGet(HttpServletRequest request, HttpServletResponse response)
 throws ServletException, IOException {
-response.setContentType("text/plain; charset=UTF-8");
-// Do the authorization
-if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-response)) {
+if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+request, response)) {
 return;
 }
+response.setContentType("text/plain; charset=UTF-8");
 PrintWriter out = response.getWriter();
 ReflectionUtils.printThreadInfo(out, "");
 out.close();
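A short sketch of how the new key is read, mirroring the isInstrumentationAccessAllowed() logic above; the Configuration calls are standard Hadoop API, and the values set here are only for illustration:

import org.apache.hadoop.conf.Configuration;

public class InstrumentationAclSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        String key = "hadoop.security.instrumentation.requires.admin";

        // Default is false, so ConfServlet, JMXJsonServlet, MetricsServlet and the
        // stacks servlet stay readable by any caller, as before this change.
        System.out.println(conf.getBoolean(key, false));

        // Setting it to true makes isInstrumentationAccessAllowed() fall through to
        // hasAdministratorAccess(), i.e. the admin ACL check, when authorization is on.
        conf.setBoolean(key, true);
        System.out.println(conf.getBoolean(key, false));
    }
}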

View File

@@ -239,7 +239,6 @@ public class Text extends BinaryComparable
 */
 public void clear() {
 length = 0;
-bytes = EMPTY_BYTES;
 }
 /*
@@ -413,6 +412,8 @@
 return bytes;
 }
+static final public int ONE_MEGABYTE = 1024 * 1024;
 /** Read a UTF8 encoded string from in
 */
 public static String readString(DataInput in) throws IOException {
@@ -421,7 +422,17 @@
 in.readFully(bytes, 0, length);
 return decode(bytes);
 }
+/** Read a UTF8 encoded string with a maximum size
+*/
+public static String readString(DataInput in, int maxLength)
+throws IOException {
+int length = WritableUtils.readVIntInRange(in, 0, maxLength - 1);
+byte [] bytes = new byte[length];
+in.readFully(bytes, 0, length);
+return decode(bytes);
+}
 /** Write a UTF8 encoded string to out
 */
 public static int writeString(DataOutput out, String s) throws IOException {
@@ -432,6 +443,22 @@
 return length;
 }
+/** Write a UTF8 encoded string with a maximum size to out
+*/
+public static int writeString(DataOutput out, String s, int maxLength)
+throws IOException {
+ByteBuffer bytes = encode(s);
+int length = bytes.limit();
+if (length >= maxLength) {
+throw new IOException("string was too long to write! Expected " +
+"less than " + maxLength + " bytes, but got " +
+length + " bytes.");
+}
+WritableUtils.writeVInt(out, length);
+out.write(bytes.array(), 0, length);
+return length;
+}
 ////// states for validateUTF8
 private static final int LEAD_BYTE = 0;
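A small round-trip sketch of the bounded overloads added above, assuming hadoop-common on the classpath; the string being written is arbitrary:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;

public class BoundedStringDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        // Throws IOException up front if the encoded string is >= 1 MB instead of writing it.
        Text.writeString(out, "hdfs", Text.ONE_MEGABYTE);

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
        // readVIntInRange rejects a corrupt length field before any buffer is allocated,
        // which is what prevents the out-of-memory failures tracked by HADOOP-8361.
        System.out.println(Text.readString(in, Text.ONE_MEGABYTE));
    }
}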

View File

@@ -53,6 +53,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
@@ -845,24 +847,24 @@
 touch();
 try {
-int id = in.readInt(); // try to read an id
+RpcResponseHeaderProto response =
+RpcResponseHeaderProto.parseDelimitedFrom(in);
+int callId = response.getCallId();
 if (LOG.isDebugEnabled())
-LOG.debug(getName() + " got value #" + id);
+LOG.debug(getName() + " got value #" + callId);
-Call call = calls.get(id);
+Call call = calls.get(callId);
+RpcStatusProto status = response.getStatus();
-int state = in.readInt(); // read call status
-if (state == Status.SUCCESS.state) {
+if (status == RpcStatusProto.SUCCESS) {
 Writable value = ReflectionUtils.newInstance(valueClass, conf);
 value.readFields(in); // read value
 call.setRpcResponse(value);
-calls.remove(id);
+calls.remove(callId);
-} else if (state == Status.ERROR.state) {
+} else if (status == RpcStatusProto.ERROR) {
 call.setException(new RemoteException(WritableUtils.readString(in),
 WritableUtils.readString(in)));
-calls.remove(id);
+calls.remove(callId);
-} else if (state == Status.FATAL.state) {
+} else if (status == RpcStatusProto.FATAL) {
 // Close the connection
 markClosed(new RemoteException(WritableUtils.readString(in),
 WritableUtils.readString(in)));
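A hedged sketch of the delimited framing the client now reads: the server writes the RpcResponseHeaderProto with writeDelimitedTo() and the client parses it back with parseDelimitedFrom() before touching the payload. The generated classes and accessors are the ones named in the diff; the call id is arbitrary:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;

public class RpcHeaderFramingSketch {
    public static void main(String[] args) throws IOException {
        // Server side: length-delimited header first, response payload bytes after it.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        RpcResponseHeaderProto.newBuilder()
            .setCallId(42)
            .setStatus(RpcStatusProto.SUCCESS)
            .build()
            .writeDelimitedTo(buf);

        // Client side: parse the header, then use callId/status to decide how to read the rest.
        ByteArrayInputStream in = new ByteArrayInputStream(buf.toByteArray());
        RpcResponseHeaderProto header = RpcResponseHeaderProto.parseDelimitedFrom(in);
        System.out.println(header.getCallId() + " " + header.getStatus());
    }
}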

View File

@@ -217,7 +217,7 @@ public abstract class Server {
 public static final Log AUDITLOG =
 LogFactory.getLog("SecurityLogger."+Server.class.getName());
 private static final String AUTH_FAILED_FOR = "Auth failed for ";
-private static final String AUTH_SUCCESSFULL_FOR = "Auth successfull for ";
+private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
 private static final ThreadLocal<Server> SERVER = new ThreadLocal<Server>();
@@ -1234,7 +1234,7 @@
 LOG.debug("SASL server successfully authenticated client: " + user);
 }
 rpcMetrics.incrAuthenticationSuccesses();
-AUDITLOG.info(AUTH_SUCCESSFULL_FOR + user);
+AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user);
 saslContextEstablished = true;
 }
 } else {
@@ -1339,7 +1339,7 @@
 + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
 + ") is configured as simple. Please configure another method "
 + "like kerberos or digest.");
-setupResponse(authFailedResponse, authFailedCall, Status.FATAL,
+setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
 null, ae.getClass().getName(), ae.getMessage());
 responder.doRespond(authFailedCall);
 throw ae;
@@ -1420,7 +1420,7 @@
 Call fakeCall = new Call(-1, null, this);
 // Versions 3 and greater can interpret this exception
 // response in the same manner
-setupResponse(buffer, fakeCall, Status.FATAL,
+setupResponseOldVersionFatal(buffer, fakeCall,
 null, VersionMismatch.class.getName(), errMsg);
 responder.doRespond(fakeCall);
@@ -1443,7 +1443,7 @@
 ByteArrayOutputStream buffer = new ByteArrayOutputStream();
 Call fakeCall = new Call(-1, null, this);
-setupResponse(buffer, fakeCall, Status.FATAL, null,
+setupResponse(buffer, fakeCall, RpcStatusProto.FATAL, null,
 IpcException.class.getName(), errMsg);
 responder.doRespond(fakeCall);
 }
@@ -1579,7 +1579,7 @@
 new Call(header.getCallId(), null, this);
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
-setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
 IOException.class.getName(),
 "Unknown rpc kind " + header.getRpcKind());
 responder.doRespond(readParamsFailedCall);
@@ -1597,7 +1597,7 @@
 new Call(header.getCallId(), null, this);
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
-setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
 t.getClass().getName(),
 "IPC server unable to read call parameters: " + t.getMessage());
 responder.doRespond(readParamsFailedCall);
@@ -1627,7 +1627,7 @@
 rpcMetrics.incrAuthorizationSuccesses();
 } catch (AuthorizationException ae) {
 rpcMetrics.incrAuthorizationFailures();
-setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null,
+setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL, null,
 ae.getClass().getName(), ae.getMessage());
 responder.doRespond(authFailedCall);
 return false;
@@ -1725,8 +1725,8 @@
 // responder.doResponse() since setupResponse may use
 // SASL to encrypt response data and SASL enforces
 // its own message ordering.
-setupResponse(buf, call, (error == null) ? Status.SUCCESS
-: Status.ERROR, value, errorClass, error);
+setupResponse(buf, call, (error == null) ? RpcStatusProto.SUCCESS
+: RpcStatusProto.ERROR, value, errorClass, error);
 // Discard the large buf and reset it back to smaller size
 // to free up heap
@@ -1859,40 +1859,79 @@
 /**
 * Setup response for the IPC Call.
 *
-* @param response buffer to serialize the response into
+* @param responseBuf buffer to serialize the response into
 * @param call {@link Call} to which we are setting up the response
-* @param status {@link Status} of the IPC call
+* @param status of the IPC call
 * @param rv return value for the IPC Call, if the call was successful
 * @param errorClass error class, if the the call failed
 * @param error error message, if the call failed
 * @throws IOException
 */
-private void setupResponse(ByteArrayOutputStream response,
-Call call, Status status,
+private void setupResponse(ByteArrayOutputStream responseBuf,
+Call call, RpcStatusProto status,
 Writable rv, String errorClass, String error)
 throws IOException {
-response.reset();
-DataOutputStream out = new DataOutputStream(response);
-out.writeInt(call.callId); // write call id
-out.writeInt(status.state); // write status
-if (status == Status.SUCCESS) {
+responseBuf.reset();
+DataOutputStream out = new DataOutputStream(responseBuf);
+RpcResponseHeaderProto.Builder response =
+RpcResponseHeaderProto.newBuilder();
+response.setCallId(call.callId);
+response.setStatus(status);
+if (status == RpcStatusProto.SUCCESS) {
 try {
+response.build().writeDelimitedTo(out);
 rv.write(out);
 } catch (Throwable t) {
 LOG.warn("Error serializing call response for call " + call, t);
 // Call back to same function - this is OK since the
 // buffer is reset at the top, and since status is changed
 // to ERROR it won't infinite loop.
-setupResponse(response, call, Status.ERROR,
+setupResponse(responseBuf, call, RpcStatusProto.ERROR,
 null, t.getClass().getName(),
 StringUtils.stringifyException(t));
 return;
 }
 } else {
+if (status == RpcStatusProto.FATAL) {
+response.setServerIpcVersionNum(Server.CURRENT_VERSION);
+}
+response.build().writeDelimitedTo(out);
 WritableUtils.writeString(out, errorClass);
 WritableUtils.writeString(out, error);
 }
+if (call.connection.useWrap) {
+wrapWithSasl(responseBuf, call);
+}
+call.setResponse(ByteBuffer.wrap(responseBuf.toByteArray()));
+}
+/**
+* Setup response for the IPC Call on Fatal Error from a
+* client that is using old version of Hadoop.
+* The response is serialized using the previous protocol's response
+* layout.
+*
+* @param response buffer to serialize the response into
+* @param call {@link Call} to which we are setting up the response
+* @param rv return value for the IPC Call, if the call was successful
+* @param errorClass error class, if the the call failed
+* @param error error message, if the call failed
+* @throws IOException
+*/
+private void setupResponseOldVersionFatal(ByteArrayOutputStream response,
+Call call,
+Writable rv, String errorClass, String error)
+throws IOException {
+final int OLD_VERSION_FATAL_STATUS = -1;
+response.reset();
+DataOutputStream out = new DataOutputStream(response);
+out.writeInt(call.callId); // write call id
+out.writeInt(OLD_VERSION_FATAL_STATUS); // write FATAL_STATUS
+WritableUtils.writeString(out, errorClass);
+WritableUtils.writeString(out, error);
 if (call.connection.useWrap) {
 wrapWithSasl(response, call);
 }
View File

@@ -1,32 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.ipc;
-/**
-* Status of a Hadoop IPC call.
-*/
-enum Status {
-SUCCESS (0),
-ERROR (1),
-FATAL (-1);
-int state;
-private Status(int state) {
-this.state = state;
-}
-}

View File

@@ -148,9 +148,8 @@ public class JMXJsonServlet extends HttpServlet {
 @Override
 public void doGet(HttpServletRequest request, HttpServletResponse response) {
 try {
-// Do the authorization
-if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
-response)) {
+if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+request, response)) {
 return;
 }
 JsonGenerator jg = null;

View File

@ -106,9 +106,8 @@ public class MetricsServlet extends HttpServlet {
public void doGet(HttpServletRequest request, HttpServletResponse response) public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException { throws ServletException, IOException {
// Do the authorization if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
if (!HttpServer.hasAdministratorAccess(getServletContext(), request, request, response)) {
response)) {
return; return;
} }
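Both servlets above now delegate the decision to HttpServer.isInstrumentationAccessAllowed, which only enforces the admin ACL when hadoop.security.instrumentation.requires.admin is set. A hedged sketch of another instrumentation servlet adopting the same guard; the servlet itself is made up for illustration.

import java.io.IOException;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.http.HttpServer;

public class ExampleInstrumentationServlet extends HttpServlet {
  @Override
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    // Sends the error response itself and returns false when access is denied.
    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
        request, response)) {
      return;
    }
    response.getWriter().println("instrumentation payload goes here");
  }
}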
View File

@ -140,7 +140,7 @@ public class NetUtils {
/** /**
* Util method to build socket addr from either: * Util method to build socket addr from either:
* <host>:<post> * <host>:<port>
* <fs>://<host>:<port>/<path> * <fs>://<host>:<port>/<path>
*/ */
public static InetSocketAddress createSocketAddr(String target) { public static InetSocketAddress createSocketAddr(String target) {
@ -150,7 +150,7 @@ public class NetUtils {
/** /**
* Util method to build socket addr from either: * Util method to build socket addr from either:
* <host> * <host>
* <host>:<post> * <host>:<port>
* <fs>://<host>:<port>/<path> * <fs>://<host>:<port>/<path>
*/ */
public static InetSocketAddress createSocketAddr(String target, public static InetSocketAddress createSocketAddr(String target,
@ -375,53 +375,44 @@ public class NetUtils {
} }
/** /**
* Same as getInputStream(socket, socket.getSoTimeout()).<br><br> * Same as <code>getInputStream(socket, socket.getSoTimeout()).</code>
* <br><br>
* *
* From documentation for {@link #getInputStream(Socket, long)}:<br>
* Returns InputStream for the socket. If the socket has an associated
* SocketChannel then it returns a
* {@link SocketInputStream} with the given timeout. If the socket does not
* have a channel, {@link Socket#getInputStream()} is returned. In the later
* case, the timeout argument is ignored and the timeout set with
* {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
*
* Any socket created using socket factories returned by {@link NetUtils},
* must use this interface instead of {@link Socket#getInputStream()}.
*
* @see #getInputStream(Socket, long) * @see #getInputStream(Socket, long)
*
* @param socket
* @return InputStream for reading from the socket.
* @throws IOException
*/ */
public static InputStream getInputStream(Socket socket) public static SocketInputWrapper getInputStream(Socket socket)
throws IOException { throws IOException {
return getInputStream(socket, socket.getSoTimeout()); return getInputStream(socket, socket.getSoTimeout());
} }
/** /**
* Returns InputStream for the socket. If the socket has an associated * Return a {@link SocketInputWrapper} for the socket and set the given
* SocketChannel then it returns a * timeout. If the socket does not have an associated channel, then its socket
* {@link SocketInputStream} with the given timeout. If the socket does not * timeout will be set to the specified value. Otherwise, a
* have a channel, {@link Socket#getInputStream()} is returned. In the later * {@link SocketInputStream} will be created which reads with the configured
* case, the timeout argument is ignored and the timeout set with * timeout.
* {@link Socket#setSoTimeout(int)} applies for reads.<br><br>
* *
* Any socket created using socket factories returned by {@link NetUtils}, * Any socket created using socket factories returned by {@link #NetUtils},
* must use this interface instead of {@link Socket#getInputStream()}. * must use this interface instead of {@link Socket#getInputStream()}.
* *
* In general, this should be called only once on each socket: see the note
* in {@link SocketInputWrapper#setTimeout(long)} for more information.
*
* @see Socket#getChannel() * @see Socket#getChannel()
* *
* @param socket * @param socket
* @param timeout timeout in milliseconds. This may not always apply. zero * @param timeout timeout in milliseconds. zero for waiting as
* for waiting as long as necessary. * long as necessary.
* @return InputStream for reading from the socket. * @return SocketInputWrapper for reading from the socket.
* @throws IOException * @throws IOException
*/ */
public static InputStream getInputStream(Socket socket, long timeout) public static SocketInputWrapper getInputStream(Socket socket, long timeout)
throws IOException { throws IOException {
return (socket.getChannel() == null) ? InputStream stm = (socket.getChannel() == null) ?
socket.getInputStream() : new SocketInputStream(socket, timeout); socket.getInputStream() : new SocketInputStream(socket);
SocketInputWrapper w = new SocketInputWrapper(socket, stm);
w.setTimeout(timeout);
return w;
} }
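A brief usage sketch for the new return type, assuming an already-connected Socket; this helper is not part of the patch.

import java.io.IOException;
import java.net.Socket;

import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.SocketInputWrapper;

public class GetInputStreamSketch {
  static int readTwo(Socket s) throws IOException {
    // One wrapper per socket; 30s timeout for the first read.
    SocketInputWrapper in = NetUtils.getInputStream(s, 30000);
    int first = in.read();
    // Tighten the timeout for later reads on the same wrapper.
    in.setTimeout(5000);
    int second = in.read();
    return first + second;
  }
}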
/** /**
@ -503,7 +494,7 @@ public class NetUtils {
* also takes a local address and port to bind the socket to. * also takes a local address and port to bind the socket to.
* *
* @param socket * @param socket
* @param address the remote address * @param endpoint the remote address
* @param localAddr the local address to bind the socket to * @param localAddr the local address to bind the socket to
* @param timeout timeout in milliseconds * @param timeout timeout in milliseconds
*/ */
@ -558,16 +549,11 @@ public class NetUtils {
* @return its IP address in the string format * @return its IP address in the string format
*/ */
public static String normalizeHostName(String name) { public static String normalizeHostName(String name) {
if (Character.digit(name.charAt(0), 10) != -1) { // it is an IP try {
return InetAddress.getByName(name).getHostAddress();
} catch (UnknownHostException e) {
return name; return name;
} else { }
try {
InetAddress ipAddress = InetAddress.getByName(name);
return ipAddress.getHostAddress();
} catch (UnknownHostException e) {
return name;
}
}
} }
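The simplified version always asks the resolver and falls back to the input on failure; a quick illustration (actual output depends on the local resolver).

import org.apache.hadoop.net.NetUtils;

public class NormalizeHostNameSketch {
  public static void main(String[] args) {
    // Typically prints 127.0.0.1 (or whichever address the resolver returns).
    System.out.println(NetUtils.normalizeHostName("localhost"));
    // Unresolvable names come back unchanged.
    System.out.println(NetUtils.normalizeHostName("no.such.host.invalid"));
  }
}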
/** /**
View File

@ -247,6 +247,10 @@ abstract class SocketIOWithTimeout {
ops)); ops));
} }
} }
public void setTimeout(long timeoutMs) {
this.timeout = timeoutMs;
}
private static String timeoutExceptionString(SelectableChannel channel, private static String timeoutExceptionString(SelectableChannel channel,
long timeout, int ops) { long timeout, int ops) {
View File

@ -28,9 +28,6 @@ import java.nio.channels.ReadableByteChannel;
import java.nio.channels.SelectableChannel; import java.nio.channels.SelectableChannel;
import java.nio.channels.SelectionKey; import java.nio.channels.SelectionKey;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** /**
* This implements an input stream that can have a timeout while reading. * This implements an input stream that can have a timeout while reading.
* This sets non-blocking flag on the socket channel. * This sets non-blocking flag on the socket channel.
@ -40,9 +37,7 @@ import org.apache.hadoop.classification.InterfaceStability;
* IllegalBlockingModeException. * IllegalBlockingModeException.
* Please use {@link SocketOutputStream} for writing. * Please use {@link SocketOutputStream} for writing.
*/ */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) class SocketInputStream extends InputStream
@InterfaceStability.Unstable
public class SocketInputStream extends InputStream
implements ReadableByteChannel { implements ReadableByteChannel {
private Reader reader; private Reader reader;
@ -171,4 +166,8 @@ public class SocketInputStream extends InputStream
public void waitForReadable() throws IOException { public void waitForReadable() throws IOException {
reader.waitForIO(SelectionKey.OP_READ); reader.waitForIO(SelectionKey.OP_READ);
} }
public void setTimeout(long timeoutMs) {
reader.setTimeout(timeoutMs);
}
} }
View File

@ -0,0 +1,88 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.FilterInputStream;
import java.io.InputStream;
import java.net.Socket;
import java.net.SocketException;
import java.nio.channels.ReadableByteChannel;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
/**
* A wrapper stream around a socket which allows setting of its timeout. If the
* socket has a channel, this uses non-blocking IO via the package-private
* {@link SocketInputStream} implementation. Otherwise, timeouts are managed by
* setting the underlying socket timeout itself.
*/
@InterfaceAudience.LimitedPrivate("HDFS")
@InterfaceStability.Unstable
public class SocketInputWrapper extends FilterInputStream {
private final Socket socket;
private final boolean hasChannel;
SocketInputWrapper(Socket s, InputStream is) {
super(is);
this.socket = s;
this.hasChannel = s.getChannel() != null;
if (hasChannel) {
Preconditions.checkArgument(is instanceof SocketInputStream,
"Expected a SocketInputStream when there is a channel. " +
"Got: %s", is);
}
}
/**
* Set the timeout for reads from this stream.
*
* Note: the behavior here can differ subtly depending on whether the
* underlying socket has an associated Channel. In particular, if there is no
* channel, then this call will affect the socket timeout for <em>all</em>
* readers of this socket. If there is a channel, then this call will affect
* the timeout only for <em>this</em> stream. As such, it is recommended to
* only create one {@link SocketInputWrapper} instance per socket.
*
* @param timeoutMs
* the new timeout, 0 for no timeout
* @throws SocketException
* if the timeout cannot be set
*/
public void setTimeout(long timeoutMs) throws SocketException {
if (hasChannel) {
((SocketInputStream)in).setTimeout(timeoutMs);
} else {
socket.setSoTimeout((int)timeoutMs);
}
}
/**
* @return an underlying ReadableByteChannel implementation.
* @throws IllegalStateException if this socket does not have a channel
*/
public ReadableByteChannel getReadableByteChannel() {
Preconditions.checkState(hasChannel,
"Socket %s does not have a channel",
this.socket);
return (SocketInputStream)in;
}
}
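Because getReadableByteChannel is guarded by a precondition, callers that may receive a channel-less socket should branch on Socket.getChannel() first. A minimal sketch, not taken from the patch:

import java.io.IOException;
import java.net.Socket;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.SocketInputWrapper;

public class ReadableChannelSketch {
  static int readSome(Socket s, ByteBuffer buf) throws IOException {
    SocketInputWrapper in = NetUtils.getInputStream(s);
    if (s.getChannel() != null) {
      // Channel-backed sockets expose the package-private SocketInputStream.
      ReadableByteChannel ch = in.getReadableByteChannel();
      return ch.read(buf);
    }
    // No channel: stick to the InputStream API; getReadableByteChannel()
    // would throw IllegalStateException here.
    byte[] tmp = new byte[buf.remaining()];
    int n = in.read(tmp);
    if (n > 0) {
      buf.put(tmp, 0, n);
    }
    return n;
  }
}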
View File

@ -1,232 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.security.Principal;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLServerSocket;
import javax.net.ssl.SSLServerSocketFactory;
import javax.net.ssl.SSLSocket;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.mortbay.io.EndPoint;
import org.mortbay.jetty.HttpSchemes;
import org.mortbay.jetty.Request;
import org.mortbay.jetty.security.ServletSSL;
import org.mortbay.jetty.security.SslSocketConnector;
/**
* Extend Jetty's {@link SslSocketConnector} to optionally also provide
* Kerberos5ized SSL sockets. The only change in behavior from superclass
* is that we no longer honor requests to turn off NeedAuthentication when
* running with Kerberos support.
*/
public class Krb5AndCertsSslSocketConnector extends SslSocketConnector {
public static final List<String> KRB5_CIPHER_SUITES =
Collections.unmodifiableList(Collections.singletonList(
"TLS_KRB5_WITH_3DES_EDE_CBC_SHA"));
static {
SecurityUtil.initKrb5CipherSuites();
}
private static final Log LOG = LogFactory
.getLog(Krb5AndCertsSslSocketConnector.class);
private static final String REMOTE_PRINCIPAL = "remote_principal";
public enum MODE {KRB, CERTS, BOTH} // Support Kerberos, certificates or both?
private final boolean useKrb;
private final boolean useCerts;
public Krb5AndCertsSslSocketConnector() {
super();
useKrb = true;
useCerts = false;
setPasswords();
}
public Krb5AndCertsSslSocketConnector(MODE mode) {
super();
useKrb = mode == MODE.KRB || mode == MODE.BOTH;
useCerts = mode == MODE.CERTS || mode == MODE.BOTH;
setPasswords();
logIfDebug("useKerb = " + useKrb + ", useCerts = " + useCerts);
}
// If not using Certs, set passwords to random gibberish or else
// Jetty will actually prompt the user for some.
private void setPasswords() {
if(!useCerts) {
Random r = new Random();
System.setProperty("jetty.ssl.password", String.valueOf(r.nextLong()));
System.setProperty("jetty.ssl.keypassword", String.valueOf(r.nextLong()));
}
}
@Override
protected SSLServerSocketFactory createFactory() throws Exception {
if(useCerts)
return super.createFactory();
SSLContext context = super.getProvider()==null
? SSLContext.getInstance(super.getProtocol())
:SSLContext.getInstance(super.getProtocol(), super.getProvider());
context.init(null, null, null);
return context.getServerSocketFactory();
}
/* (non-Javadoc)
* @see org.mortbay.jetty.security.SslSocketConnector#newServerSocket(java.lang.String, int, int)
*/
@Override
protected ServerSocket newServerSocket(String host, int port, int backlog)
throws IOException {
logIfDebug("Creating new KrbServerSocket for: " + host);
SSLServerSocket ss = null;
if(useCerts) // Get the server socket from the SSL super impl
ss = (SSLServerSocket)super.newServerSocket(host, port, backlog);
else { // Create a default server socket
try {
ss = (SSLServerSocket)(host == null
? createFactory().createServerSocket(port, backlog) :
createFactory().createServerSocket(port, backlog, InetAddress.getByName(host)));
} catch (Exception e)
{
LOG.warn("Could not create KRB5 Listener", e);
throw new IOException("Could not create KRB5 Listener: " + e.toString());
}
}
// Add Kerberos ciphers to this socket server if needed.
if(useKrb) {
ss.setNeedClientAuth(true);
String [] combined;
if(useCerts) { // combine the cipher suites
String[] certs = ss.getEnabledCipherSuites();
combined = new String[certs.length + KRB5_CIPHER_SUITES.size()];
System.arraycopy(certs, 0, combined, 0, certs.length);
System.arraycopy(KRB5_CIPHER_SUITES.toArray(new String[0]), 0, combined,
certs.length, KRB5_CIPHER_SUITES.size());
} else { // Just enable Kerberos auth
combined = KRB5_CIPHER_SUITES.toArray(new String[0]);
}
ss.setEnabledCipherSuites(combined);
}
return ss;
};
@Override
public void customize(EndPoint endpoint, Request request) throws IOException {
if(useKrb) { // Add Kerberos-specific info
SSLSocket sslSocket = (SSLSocket)endpoint.getTransport();
Principal remotePrincipal = sslSocket.getSession().getPeerPrincipal();
logIfDebug("Remote principal = " + remotePrincipal);
request.setScheme(HttpSchemes.HTTPS);
request.setAttribute(REMOTE_PRINCIPAL, remotePrincipal);
if(!useCerts) { // Add extra info that would have been added by super
String cipherSuite = sslSocket.getSession().getCipherSuite();
Integer keySize = Integer.valueOf(ServletSSL.deduceKeyLength(cipherSuite));;
request.setAttribute("javax.servlet.request.cipher_suite", cipherSuite);
request.setAttribute("javax.servlet.request.key_size", keySize);
}
}
if(useCerts) super.customize(endpoint, request);
}
private void logIfDebug(String s) {
if(LOG.isDebugEnabled())
LOG.debug(s);
}
/**
* Filter that takes the Kerberos principal identified in the
* {@link Krb5AndCertsSslSocketConnector} and provides it the to the servlet
* at runtime, setting the principal and short name.
*/
public static class Krb5SslFilter implements Filter {
@Override
public void doFilter(ServletRequest req, ServletResponse resp,
FilterChain chain) throws IOException, ServletException {
final Principal princ =
(Principal)req.getAttribute(Krb5AndCertsSslSocketConnector.REMOTE_PRINCIPAL);
if(princ == null || !(princ instanceof KerberosPrincipal)) {
// Should never actually get here, since should be rejected at socket
// level.
LOG.warn("User not authenticated via kerberos from " + req.getRemoteAddr());
((HttpServletResponse)resp).sendError(HttpServletResponse.SC_FORBIDDEN,
"User not authenticated via Kerberos");
return;
}
// Provide principal information for servlet at runtime
ServletRequest wrapper =
new HttpServletRequestWrapper((HttpServletRequest) req) {
@Override
public Principal getUserPrincipal() {
return princ;
}
/*
* Return the full name of this remote user.
* @see javax.servlet.http.HttpServletRequestWrapper#getRemoteUser()
*/
@Override
public String getRemoteUser() {
return princ.getName();
}
};
chain.doFilter(wrapper, resp);
}
@Override
public void init(FilterConfig arg0) throws ServletException {
/* Nothing to do here */
}
@Override
public void destroy() { /* Nothing to do here */ }
}
}
View File

@ -17,14 +17,11 @@
package org.apache.hadoop.security; package org.apache.hadoop.security;
import java.io.IOException; import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetAddress; import java.net.InetAddress;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.URI; import java.net.URI;
import java.net.URL; import java.net.URL;
import java.net.URLConnection;
import java.net.UnknownHostException; import java.net.UnknownHostException;
import java.security.AccessController; import java.security.AccessController;
import java.security.PrivilegedAction; import java.security.PrivilegedAction;
@ -45,6 +42,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo; import org.apache.hadoop.security.token.TokenInfo;
@ -134,79 +133,6 @@ public class SecurityUtil {
return isTGSPrincipal(ticket.getServer()); return isTGSPrincipal(ticket.getServer());
} }
/**
* Explicitly pull the service ticket for the specified host. This solves a
* problem with Java's Kerberos SSL problem where the client cannot
* authenticate against a cross-realm service. It is necessary for clients
* making kerberized https requests to call this method on the target URL
* to ensure that in a cross-realm environment the remote host will be
* successfully authenticated.
*
* This method is internal to Hadoop and should not be used by other
* applications. This method should not be considered stable or open:
* it will be removed when the Java behavior is changed.
*
* @param remoteHost Target URL the krb-https client will access
* @throws IOException if the service ticket cannot be retrieved
*/
public static void fetchServiceTicket(URL remoteHost) throws IOException {
if(!UserGroupInformation.isSecurityEnabled())
return;
String serviceName = "host/" + remoteHost.getHost();
if (LOG.isDebugEnabled())
LOG.debug("Fetching service ticket for host at: " + serviceName);
Object serviceCred = null;
Method credsToTicketMeth;
Class<?> krb5utilClass;
try {
Class<?> principalClass;
Class<?> credentialsClass;
if (System.getProperty("java.vendor").contains("IBM")) {
principalClass = Class.forName("com.ibm.security.krb5.PrincipalName");
credentialsClass = Class.forName("com.ibm.security.krb5.Credentials");
krb5utilClass = Class.forName("com.ibm.security.jgss.mech.krb5");
} else {
principalClass = Class.forName("sun.security.krb5.PrincipalName");
credentialsClass = Class.forName("sun.security.krb5.Credentials");
krb5utilClass = Class.forName("sun.security.jgss.krb5.Krb5Util");
}
@SuppressWarnings("rawtypes")
Constructor principalConstructor = principalClass.getConstructor(String.class,
int.class);
Field KRB_NT_SRV_HST = principalClass.getDeclaredField("KRB_NT_SRV_HST");
Method acquireServiceCredsMeth =
credentialsClass.getDeclaredMethod("acquireServiceCreds",
String.class, credentialsClass);
Method ticketToCredsMeth = krb5utilClass.getDeclaredMethod("ticketToCreds",
KerberosTicket.class);
credsToTicketMeth = krb5utilClass.getDeclaredMethod("credsToTicket",
credentialsClass);
Object principal = principalConstructor.newInstance(serviceName,
KRB_NT_SRV_HST.get(principalClass));
serviceCred = acquireServiceCredsMeth.invoke(credentialsClass,
principal.toString(),
ticketToCredsMeth.invoke(krb5utilClass, getTgtFromSubject()));
} catch (Exception e) {
throw new IOException("Can't get service ticket for: "
+ serviceName, e);
}
if (serviceCred == null) {
throw new IOException("Can't get service ticket for " + serviceName);
}
try {
Subject.getSubject(AccessController.getContext()).getPrivateCredentials()
.add(credsToTicketMeth.invoke(krb5utilClass, serviceCred));
} catch (Exception e) {
throw new IOException("Can't get service ticket for: "
+ serviceName, e);
}
}
/** /**
* Convert Kerberos principal name pattern to valid Kerberos principal * Convert Kerberos principal name pattern to valid Kerberos principal
* names. It replaces hostname pattern with hostname, which should be * names. It replaces hostname pattern with hostname, which should be
@ -513,6 +439,30 @@ public class SecurityUtil {
} }
} }
/**
* Open a connection to a URL, authenticating with SPNEGO when security is
* enabled. All Namenode and Secondary Namenode URLs that are protected via
* SPNEGO should be accessed via this method.
*
* @param url to authenticate via SPNEGO.
* @return A connection that has been authenticated via SPNEGO
* @throws IOException If unable to authenticate via SPNEGO
*/
public static URLConnection openSecureHttpConnection(URL url) throws IOException {
if(!UserGroupInformation.isSecurityEnabled()) {
return url.openConnection();
}
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
try {
return new AuthenticatedURL().openConnection(url, token);
} catch (AuthenticationException e) {
throw new IOException("Exception trying to open authenticated connection to "
+ url, e);
}
}
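A hedged usage sketch for the new helper; the URL below is a placeholder, not taken from the patch.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;

import org.apache.hadoop.security.SecurityUtil;

public class SpnegoFetchSketch {
  static String fetchFirstLine() throws IOException {
    // Placeholder NameNode servlet URL; with security on, the connection is
    // authenticated via SPNEGO, otherwise a plain connection is returned.
    URL url = new URL("http://namenode.example.com:50070/jmx");
    URLConnection conn = SecurityUtil.openSecureHttpConnection(url);
    BufferedReader r = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"));
    try {
      return r.readLine();
    } finally {
      r.close();
    }
  }
}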
/** /**
* Resolves a host subject to the security requirements determined by * Resolves a host subject to the security requirements determined by
* hadoop.security.token.service.use_ip. * hadoop.security.token.service.use_ip.
@ -664,10 +614,4 @@ public class SecurityUtil {
} }
} }
public static void initKrb5CipherSuites() {
if (UserGroupInformation.isSecurityEnabled()) {
System.setProperty("https.cipherSuites",
Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
}
}
} }
View File

@ -59,7 +59,7 @@ public class ServiceAuthorizationManager {
public static final Log AUDITLOG = public static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+ServiceAuthorizationManager.class.getName()); LogFactory.getLog("SecurityLogger."+ServiceAuthorizationManager.class.getName());
private static final String AUTHZ_SUCCESSFULL_FOR = "Authorization successfull for "; private static final String AUTHZ_SUCCESSFUL_FOR = "Authorization successful for ";
private static final String AUTHZ_FAILED_FOR = "Authorization failed for "; private static final String AUTHZ_FAILED_FOR = "Authorization failed for ";
@ -108,7 +108,7 @@ public class ServiceAuthorizationManager {
" is not authorized for protocol " + protocol + " is not authorized for protocol " + protocol +
", expected client Kerberos principal is " + clientPrincipal); ", expected client Kerberos principal is " + clientPrincipal);
} }
AUDITLOG.info(AUTHZ_SUCCESSFULL_FOR + user + " for protocol="+protocol); AUDITLOG.info(AUTHZ_SUCCESSFUL_FOR + user + " for protocol="+protocol);
} }
public synchronized void refresh(Configuration conf, public synchronized void refresh(Configuration conf,
View File

@ -18,10 +18,15 @@
package org.apache.hadoop.security.token; package org.apache.hadoop.security.token;
import com.google.common.collect.Maps;
import java.io.ByteArrayInputStream;
import java.io.DataInput; import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput; import java.io.DataOutput;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Map;
import java.util.ServiceLoader; import java.util.ServiceLoader;
import org.apache.commons.codec.binary.Base64; import org.apache.commons.codec.binary.Base64;
@ -37,6 +42,7 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparator; import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.ReflectionUtils;
/** /**
* The client-side form of the token. * The client-side form of the token.
@ -45,6 +51,9 @@ import org.apache.hadoop.io.WritableUtils;
@InterfaceStability.Evolving @InterfaceStability.Evolving
public class Token<T extends TokenIdentifier> implements Writable { public class Token<T extends TokenIdentifier> implements Writable {
public static final Log LOG = LogFactory.getLog(Token.class); public static final Log LOG = LogFactory.getLog(Token.class);
private static Map<Text, Class<? extends TokenIdentifier>> tokenKindMap;
private byte[] identifier; private byte[] identifier;
private byte[] password; private byte[] password;
private Text kind; private Text kind;
@ -100,13 +109,49 @@ public class Token<T extends TokenIdentifier> implements Writable {
} }
/** /**
* Get the token identifier * Get the token identifier's byte representation
* @return the token identifier * @return the token identifier's byte representation
*/ */
public byte[] getIdentifier() { public byte[] getIdentifier() {
return identifier; return identifier;
} }
private static synchronized Class<? extends TokenIdentifier>
getClassForIdentifier(Text kind) {
if (tokenKindMap == null) {
tokenKindMap = Maps.newHashMap();
for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
tokenKindMap.put(id.getKind(), id.getClass());
}
}
Class<? extends TokenIdentifier> cls = tokenKindMap.get(kind);
if (cls == null) {
LOG.warn("Cannot find class for token kind " + kind);
return null;
}
return cls;
}
/**
* Get the token identifier object, or null if it could not be constructed
* (because the class could not be loaded, for example).
* @return the token identifier, or null
* @throws IOException
*/
@SuppressWarnings("unchecked")
public T decodeIdentifier() throws IOException {
Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
if (cls == null) {
return null;
}
TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
DataInputStream in = new DataInputStream(buf);
tokenIdentifier.readFields(in);
in.close();
return (T) tokenIdentifier;
}
/** /**
* Get the token password/secret * Get the token password/secret
* @return the token password/secret * @return the token password/secret
@ -260,16 +305,31 @@ public class Token<T extends TokenIdentifier> implements Writable {
buffer.append(num); buffer.append(num);
} }
} }
private void identifierToString(StringBuilder buffer) {
T id = null;
try {
id = decodeIdentifier();
} catch (IOException e) {
// handle in the finally block
} finally {
if (id != null) {
buffer.append("(").append(id).append(")");
} else {
addBinaryBuffer(buffer, identifier);
}
}
}
@Override @Override
public String toString() { public String toString() {
StringBuilder buffer = new StringBuilder(); StringBuilder buffer = new StringBuilder();
buffer.append("Ident: "); buffer.append("Kind: ");
addBinaryBuffer(buffer, identifier);
buffer.append(", Kind: ");
buffer.append(kind.toString()); buffer.append(kind.toString());
buffer.append(", Service: "); buffer.append(", Service: ");
buffer.append(service.toString()); buffer.append(service.toString());
buffer.append(", Ident: ");
identifierToString(buffer);
return buffer.toString(); return buffer.toString();
} }
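A short sketch of how decodeIdentifier might be used by a caller; the method and its generic parameter here are illustrative assumptions, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class DecodeIdentifierSketch {
  static <T extends TokenIdentifier> void describe(Token<T> token) throws IOException {
    // Returns null when no TokenIdentifier implementation for this kind is
    // registered through ServiceLoader on the classpath.
    T id = token.decodeIdentifier();
    if (id == null) {
      System.out.println("unknown token kind: " + token.getKind());
    } else {
      System.out.println(token.getKind() + " -> " + id);
    }
  }
}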
View File

@ -22,11 +22,20 @@ import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import com.google.common.collect.ComparisonChain;
@InterfaceAudience.Private @InterfaceAudience.Private
public abstract class VersionUtil { public abstract class VersionUtil {
private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)"); private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)");
/**
* Suffix added by maven for nightly builds and other snapshot releases.
* These releases are considered to precede the non-SNAPSHOT version
* with the same version number.
*/
private static final String SNAPSHOT_SUFFIX = "-SNAPSHOT";
/** /**
* This function splits the two versions on &quot;.&quot; and performs a * This function splits the two versions on &quot;.&quot; and performs a
* naturally-ordered comparison of the resulting components. For example, the * naturally-ordered comparison of the resulting components. For example, the
@ -48,6 +57,11 @@ public abstract class VersionUtil {
* between the two versions, then the version with fewer components is * between the two versions, then the version with fewer components is
* considered to precede the version with more components. * considered to precede the version with more components.
* *
* In addition to the above rules, there is one special case: maven SNAPSHOT
* releases are considered to precede a non-SNAPSHOT release with an
* otherwise identical version number. For example, 2.0-SNAPSHOT precedes
* 2.0.
*
* This function returns a negative integer if version1 precedes version2, a * This function returns a negative integer if version1 precedes version2, a
* positive integer if version2 precedes version1, and 0 if and only if the * positive integer if version2 precedes version1, and 0 if and only if the
* two versions' components are identical in value and cardinality. * two versions' components are identical in value and cardinality.
@ -61,6 +75,11 @@ public abstract class VersionUtil {
* versions are equal. * versions are equal.
*/ */
public static int compareVersions(String version1, String version2) { public static int compareVersions(String version1, String version2) {
boolean isSnapshot1 = version1.endsWith(SNAPSHOT_SUFFIX);
boolean isSnapshot2 = version2.endsWith(SNAPSHOT_SUFFIX);
version1 = stripSnapshotSuffix(version1);
version2 = stripSnapshotSuffix(version2);
String[] version1Parts = version1.split("\\."); String[] version1Parts = version1.split("\\.");
String[] version2Parts = version2.split("\\."); String[] version2Parts = version2.split("\\.");
@ -87,9 +106,21 @@ public abstract class VersionUtil {
return component1.length() - component2.length(); return component1.length() - component2.length();
} }
} }
return version1Parts.length - version2Parts.length;
return ComparisonChain.start()
.compare(version1Parts.length, version2Parts.length)
.compare(isSnapshot2, isSnapshot1)
.result();
} }
private static String stripSnapshotSuffix(String version) {
if (version.endsWith(SNAPSHOT_SUFFIX)) {
return version.substring(0, version.length() - SNAPSHOT_SUFFIX.length());
} else {
return version;
}
}
private static boolean isNumeric(String s) { private static boolean isNumeric(String s) {
try { try {
Integer.parseInt(s); Integer.parseInt(s);
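The net effect of the SNAPSHOT rule, stated as a small example (a sketch mirroring the comparison logic above):

import org.apache.hadoop.util.VersionUtil;

public class CompareVersionsSketch {
  public static void main(String[] args) {
    // Negative: 2.0.0-SNAPSHOT precedes 2.0.0.
    System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0"));
    // Positive: 2.0.0-SNAPSHOT still follows 1.0.0.
    System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "1.0.0"));
    // Zero: identical version strings compare equal.
    System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"));
  }
}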
View File

@ -1,4 +1,4 @@
# AC_COMPUTE_NEEDED_DSO(LIBRARY, PREPROC_SYMBOL) # AC_COMPUTE_NEEDED_DSO(LIBRARY, TEST_PROGRAM, PREPROC_SYMBOL)
# -------------------------------------------------- # --------------------------------------------------
# Compute the 'actual' dynamic-library used # Compute the 'actual' dynamic-library used
# for LIBRARY and set it to PREPROC_SYMBOL # for LIBRARY and set it to PREPROC_SYMBOL
@ -6,7 +6,7 @@ AC_DEFUN([AC_COMPUTE_NEEDED_DSO],
[ [
AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1, AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1,
[ [
echo 'int main(int argc, char **argv){return 0;}' > conftest.c echo '$2' > conftest.c
if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then
dnl Try objdump and ldd in that order to get the dynamic library dnl Try objdump and ldd in that order to get the dynamic library
if test ! -z "`which objdump | grep -v 'no objdump'`"; then if test ! -z "`which objdump | grep -v 'no objdump'`"; then
@ -24,5 +24,5 @@ AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_lib
rm -f conftest* rm -f conftest*
] ]
) )
AC_DEFINE_UNQUOTED($2, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1']) AC_DEFINE_UNQUOTED($3, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
])# AC_COMPUTE_NEEDED_DSO ])# AC_COMPUTE_NEEDED_DSO
View File

@ -87,10 +87,20 @@ CPPFLAGS=$cppflags_bak
AC_SUBST([JNI_CPPFLAGS]) AC_SUBST([JNI_CPPFLAGS])
dnl Check for zlib headers dnl Check for zlib headers
AC_CHECK_HEADERS([zlib.h zconf.h], AC_COMPUTE_NEEDED_DSO(z,HADOOP_ZLIB_LIBRARY), AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.)) AC_CHECK_HEADERS([zlib.h zconf.h],
AC_COMPUTE_NEEDED_DSO(z,
[#include "zlib.h"
int main(int argc, char **argv){zlibVersion();return 0;}],
HADOOP_ZLIB_LIBRARY),
AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
dnl Check for snappy headers dnl Check for snappy headers
AC_CHECK_HEADERS([snappy-c.h], AC_COMPUTE_NEEDED_DSO(snappy,HADOOP_SNAPPY_LIBRARY), AC_MSG_WARN(Snappy headers were not found... building without snappy.)) AC_CHECK_HEADERS([snappy-c.h],
AC_COMPUTE_NEEDED_DSO(snappy,
[#include "snappy-c.h"
int main(int argc, char **argv){snappy_compress(0,0,0,0);return 0;}],
HADOOP_SNAPPY_LIBRARY),
AC_MSG_WARN(Snappy headers were not found... building without snappy.))
dnl Check for headers needed by the native Group resolution implementation dnl Check for headers needed by the native Group resolution implementation
AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.)) AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
View File

@ -70,7 +70,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
// set the name of the group for subsequent calls to getnetgrent // set the name of the group for subsequent calls to getnetgrent
// note that we want to end group lookup regardless whether setnetgrent // note that we want to end group lookup regardless whether setnetgrent
// was successfull or not (as long as it was called we need to call // was successful or not (as long as it was called we need to call
// endnetgrent) // endnetgrent)
setnetgrentCalledFlag = 1; setnetgrentCalledFlag = 1;
if(setnetgrent(cgroup) == 1) { if(setnetgrent(cgroup) == 1) {
View File

@ -48,10 +48,10 @@ done
export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS" export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
# Command specific options appended to HADOOP_OPTS when specified # Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS" export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS" export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
# The ZKFC does not need a large heap, and keeping it small avoids # The ZKFC does not need a large heap, and keeping it small avoids
# any potential for long GC pauses # any potential for long GC pauses
View File

@ -128,13 +128,6 @@
</description> </description>
</property> </property>
<property>
<name>dfs.secondary.https.port</name>
<value>50490</value>
<description>The https port where secondary-namenode binds</description>
</property>
<property> <property>
<name>dfs.datanode.kerberos.principal</name> <name>dfs.datanode.kerberos.principal</name>
<value>dn/_HOST@${local.realm}</value> <value>dn/_HOST@${local.realm}</value>
View File

@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# #
#Security appender #Security appender
# #
hadoop.security.logger=INFO,console hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20 hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger} log4j.category.SecurityLogger=${hadoop.security.logger}
@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
# #
# hdfs audit logging # hdfs audit logging
# #
hdfs.audit.logger=INFO,console hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20 hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
# #
# mapred audit logging # mapred audit logging
# #
mapred.audit.logger=INFO,console mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20 mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger} log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
View File

@ -19,7 +19,6 @@ option java_package = "org.apache.hadoop.ipc.protobuf";
option java_outer_classname = "RpcPayloadHeaderProtos"; option java_outer_classname = "RpcPayloadHeaderProtos";
option java_generate_equals_and_hash = true; option java_generate_equals_and_hash = true;
/** /**
* This is the rpc payload header. It is sent with every rpc call. * This is the rpc payload header. It is sent with every rpc call.
* *
@ -34,8 +33,6 @@ option java_generate_equals_and_hash = true;
* *
*/ */
/** /**
* RpcKind determine the rpcEngine and the serialization of the rpc payload * RpcKind determine the rpcEngine and the serialization of the rpc payload
*/ */
@ -54,5 +51,27 @@ enum RpcPayloadOperationProto {
message RpcPayloadHeaderProto { // the header for the RpcRequest message RpcPayloadHeaderProto { // the header for the RpcRequest
optional RpcKindProto rpcKind = 1; optional RpcKindProto rpcKind = 1;
optional RpcPayloadOperationProto rpcOp = 2; optional RpcPayloadOperationProto rpcOp = 2;
optional uint32 callId = 3; // each rpc has a callId that is also used in response required uint32 callId = 3; // each rpc has a callId that is also used in response
}
enum RpcStatusProto {
SUCCESS = 0; // RPC succeeded
ERROR = 1; // RPC Failed
FATAL = 2; // Fatal error - connection is closed
}
/**
* Rpc Response Header
* - If successful then the Response follows after this header
*   - length (4 byte int), followed by the response
* - If error or fatal - the exception info follows
*   - length (4 byte int) Class name of exception - UTF-8 string
*   - length (4 byte int) Stacktrace - UTF-8 string
*   - if the strings are null then the length is -1
* In case of a fatal error the response contains the server-side IPC version
*/
message RpcResponseHeaderProto {
required uint32 callId = 1; // callId used in Request
required RpcStatusProto status = 2;
optional uint32 serverIpcVersionNum = 3; // in case of a fatal IPC error
} }
View File

@ -62,6 +62,15 @@
<description>Is service-level authorization enabled?</description> <description>Is service-level authorization enabled?</description>
</property> </property>
<property>
<name>hadoop.security.instrumentation.requires.admin</name>
<value>false</value>
<description>
Indicates if administrator ACLs are required to access
instrumentation servlets (JMX, METRICS, CONF, STACKS).
</description>
</property>
<property> <property>
<name>hadoop.security.authentication</name> <name>hadoop.security.authentication</name>
<value>simple</value> <value>simple</value>
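Programmatically, the new key is exposed as CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN (the TestHttpServer change further below uses it the same way); a hedged sketch of enabling the check:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class RequireAdminInstrumentationSketch {
  static Configuration lockedDownConf() {
    Configuration conf = new Configuration();
    // Both switches matter: service-level authorization in general, plus the
    // new instrumentation-specific flag (which defaults to false).
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    conf.setBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
    return conf;
  }
}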
View File

@ -1,78 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.io.DataOutputBuffer;
public class TestBlockLocation extends TestCase {
// Verify fix of bug identified in HADOOP-6004
public void testDeserialization() throws IOException {
// Create a test BlockLocation
String[] names = {"one", "two" };
String[] hosts = {"three", "four" };
String[] topologyPaths = {"five", "six"};
long offset = 25l;
long length = 55l;
BlockLocation bl = new BlockLocation(names, hosts, topologyPaths,
offset, length);
DataOutputBuffer dob = new DataOutputBuffer();
// Serialize it
try {
bl.write(dob);
} catch (IOException e) {
fail("Unable to serialize data: " + e.getMessage());
}
byte[] bytes = dob.getData();
DataInput da = new DataInputStream(new ByteArrayInputStream(bytes));
// Try to re-create the BlockLocation the same way as is done during
// deserialization
BlockLocation bl2 = new BlockLocation();
try {
bl2.readFields(da);
} catch (IOException e) {
fail("Unable to deserialize BlockLocation: " + e.getMessage());
}
// Check that we got back what we started with
verifyDeserialization(bl2.getHosts(), hosts);
verifyDeserialization(bl2.getNames(), names);
verifyDeserialization(bl2.getTopologyPaths(), topologyPaths);
assertEquals(bl2.getOffset(), offset);
assertEquals(bl2.getLength(), length);
}
private void verifyDeserialization(String[] ar1, String[] ar2) {
assertEquals(ar1.length, ar2.length);
for(int i = 0; i < ar1.length; i++)
assertEquals(ar1[i], ar2[i]);
}
}
View File

@ -18,11 +18,14 @@
package org.apache.hadoop.fs; package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import static org.apache.hadoop.fs.FileSystemTestHelper.*; import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import java.io.*; import java.io.*;
import static org.junit.Assert.*; import static org.junit.Assert.*;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -233,4 +236,16 @@ public class TestLocalFileSystem {
assertTrue("Did not delete file", fs.delete(file1)); assertTrue("Did not delete file", fs.delete(file1));
assertTrue("Did not delete non-empty dir", fs.delete(dir1)); assertTrue("Did not delete non-empty dir", fs.delete(dir1));
} }
@Test
public void testStatistics() throws Exception {
FileSystem.getLocal(new Configuration());
int fileSchemeCount = 0;
for (Statistics stats : FileSystem.getAllStatistics()) {
if (stats.getScheme().equals("file")) {
fileSchemeCount++;
}
}
assertEquals(1, fileSchemeCount);
}
} }
View File

@ -71,11 +71,8 @@ public class ViewFileSystemBaseTest {
@Before @Before
public void setUp() throws Exception { public void setUp() throws Exception {
targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget); initializeTargetTestRoot();
// In case previous test was killed before cleanup
fsTarget.delete(targetTestRoot, true);
fsTarget.mkdirs(targetTestRoot);
// Make user and data dirs - we create links to them in the mount table // Make user and data dirs - we create links to them in the mount table
fsTarget.mkdirs(new Path(targetTestRoot,"user")); fsTarget.mkdirs(new Path(targetTestRoot,"user"));
fsTarget.mkdirs(new Path(targetTestRoot,"data")); fsTarget.mkdirs(new Path(targetTestRoot,"data"));
@ -99,7 +96,16 @@ public class ViewFileSystemBaseTest {
fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true); fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
} }
void initializeTargetTestRoot() throws IOException {
targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
// In case previous test was killed before cleanup
fsTarget.delete(targetTestRoot, true);
fsTarget.mkdirs(targetTestRoot);
}
void setupMountPoints() { void setupMountPoints() {
ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri()); ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri());
ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri()); ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri());
ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri()); ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri());
@ -121,7 +127,7 @@ public class ViewFileSystemBaseTest {
} }
int getExpectedMountPoints() { int getExpectedMountPoints() {
return 7; return 8;
} }
/** /**
@ -166,7 +172,7 @@ public class ViewFileSystemBaseTest {
} }
} }
} }
Assert.assertEquals(expectedTokenCount / 2, delTokens.size()); Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens.size());
} }
int getExpectedDelegationTokenCountWithCredentials() { int getExpectedDelegationTokenCountWithCredentials() {
@ -309,6 +315,16 @@ public class ViewFileSystemBaseTest {
Assert.assertTrue("Renamed dest should exist as dir in target", Assert.assertTrue("Renamed dest should exist as dir in target",
fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar"))); fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
// Make a directory under a directory that's mounted from the root of another FS
fsView.mkdirs(new Path("/targetRoot/dirFoo"));
Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
boolean dirFooPresent = false;
for (FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) {
if (fileStatus.getPath().getName().equals("dirFoo")) {
dirFooPresent = true;
}
}
Assert.assertTrue(dirFooPresent);
} }
// rename across mount points that point to same target also fail // rename across mount points that point to same target also fail
@ -418,7 +434,7 @@ public class ViewFileSystemBaseTest {
} }
int getExpectedDirPaths() { int getExpectedDirPaths() {
return 6; return 7;
} }
@Test @Test
View File

@ -33,6 +33,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper; import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.FileContextTestHelper.fileType; import org.apache.hadoop.fs.FileContextTestHelper.fileType;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsConstants;
@ -77,12 +78,8 @@ public class ViewFsBaseTest {
@Before @Before
public void setUp() throws Exception { public void setUp() throws Exception {
initializeTargetTestRoot();
targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
// In case previous test was killed before cleanup
fcTarget.delete(targetTestRoot, true);
fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
// Make user and data dirs - we create links to them in the mount table // Make user and data dirs - we create links to them in the mount table
fcTarget.mkdir(new Path(targetTestRoot,"user"), fcTarget.mkdir(new Path(targetTestRoot,"user"),
FileContext.DEFAULT_PERM, true); FileContext.DEFAULT_PERM, true);
@ -100,6 +97,7 @@ public class ViewFsBaseTest {
// Set up the defaultMT in the config with our mount point links // Set up the defaultMT in the config with our mount point links
conf = new Configuration(); conf = new Configuration();
ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
ConfigUtil.addLink(conf, "/user", ConfigUtil.addLink(conf, "/user",
new Path(targetTestRoot,"user").toUri()); new Path(targetTestRoot,"user").toUri());
ConfigUtil.addLink(conf, "/user2", ConfigUtil.addLink(conf, "/user2",
@ -118,6 +116,14 @@ public class ViewFsBaseTest {
fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf); fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
// Also try viewfs://default/ - note authority is name of mount table // Also try viewfs://default/ - note authority is name of mount table
} }
void initializeTargetTestRoot() throws IOException {
targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
// In case previous test was killed before cleanup
fcTarget.delete(targetTestRoot, true);
fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
}
@After @After
public void tearDown() throws Exception { public void tearDown() throws Exception {
@ -128,7 +134,11 @@ public class ViewFsBaseTest {
public void testGetMountPoints() { public void testGetMountPoints() {
ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem(); ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem();
MountPoint[] mountPoints = viewfs.getMountPoints(); MountPoint[] mountPoints = viewfs.getMountPoints();
Assert.assertEquals(7, mountPoints.length); Assert.assertEquals(8, mountPoints.length);
}
int getExpectedDelegationTokenCount() {
return 0;
} }
/** /**
@ -140,7 +150,7 @@ public class ViewFsBaseTest {
public void testGetDelegationTokens() throws IOException { public void testGetDelegationTokens() throws IOException {
List<Token<?>> delTokens = List<Token<?>> delTokens =
fcView.getDelegationTokens(new Path("/"), "sanjay"); fcView.getDelegationTokens(new Path("/"), "sanjay");
Assert.assertEquals(0, delTokens.size()); Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
} }
@ -281,6 +291,19 @@ public class ViewFsBaseTest {
Assert.assertTrue("Renamed dest should exist as dir in target", Assert.assertTrue("Renamed dest should exist as dir in target",
isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar"))); isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
// Make a directory under a directory that's mounted from the root of another FS
fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
boolean dirFooPresent = false;
RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path(
"/targetRoot/"));
while (dirContents.hasNext()) {
FileStatus fileStatus = dirContents.next();
if (fileStatus.getPath().getName().equals("dirFoo")) {
dirFooPresent = true;
}
}
Assert.assertTrue(dirFooPresent);
} }
// rename across mount points that point to same target also fail // rename across mount points that point to same target also fail
@ -358,7 +381,7 @@ public class ViewFsBaseTest {
FileStatus[] dirPaths = fcView.util().listStatus(new Path("/")); FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
FileStatus fs; FileStatus fs;
Assert.assertEquals(6, dirPaths.length); Assert.assertEquals(7, dirPaths.length);
fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths); fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths);
Assert.assertNotNull(fs); Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink()); Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
View File

@ -19,6 +19,7 @@
package org.apache.hadoop.http; package org.apache.hadoop.http;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.Assert; import org.junit.Assert;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -70,6 +71,12 @@ public class HttpServerFunctionalTest extends Assert {
return createServer(TEST, conf); return createServer(TEST, conf);
} }
public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl)
throws IOException {
prepareTestWebapp();
return createServer(TEST, conf, adminsAcl);
}
/** /**
* Create but do not start the test webapp server. The test webapp dir is * Create but do not start the test webapp server. The test webapp dir is
* prepared/checked in advance. * prepared/checked in advance.
@ -132,6 +139,11 @@ public class HttpServerFunctionalTest extends Assert {
throws IOException { throws IOException {
return new HttpServer(webapp, "0.0.0.0", 0, true, conf); return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
} }
public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
throws IOException {
return new HttpServer(webapp, "0.0.0.0", 0, true, conf, adminsAcl);
}
/** /**
* Create an HttpServer instance for the given webapp * Create an HttpServer instance for the given webapp
* @param webapp the webapp to work with * @param webapp the webapp to work with
View File

@ -60,7 +60,6 @@ import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
import org.mockito.Mock;
import org.mockito.Mockito; import org.mockito.Mockito;
import org.mortbay.util.ajax.JSON; import org.mortbay.util.ajax.JSON;
@ -360,6 +359,8 @@ public class TestHttpServer extends HttpServerFunctionalTest {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
true); true);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
true);
conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY, conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
DummyFilterInitializer.class.getName()); DummyFilterInitializer.class.getName());
@ -468,6 +469,26 @@ public class TestHttpServer extends HttpServerFunctionalTest {
} }
@Test
public void testRequiresAuthorizationAccess() throws Exception {
Configuration conf = new Configuration();
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
//requires admin access to instrumentation, FALSE by default
Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response));
//requires admin access to instrumentation, TRUE
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
AccessControlList acls = Mockito.mock(AccessControlList.class);
Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response));
}
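A minimal configuration sketch, assuming only the keys exercised by the test above: both the general authorization switch and the new instrumentation-specific switch have to be enabled before isInstrumentationAccessAllowed() starts consulting the admin ACL. The class name is illustrative, and the exact set of servlets gated this way (e.g. /jmx, /metrics) depends on the server.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class AdminOnlyInstrumentation {
  // Builds a Configuration under which the instrumentation servlets are
  // restricted to users on the admin ACL.
  public static Configuration build() {
    Configuration conf = new Configuration();
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    conf.setBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
    return conf;
  }
}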
@Test public void testBindAddress() throws Exception { @Test public void testBindAddress() throws Exception {
checkBindAddress("0.0.0.0", 0, false).stop(); checkBindAddress("0.0.0.0", 0, false).stop();
// hang onto this one for a bit more testing // hang onto this one for a bit more testing


@ -20,6 +20,7 @@ package org.apache.hadoop.io;
import junit.framework.TestCase; import junit.framework.TestCase;
import java.io.IOException;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException; import java.nio.charset.CharacterCodingException;
import java.util.Random; import java.util.Random;
@ -107,7 +108,6 @@ public class TestText extends TestCase {
} }
} }
public void testIO() throws Exception { public void testIO() throws Exception {
DataOutputBuffer out = new DataOutputBuffer(); DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer(); DataInputBuffer in = new DataInputBuffer();
@ -136,6 +136,40 @@ public class TestText extends TestCase {
assertTrue(before.equals(after2)); assertTrue(before.equals(after2));
} }
} }
public void doTestLimitedIO(String str, int strLen) throws IOException {
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
out.reset();
try {
Text.writeString(out, str, strLen);
fail("expected writeString to fail when told to write a string " +
"that was too long! The string was '" + str + "'");
} catch (IOException e) {
}
Text.writeString(out, str, strLen + 1);
// test that it reads correctly
in.reset(out.getData(), out.getLength());
in.mark(strLen);
String after;
try {
after = Text.readString(in, strLen);
fail("expected readString to fail when told to read a string " +
"that was too long! The string was '" + str + "'");
} catch (IOException e) {
}
in.reset();
after = Text.readString(in, strLen + 1);
assertTrue(str.equals(after));
}
public void testLimitedIO() throws Exception {
doTestLimitedIO("abcd", 4);
doTestLimitedIO("", 0);
doTestLimitedIO("1", 1);
}
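A small round-trip sketch of the capped helpers exercised above; the cap of value.length() + 1 follows the boundary the test establishes for ASCII input, and the buffer classes are the same ones the test uses. CappedTextRoundTrip is an illustrative class, not part of the patch.

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class CappedTextRoundTrip {
  // Writes and reads back a short string while refusing anything that
  // would exceed the given cap (mirrors doTestLimitedIO above).
  static String roundTrip(String value, int cap) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    Text.writeString(out, value, cap);          // throws IOException if too long

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    return Text.readString(in, cap);            // same cap on the read side
  }

  public static void main(String[] args) throws IOException {
    // Per the test above, a cap of value.length() + 1 admits an ASCII string.
    System.out.println(roundTrip("abcd", "abcd".length() + 1));
  }
}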
public void testCompare() throws Exception { public void testCompare() throws Exception {
DataOutputBuffer out1 = new DataOutputBuffer(); DataOutputBuffer out1 = new DataOutputBuffer();
@ -192,16 +226,6 @@ public class TestText extends TestCase {
assertTrue(text.find("\u20ac", 5)==11); assertTrue(text.find("\u20ac", 5)==11);
} }
public void testClear() {
Text text = new Text();
assertEquals("", text.toString());
assertEquals(0, text.getBytes().length);
text = new Text("abcd\u20acbdcd\u20ac");
text.clear();
assertEquals("", text.toString());
assertEquals(0, text.getBytes().length);
}
public void testFindAfterUpdatingContents() throws Exception { public void testFindAfterUpdatingContents() throws Exception {
Text text = new Text("abcd"); Text text = new Text("abcd");
text.set("a".getBytes()); text.set("a".getBytes());


@ -322,6 +322,29 @@ public class TestRPC {
server.stop(); server.stop();
} }
@Test
public void testProxyAddress() throws Exception {
Server server = RPC.getServer(TestProtocol.class,
new TestImpl(), ADDRESS, 0, conf);
TestProtocol proxy = null;
try {
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
// create a client
proxy = (TestProtocol)RPC.getProxy(
TestProtocol.class, TestProtocol.versionID, addr, conf);
assertEquals(addr, RPC.getServerAddress(proxy));
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@Test @Test
public void testSlowRpc() throws Exception { public void testSlowRpc() throws Exception {
System.out.println("Testing Slow RPC"); System.out.println("Testing Slow RPC");


@ -25,11 +25,16 @@ import java.net.ConnectException;
import java.net.InetAddress; import java.net.InetAddress;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.NetworkInterface; import java.net.NetworkInterface;
import java.net.ServerSocket;
import java.net.Socket; import java.net.Socket;
import java.net.SocketException; import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.URI; import java.net.URI;
import java.net.UnknownHostException; import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Enumeration; import java.util.Enumeration;
import java.util.List;
import java.util.concurrent.TimeUnit;
import junit.framework.AssertionFailedError; import junit.framework.AssertionFailedError;
@ -37,7 +42,9 @@ import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.NetUtilsTestResolver; import org.apache.hadoop.security.NetUtilsTestResolver;
import org.junit.Assume;
import org.junit.Before; import org.junit.Before;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
@ -50,6 +57,13 @@ public class TestNetUtils {
private static final int LOCAL_PORT = 8080; private static final int LOCAL_PORT = 8080;
private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT); private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT);
/**
* Some slop around expected times when making sure timeouts behave
* as expected. We assume that they will be accurate to within
* this threshold.
*/
static final long TIME_FUDGE_MILLIS = 200;
/** /**
* Test that we can't accidentally connect back to the connecting socket due * Test that we can't accidentally connect back to the connecting socket due
* to a quirk in the TCP spec. * to a quirk in the TCP spec.
@ -81,6 +95,79 @@ public class TestNetUtils {
} }
} }
@Test
public void testSocketReadTimeoutWithChannel() throws Exception {
doSocketReadTimeoutTest(true);
}
@Test
public void testSocketReadTimeoutWithoutChannel() throws Exception {
doSocketReadTimeoutTest(false);
}
private void doSocketReadTimeoutTest(boolean withChannel)
throws IOException {
// Binding a ServerSocket is enough to accept connections.
// Rely on the backlog to accept for us.
ServerSocket ss = new ServerSocket(0);
Socket s;
if (withChannel) {
s = NetUtils.getDefaultSocketFactory(new Configuration())
.createSocket();
Assume.assumeNotNull(s.getChannel());
} else {
s = new Socket();
assertNull(s.getChannel());
}
SocketInputWrapper stm = null;
try {
NetUtils.connect(s, ss.getLocalSocketAddress(), 1000);
stm = NetUtils.getInputStream(s, 1000);
assertReadTimeout(stm, 1000);
// Change timeout, make sure it applies.
stm.setTimeout(1);
assertReadTimeout(stm, 1);
// If there is a channel, then setting the socket timeout
// should not matter. If there is not a channel, it will
// take effect.
s.setSoTimeout(1000);
if (withChannel) {
assertReadTimeout(stm, 1);
} else {
assertReadTimeout(stm, 1000);
}
} finally {
IOUtils.closeStream(stm);
IOUtils.closeSocket(s);
ss.close();
}
}
private void assertReadTimeout(SocketInputWrapper stm, int timeoutMillis)
throws IOException {
long st = System.nanoTime();
try {
stm.read();
fail("Didn't time out");
} catch (SocketTimeoutException ste) {
assertTimeSince(st, timeoutMillis);
}
}
private void assertTimeSince(long startNanos, int expectedMillis) {
long durationNano = System.nanoTime() - startNanos;
long millis = TimeUnit.MILLISECONDS.convert(
durationNano, TimeUnit.NANOSECONDS);
assertTrue("Expected " + expectedMillis + "ms, but took " + millis,
Math.abs(millis - expectedMillis) < TIME_FUDGE_MILLIS);
}
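A stripped-down usage sketch of the wrapper these tests exercise: NetUtils.getInputStream(socket, timeout) hands back a SocketInputWrapper whose timeout can be tightened later without reconnecting. The silent local ServerSocket is only a stand-in so the read is guaranteed to time out, and the 200 ms value is arbitrary.

import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.SocketInputWrapper;

public class ReadTimeoutSketch {
  public static void main(String[] args) throws IOException {
    // A bound-but-silent server: the connection is accepted by the backlog
    // and never written to, so the read below has nothing to return.
    ServerSocket silent = new ServerSocket(0);
    Socket s = new Socket();
    SocketInputWrapper in = null;
    try {
      NetUtils.connect(s, silent.getLocalSocketAddress(), 1000);
      in = NetUtils.getInputStream(s, 1000);   // initial 1s read timeout
      in.setTimeout(200);                      // tighten it afterwards
      try {
        in.read();
      } catch (SocketTimeoutException expected) {
        System.out.println("read timed out as expected");
      }
    } finally {
      IOUtils.closeStream(in);
      IOUtils.closeSocket(s);
      silent.close();
    }
  }
}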
/** /**
* Test for { * Test for {
* @throws UnknownHostException @link NetUtils#getLocalInetAddress(String) * @throws UnknownHostException @link NetUtils#getLocalInetAddress(String)
@ -512,6 +599,26 @@ public class TestNetUtils {
assertEquals("scheme://host.a.b/path", uri.toString()); assertEquals("scheme://host.a.b/path", uri.toString());
} }
/**
* Test for {@link NetUtils#normalizeHostNames}
*/
@Test
public void testNormalizeHostName() {
List<String> hosts = Arrays.asList(new String[] {"127.0.0.1",
"localhost", "3w.org", "UnknownHost"});
List<String> normalizedHosts = NetUtils.normalizeHostNames(hosts);
// normalizing an IP address literal returns the same address
assertEquals(normalizedHosts.get(0), hosts.get(0));
// normalizing a resolvable hostname returns its resolved IP address
assertFalse(normalizedHosts.get(1).equals(hosts.get(1)));
assertEquals(normalizedHosts.get(1), hosts.get(0));
// HADOOP-8372: a resolvable hostname that starts with a digit is still resolved to its IP address
assertFalse(normalizedHosts.get(2).equals(hosts.get(2)));
// an unresolvable hostname is returned unchanged
assertEquals(normalizedHosts.get(3), hosts.get(3));
}
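For reference, a tiny sketch of the helper outside a test, assuming the behavior asserted above: IP literals come back unchanged, resolvable names are replaced by their addresses, and unresolvable names are returned as-is. The host names here are placeholders.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.net.NetUtils;

public class NormalizeHostsSketch {
  public static void main(String[] args) {
    List<String> hosts = Arrays.asList("127.0.0.1", "localhost", "no-such-host.invalid");
    // Prints the normalized list: literal, resolved address, unchanged name.
    System.out.println(NetUtils.normalizeHostNames(hosts));
  }
}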
@Test @Test
public void testGetHostNameOfIP() { public void testGetHostNameOfIP() {
assertNull(NetUtils.getHostNameOfIP(null)); assertNull(NetUtils.getHostNameOfIP(null));


@ -19,6 +19,7 @@ package org.apache.hadoop.net;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream; import java.io.OutputStream;
import java.net.SocketTimeoutException; import java.net.SocketTimeoutException;
import java.nio.channels.Pipe; import java.nio.channels.Pipe;
@ -26,8 +27,13 @@ import java.util.Arrays;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
import junit.framework.TestCase; import org.junit.Test;
import static org.junit.Assert.*;
/** /**
* This tests timeouts from SocketInputStream and * This tests timeouts from SocketInputStream and
@ -36,14 +42,17 @@ import junit.framework.TestCase;
* Normal read and write using these streams are tested by pretty much * Normal read and write using these streams are tested by pretty much
* every DFS unit test. * every DFS unit test.
*/ */
public class TestSocketIOWithTimeout extends TestCase { public class TestSocketIOWithTimeout {
static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class); static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class);
private static int TIMEOUT = 1*1000; private static int TIMEOUT = 1*1000;
private static String TEST_STRING = "1234567890"; private static String TEST_STRING = "1234567890";
private MultithreadedTestUtil.TestContext ctx = new TestContext();
private void doIO(InputStream in, OutputStream out) throws IOException { private void doIO(InputStream in, OutputStream out,
int expectedTimeout) throws IOException {
/* Keep on writing or reading until we get SocketTimeoutException. /* Keep on writing or reading until we get SocketTimeoutException.
* It expects this exception to occur within 100 millis of TIMEOUT. * It expects this exception to occur within 100 millis of TIMEOUT.
*/ */
@ -61,34 +70,15 @@ public class TestSocketIOWithTimeout extends TestCase {
long diff = System.currentTimeMillis() - start; long diff = System.currentTimeMillis() - start;
LOG.info("Got SocketTimeoutException as expected after " + LOG.info("Got SocketTimeoutException as expected after " +
diff + " millis : " + e.getMessage()); diff + " millis : " + e.getMessage());
assertTrue(Math.abs(TIMEOUT - diff) <= 200); assertTrue(Math.abs(expectedTimeout - diff) <=
TestNetUtils.TIME_FUDGE_MILLIS);
break; break;
} }
} }
} }
/** @Test
* Just reads one byte from the input stream. public void testSocketIOWithTimeout() throws Exception {
*/
static class ReadRunnable implements Runnable {
private InputStream in;
public ReadRunnable(InputStream in) {
this.in = in;
}
public void run() {
try {
in.read();
} catch (IOException e) {
LOG.info("Got expection while reading as expected : " +
e.getMessage());
return;
}
assertTrue(false);
}
}
public void testSocketIOWithTimeout() throws IOException {
// first open pipe: // first open pipe:
Pipe pipe = Pipe.open(); Pipe pipe = Pipe.open();
@ -96,7 +86,7 @@ public class TestSocketIOWithTimeout extends TestCase {
Pipe.SinkChannel sink = pipe.sink(); Pipe.SinkChannel sink = pipe.sink();
try { try {
InputStream in = new SocketInputStream(source, TIMEOUT); final InputStream in = new SocketInputStream(source, TIMEOUT);
OutputStream out = new SocketOutputStream(sink, TIMEOUT); OutputStream out = new SocketOutputStream(sink, TIMEOUT);
byte[] writeBytes = TEST_STRING.getBytes(); byte[] writeBytes = TEST_STRING.getBytes();
@ -105,37 +95,62 @@ public class TestSocketIOWithTimeout extends TestCase {
out.write(writeBytes); out.write(writeBytes);
out.write(byteWithHighBit); out.write(byteWithHighBit);
doIO(null, out); doIO(null, out, TIMEOUT);
in.read(readBytes); in.read(readBytes);
assertTrue(Arrays.equals(writeBytes, readBytes)); assertTrue(Arrays.equals(writeBytes, readBytes));
assertEquals(byteWithHighBit & 0xff, in.read()); assertEquals(byteWithHighBit & 0xff, in.read());
doIO(in, null); doIO(in, null, TIMEOUT);
// Change timeout on the read side.
((SocketInputStream)in).setTimeout(TIMEOUT * 2);
doIO(in, null, TIMEOUT * 2);
/* /*
* Verify that it handles interrupted threads properly. * Verify that it handles interrupted threads properly.
* Use a large timeout and expect the thread to return quickly. * Use a large timeout and expect the thread to return quickly
* upon interruption.
*/ */
in = new SocketInputStream(source, 0); ((SocketInputStream)in).setTimeout(0);
Thread thread = new Thread(new ReadRunnable(in)); TestingThread thread = new TestingThread(ctx) {
thread.start(); @Override
public void doWork() throws Exception {
try { try {
Thread.sleep(1000); in.read();
} catch (InterruptedException ignored) {} fail("Did not fail with interrupt");
} catch (InterruptedIOException ste) {
LOG.info("Got expection while reading as expected : " +
ste.getMessage());
}
}
};
ctx.addThread(thread);
ctx.startThreads();
// If the thread is interrupted before it calls read()
// then it throws ClosedByInterruptException due to
// some Java quirk. Waiting for it to call read()
// gets it into select(), so we get the expected
// InterruptedIOException.
Thread.sleep(1000);
thread.interrupt(); thread.interrupt();
ctx.stop();
try {
thread.join();
} catch (InterruptedException e) {
throw new IOException("Unexpected InterruptedException : " + e);
}
//make sure the channels are still open //make sure the channels are still open
assertTrue(source.isOpen()); assertTrue(source.isOpen());
assertTrue(sink.isOpen()); assertTrue(sink.isOpen());
// Nevertheless, the output stream is closed, because
// a partial write may have succeeded (see comment in
// SocketOutputStream#write(byte[]), int, int)
try {
out.write(1);
fail("Did not throw");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"stream is closed", ioe);
}
out.close(); out.close();
assertFalse(sink.isOpen()); assertFalse(sink.isOpen());


@ -41,7 +41,7 @@ public class TestTableMapping {
public void setUp() throws IOException { public void setUp() throws IOException {
mappingFile = File.createTempFile(getClass().getSimpleName(), ".txt"); mappingFile = File.createTempFile(getClass().getSimpleName(), ".txt");
Files.write("a.b.c /rack1\n" + Files.write("a.b.c /rack1\n" +
"1.2.3\t/rack2\n", mappingFile, Charsets.UTF_8); "1.2.3.4\t/rack2\n", mappingFile, Charsets.UTF_8);
mappingFile.deleteOnExit(); mappingFile.deleteOnExit();
} }
@ -55,7 +55,7 @@ public class TestTableMapping {
List<String> names = new ArrayList<String>(); List<String> names = new ArrayList<String>();
names.add("a.b.c"); names.add("a.b.c");
names.add("1.2.3"); names.add("1.2.3.4");
List<String> result = mapping.resolve(names); List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size()); assertEquals(names.size(), result.size());
@ -73,7 +73,7 @@ public class TestTableMapping {
List<String> names = new ArrayList<String>(); List<String> names = new ArrayList<String>();
names.add("a.b.c"); names.add("a.b.c");
names.add("1.2.3"); names.add("1.2.3.4");
List<String> result1 = mapping.resolve(names); List<String> result1 = mapping.resolve(names);
assertEquals(names.size(), result1.size()); assertEquals(names.size(), result1.size());
@ -96,7 +96,7 @@ public class TestTableMapping {
List<String> names = new ArrayList<String>(); List<String> names = new ArrayList<String>();
names.add("a.b.c"); names.add("a.b.c");
names.add("1.2.3"); names.add("1.2.3.4");
List<String> result = mapping.resolve(names); List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size()); assertEquals(names.size(), result.size());
@ -114,7 +114,7 @@ public class TestTableMapping {
List<String> names = new ArrayList<String>(); List<String> names = new ArrayList<String>();
names.add("a.b.c"); names.add("a.b.c");
names.add("1.2.3"); names.add("1.2.3.4");
List<String> result = mapping.resolve(names); List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size()); assertEquals(names.size(), result.size());
@ -134,7 +134,7 @@ public class TestTableMapping {
List<String> names = new ArrayList<String>(); List<String> names = new ArrayList<String>();
names.add("a.b.c"); names.add("a.b.c");
names.add("1.2.3"); names.add("1.2.3.4");
List<String> result = mapping.resolve(names); List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size()); assertEquals(names.size(), result.size());


@ -18,11 +18,15 @@
package org.apache.hadoop.security.token; package org.apache.hadoop.security.token;
import static junit.framework.Assert.assertEquals;
import java.io.*; import java.io.*;
import java.util.Arrays; import java.util.Arrays;
import org.apache.hadoop.io.*; import org.apache.hadoop.io.*;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
import junit.framework.TestCase; import junit.framework.TestCase;
@ -94,5 +98,20 @@ public class TestToken extends TestCase {
checkUrlSafe(encode); checkUrlSafe(encode);
} }
} }
public void testDecodeIdentifier() throws IOException {
TestDelegationTokenSecretManager secretManager =
new TestDelegationTokenSecretManager(0, 0, 0, 0);
secretManager.startThreads();
TestDelegationTokenIdentifier id = new TestDelegationTokenIdentifier(
new Text("owner"), new Text("renewer"), new Text("realUser"));
Token<TestDelegationTokenIdentifier> token =
new Token<TestDelegationTokenIdentifier>(id, secretManager);
TokenIdentifier idCopy = token.decodeIdentifier();
assertNotSame(id, idCopy);
assertEquals(id, idCopy);
}
} }


@ -19,7 +19,6 @@ package org.apache.hadoop.util;
import static org.junit.Assert.*; import static org.junit.Assert.*;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test; import org.junit.Test;
public class TestVersionUtil { public class TestVersionUtil {
@ -30,6 +29,8 @@ public class TestVersionUtil {
assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0")); assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0"));
assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a")); assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a"));
assertEquals(0, VersionUtil.compareVersions("1", "1")); assertEquals(0, VersionUtil.compareVersions("1", "1"));
assertEquals(0, VersionUtil.compareVersions(
"2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"));
// Assert that lower versions are lower, and higher versions are higher. // Assert that lower versions are lower, and higher versions are higher.
assertExpectedValues("1", "2.0.0"); assertExpectedValues("1", "2.0.0");
@ -52,6 +53,13 @@ public class TestVersionUtil {
assertExpectedValues("1.0.0a2", "1.0.0a10"); assertExpectedValues("1.0.0a2", "1.0.0a10");
assertExpectedValues("1.0", "1.a"); assertExpectedValues("1.0", "1.a");
assertExpectedValues("1.0", "1.a0"); assertExpectedValues("1.0", "1.a0");
// Snapshot builds precede their eventual releases.
assertExpectedValues("1.0-SNAPSHOT", "1.0");
assertExpectedValues("1.0", "1.0.0-SNAPSHOT");
assertExpectedValues("1.0.0-SNAPSHOT", "1.0.0");
assertExpectedValues("1.0.0", "1.0.1-SNAPSHOT");
assertExpectedValues("1.0.1-SNAPSHOT", "1.0.1");
} }
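A short sketch of how calling code might lean on this ordering, assuming only what the assertions above establish (a -SNAPSHOT build sorts just below its eventual release). meetsMinimum is an illustrative helper, not an existing API.

import org.apache.hadoop.util.VersionUtil;

public class MinimumVersionCheck {
  // Returns true when 'actual' is at least 'required', treating
  // "x.y.z-SNAPSHOT" as older than the eventual "x.y.z" release.
  static boolean meetsMinimum(String actual, String required) {
    return VersionUtil.compareVersions(actual, required) >= 0;
  }

  public static void main(String[] args) {
    System.out.println(meetsMinimum("2.0.0-SNAPSHOT", "2.0.0")); // false
    System.out.println(meetsMinimum("2.0.0", "2.0.0-SNAPSHOT")); // true
  }
}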
private static void assertExpectedValues(String lower, String higher) { private static void assertExpectedValues(String lower, String higher) {

View File

@ -0,0 +1,2 @@
org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
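A small consumer-side sketch tying the two pieces together, assuming the entries above are service registrations that let decodeIdentifier() locate the matching TokenIdentifier class, and that it returns null when no class is registered for a token's kind; the token itself is assumed to come from elsewhere (an RPC, a credentials file, and so on).

import java.io.IOException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenInspector {
  // Describes who a received token was issued to, using the registered
  // identifier class for its kind when one is available.
  static void describe(Token<? extends TokenIdentifier> token) throws IOException {
    TokenIdentifier id = token.decodeIdentifier();
    if (id == null) {
      System.out.println("No identifier class registered for kind " + token.getKind());
      return;
    }
    System.out.println("Token of kind " + token.getKind() + " issued to " + id.getUser());
  }
}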


@ -368,9 +368,6 @@ Release 2.0.0 - UNRELEASED
HDFS-2505. Add a test to verify getFileChecksum(..) with ViewFS. (Ravi HDFS-2505. Add a test to verify getFileChecksum(..) with ViewFS. (Ravi
Prakash via szetszwo) Prakash via szetszwo)
HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
and epoch in JournalProtocol. (suresh via szetszwo)
HDFS-3240. Drop log level of "heartbeat: ..." in BPServiceActor to DEBUG HDFS-3240. Drop log level of "heartbeat: ..." in BPServiceActor to DEBUG
(todd) (todd)
@ -419,6 +416,44 @@ Release 2.0.0 - UNRELEASED
HDFS-3339. Change INode to package private. (John George via szetszwo) HDFS-3339. Change INode to package private. (John George via szetszwo)
HDFS-3303. Remove Writable implementation from RemoteEditLogManifest.
(Brandon Li via szetszwo)
HDFS-2617. Replaced Kerberized SSL for image transfer and fsck
with SPNEGO-based solution. (jghoman, tucu, and atm via eli)
HDFS-3365. Enable users to disable socket caching in DFS client
configuration (todd)
HDFS-3375. Put client name in DataXceiver thread name for readBlock
and keepalive (todd)
HDFS-3363. Define BlockCollection and MutableBlockCollection interfaces
so that INodeFile and INodeFileUnderConstruction do not have to be used in
block management. (John George via szetszwo)
HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
logging is enabled. (atm)
HDFS-3341. Change minimum RPC versions to respective SNAPSHOTs instead of
final releases. (todd)
HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and
BlocksMap to {get|set|add}BlockCollection(..). (John George via szetszwo)
HDFS-3134. harden edit log loader against malformed or malicious input.
(Colin Patrick McCabe via eli)
HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli)
HDFS-3400. DNs should be able start with jsvc even if security is disabled.
(atm via eli)
HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
and epoch in JournalProtocol. (suresh via szetszwo)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd) HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@ -432,6 +467,8 @@ Release 2.0.0 - UNRELEASED
HDFS-2476. More CPU efficient data structure for under-replicated, HDFS-2476. More CPU efficient data structure for under-replicated,
over-replicated, and invalidated blocks. (Tomasz Nykiel via todd) over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
HDFS-3378. Remove DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY and DEFAULT. (eli)
BUG FIXES BUG FIXES
HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol. HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol.
@ -589,6 +626,33 @@ Release 2.0.0 - UNRELEASED
HDFS-3330. If GetImageServlet throws an Error or RTE, response should not HDFS-3330. If GetImageServlet throws an Error or RTE, response should not
have HTTP "OK" status. (todd) have HTTP "OK" status. (todd)
HDFS-3351. NameNode#initializeGenericKeys should always set fs.defaultFS
regardless of whether HA or Federation is enabled. (atm)
HDFS-3359. DFSClient.close should close cached sockets. (todd)
HDFS-3350. In INode, add final to compareTo(..), equals(..) and hashCode(),
and remove synchronized from updatePermissionStatus(..). (szetszwo)
HDFS-3357. DataXceiver reads from client socket with incorrect/no timeout
(todd)
HDFS-3376. DFSClient fails to make connection to DN if there are many
unusable cached sockets (todd)
HDFS-3328. NPE in DataNode.getIpcPort. (eli)
HDFS-3396. FUSE build fails on Ubuntu 12.04. (Colin Patrick McCabe via eli)
HDFS-3395. NN doesn't start with HA+security enabled and HTTP address
set to 0.0.0.0. (atm)
HDFS-3385. The last block of INodeFileUnderConstruction is not
necessarily a BlockInfoUnderConstruction, so do not cast it in
FSNamesystem.recoverLeaseInternal(..). (szetszwo)
HDFS-3026. HA: Handle failure during HA state transition. (atm)
BREAKDOWN OF HDFS-1623 SUBTASKS BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)


@ -18,4 +18,5 @@ bin_PROGRAMS = fuse_dfs
fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c fuse_impls_chown.c fuse_impls_create.c fuse_impls_flush.c fuse_impls_getattr.c fuse_impls_mkdir.c fuse_impls_mknod.c fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c fuse_impls_unlink.c fuse_impls_write.c fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c fuse_impls_chown.c fuse_impls_create.c fuse_impls_flush.c fuse_impls_getattr.c fuse_impls_mkdir.c fuse_impls_mknod.c fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c fuse_impls_unlink.c fuse_impls_write.c
AM_CFLAGS= -Wall -g AM_CFLAGS= -Wall -g
AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/../../src/main/native -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/../../src/main/native -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm -lm AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib64 -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -L$(FUSE_HOME)/lib -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server
fuse_dfs_LDADD=-lfuse -lhdfs -ljvm -lm


@ -57,16 +57,21 @@ shift
# Determine if we're starting a secure datanode, and if so, redefine appropriate variables # Determine if we're starting a secure datanode, and if so, redefine appropriate variables
if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then if [ -n "$JSVC_HOME" ]; then
HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
fi
if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
fi
HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
starting_secure_dn="true"
else
echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
"isn't set. Falling back to starting insecure DN."
fi fi
if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
fi
HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
starting_secure_dn="true"
fi fi
if [ "$COMMAND" = "namenode" ] ; then if [ "$COMMAND" = "namenode" ] ; then
@ -129,12 +134,12 @@ if [ "$starting_secure_dn" = "true" ]; then
if [ "$HADOOP_PID_DIR" = "" ]; then if [ "$HADOOP_PID_DIR" = "" ]; then
HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid" HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
else else
HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid" HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
fi fi
JSVC=$JSVC_HOME/jsvc JSVC=$JSVC_HOME/jsvc
if [ ! -f $JSVC ]; then if [ ! -f $JSVC ]; then
echo "JSVC_HOME is not set correctly so jsvc can not be found. Jsvc is required to run secure datanodes. " echo "JSVC_HOME is not set correctly so jsvc cannot be found. Jsvc is required to run secure datanodes. "
echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\ echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
"and set JSVC_HOME to the directory containing the jsvc binary." "and set JSVC_HOME to the directory containing the jsvc binary."
exit exit


@ -560,6 +560,7 @@ public class DFSClient implements java.io.Closeable {
void abort() { void abort() {
clientRunning = false; clientRunning = false;
closeAllFilesBeingWritten(true); closeAllFilesBeingWritten(true);
socketCache.clear();
closeConnectionToNamenode(); closeConnectionToNamenode();
} }
@ -597,6 +598,7 @@ public class DFSClient implements java.io.Closeable {
public synchronized void close() throws IOException { public synchronized void close() throws IOException {
if(clientRunning) { if(clientRunning) {
closeAllFilesBeingWritten(false); closeAllFilesBeingWritten(false);
socketCache.clear();
clientRunning = false; clientRunning = false;
leaserenewer.closeClient(this); leaserenewer.closeClient(this);
// close connections to the namenode // close connections to the namenode


@ -99,8 +99,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0; public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address"; public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090"; public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
public static final String DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY = "dfs.namenode.secondary.https-port";
public static final int DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT = 50490;
public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period"; public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
public static final long DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60; public static final long DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60;
public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period"; public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
@ -147,7 +145,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained"; public static final String DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained";
public static final int DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M public static final int DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M
public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version"; public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version";
public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0"; public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum"; public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1; public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
@ -265,7 +263,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020; public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020;
public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0" + DFS_DATANODE_IPC_DEFAULT_PORT; public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0" + DFS_DATANODE_IPC_DEFAULT_PORT;
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version"; public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0"; public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable"; public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable";
public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false; public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
@ -319,10 +317,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.kerberos.principal"; public static final String DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.kerberos.principal";
public static final String DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file"; public static final String DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
public static final String DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.kerberos.principal"; public static final String DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.kerberos.principal";
public static final String DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.namenode.kerberos.https.principal"; public static final String DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.namenode.kerberos.internal.spnego.principal";
public static final String DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY = "dfs.secondary.namenode.keytab.file"; public static final String DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY = "dfs.secondary.namenode.keytab.file";
public static final String DFS_SECONDARY_NAMENODE_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.principal"; public static final String DFS_SECONDARY_NAMENODE_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.principal";
public static final String DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal"; public static final String DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal";
public static final String DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold"; public static final String DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
public static final int DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10; public static final int DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;


@ -864,7 +864,13 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
// Allow retry since there is no way of knowing whether the cached socket // Allow retry since there is no way of knowing whether the cached socket
// is good until we actually use it. // is good until we actually use it.
for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) { for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) {
Socket sock = socketCache.get(dnAddr); Socket sock = null;
// Don't use the cache on the last attempt - it's possible that there
// are arbitrarily many unusable sockets in the cache, but we don't
// want to fail the read.
if (retries < nCachedConnRetry) {
sock = socketCache.get(dnAddr);
}
if (sock == null) { if (sock == null) {
fromCache = false; fromCache = false;
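The comment above carries the whole rationale; the following schematic loop restates it with hypothetical names (connectionPool, dialFresh, probe, MAX_CACHED_RETRIES) rather than the DFSClient internals: pooled sockets are only tried while retries remain, and the final attempt always dials fresh so a pool full of dead sockets cannot exhaust the retry budget.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.ArrayDeque;
import java.util.Deque;

public class LastAttemptFreshConnection {
  private static final int MAX_CACHED_RETRIES = 3;                // assumed limit
  private final Deque<Socket> connectionPool = new ArrayDeque<Socket>(); // hypothetical pool

  Socket connect(InetSocketAddress addr) throws IOException {
    for (int retries = 0; retries <= MAX_CACHED_RETRIES; retries++) {
      Socket sock = null;
      // Reuse a pooled socket only while retries remain; the final attempt
      // always creates a fresh connection.
      if (retries < MAX_CACHED_RETRIES) {
        sock = connectionPool.poll();
      }
      if (sock == null) {
        sock = dialFresh(addr);
      }
      try {
        probe(sock);            // hypothetical liveness check
        return sock;
      } catch (IOException e) {
        sock.close();           // discard and try again
      }
    }
    throw new IOException("Unable to obtain a usable connection to " + addr);
  }

  private Socket dialFresh(InetSocketAddress addr) throws IOException {
    Socket s = new Socket();
    s.connect(addr, 1000);
    return s;
  }

  private void probe(Socket s) throws IOException {
    if (s.isClosed() || !s.isConnected()) {
      throw new IOException("stale socket");
    }
  }
}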


@ -714,8 +714,11 @@ public class DFSUtil {
public static String substituteForWildcardAddress(String configuredAddress, public static String substituteForWildcardAddress(String configuredAddress,
String defaultHost) throws IOException { String defaultHost) throws IOException {
InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress); InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
+ ":0");
if (sockAddr.getAddress().isAnyLocalAddress()) { if (sockAddr.getAddress().isAnyLocalAddress()) {
if(UserGroupInformation.isSecurityEnabled()) { if (UserGroupInformation.isSecurityEnabled() &&
defaultSockAddr.getAddress().isAnyLocalAddress()) {
throw new IOException("Cannot use a wildcard address with security. " + throw new IOException("Cannot use a wildcard address with security. " +
"Must explicitly set bind address for Kerberos"); "Must explicitly set bind address for Kerberos");
} }


@ -81,7 +81,6 @@ public class HdfsConfiguration extends Configuration {
deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY); deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY); deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY); deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
deprecate("dfs.secondary.https.port", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY);
deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY); deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY); deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY); deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);


@ -144,7 +144,7 @@ public class HftpFileSystem extends FileSystem
} }
protected URI getNamenodeSecureUri(URI uri) { protected URI getNamenodeSecureUri(URI uri) {
return DFSUtil.createUri("https", getNamenodeSecureAddr(uri)); return DFSUtil.createUri("http", getNamenodeSecureAddr(uri));
} }
@Override @Override
@ -247,7 +247,7 @@ public class HftpFileSystem extends FileSystem
c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer); c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer);
} catch (Exception e) { } catch (Exception e) {
LOG.info("Couldn't get a delegation token from " + nnHttpUrl + LOG.info("Couldn't get a delegation token from " + nnHttpUrl +
" using https."); " using http.");
if(LOG.isDebugEnabled()) { if(LOG.isDebugEnabled()) {
LOG.debug("error was ", e); LOG.debug("error was ", e);
} }
@ -686,11 +686,11 @@ public class HftpFileSystem extends FileSystem
Configuration conf) throws IOException { Configuration conf) throws IOException {
// update the kerberos credentials, if they are coming from a keytab // update the kerberos credentials, if they are coming from a keytab
UserGroupInformation.getLoginUser().reloginFromKeytab(); UserGroupInformation.getLoginUser().reloginFromKeytab();
// use https to renew the token // use http to renew the token
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token); InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
return return
DelegationTokenFetcher.renewDelegationToken DelegationTokenFetcher.renewDelegationToken
(DFSUtil.createUri("https", serviceAddr).toString(), (DFSUtil.createUri("http", serviceAddr).toString(),
(Token<DelegationTokenIdentifier>) token); (Token<DelegationTokenIdentifier>) token);
} }
@ -700,10 +700,10 @@ public class HftpFileSystem extends FileSystem
Configuration conf) throws IOException { Configuration conf) throws IOException {
// update the kerberos credentials, if they are coming from a keytab // update the kerberos credentials, if they are coming from a keytab
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab(); UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
// use https to cancel the token // use http to cancel the token
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token); InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
DelegationTokenFetcher.cancelDelegationToken DelegationTokenFetcher.cancelDelegationToken
(DFSUtil.createUri("https", serviceAddr).toString(), (DFSUtil.createUri("http", serviceAddr).toString(),
(Token<DelegationTokenIdentifier>) token); (Token<DelegationTokenIdentifier>) token);
} }
} }


@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.util.DirectBufferPool; import org.apache.hadoop.hdfs.util.DirectBufferPool;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.SocketInputStream; import org.apache.hadoop.net.SocketInputWrapper;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum;
@ -450,11 +450,8 @@ public class RemoteBlockReader2 implements BlockReader {
// //
// Get bytes in block, set streams // Get bytes in block, set streams
// //
Preconditions.checkArgument(sock.getChannel() != null, SocketInputWrapper sin = NetUtils.getInputStream(sock);
"Socket %s does not have an associated Channel.", ReadableByteChannel ch = sin.getReadableByteChannel();
sock);
SocketInputStream sin =
(SocketInputStream)NetUtils.getInputStream(sock);
DataInputStream in = new DataInputStream(sin); DataInputStream in = new DataInputStream(sin);
BlockOpResponseProto status = BlockOpResponseProto.parseFrom( BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
@ -477,7 +474,7 @@ public class RemoteBlockReader2 implements BlockReader {
} }
return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(), return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
sin, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock); ch, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock);
} }
static void checkSuccess( static void checkSuccess(


@ -47,6 +47,9 @@ class SocketCache {
public SocketCache(int capacity) { public SocketCache(int capacity) {
multimap = LinkedListMultimap.create(); multimap = LinkedListMultimap.create();
this.capacity = capacity; this.capacity = capacity;
if (capacity <= 0) {
LOG.debug("SocketCache disabled in configuration.");
}
} }
/** /**
@ -55,6 +58,10 @@ class SocketCache {
* @return A socket with unknown state, possibly closed underneath. Or null. * @return A socket with unknown state, possibly closed underneath. Or null.
*/ */
public synchronized Socket get(SocketAddress remote) { public synchronized Socket get(SocketAddress remote) {
if (capacity <= 0) { // disabled
return null;
}
List<Socket> socklist = multimap.get(remote); List<Socket> socklist = multimap.get(remote);
if (socklist == null) { if (socklist == null) {
return null; return null;
@ -76,6 +83,12 @@ class SocketCache {
* @param sock socket not used by anyone. * @param sock socket not used by anyone.
*/ */
public synchronized void put(Socket sock) { public synchronized void put(Socket sock) {
if (capacity <= 0) {
// Cache disabled.
IOUtils.closeSocket(sock);
return;
}
Preconditions.checkNotNull(sock); Preconditions.checkNotNull(sock);
SocketAddress remoteAddr = sock.getRemoteSocketAddress(); SocketAddress remoteAddr = sock.getRemoteSocketAddress();


@ -148,7 +148,8 @@ public class BlockTokenIdentifier extends TokenIdentifier {
userId = WritableUtils.readString(in); userId = WritableUtils.readString(in);
blockPoolId = WritableUtils.readString(in); blockPoolId = WritableUtils.readString(in);
blockId = WritableUtils.readVLong(in); blockId = WritableUtils.readVLong(in);
int length = WritableUtils.readVInt(in); int length = WritableUtils.readVIntInRange(in, 0,
AccessMode.class.getEnumConstants().length);
for (int i = 0; i < length; i++) { for (int i = 0; i < length; i++) {
modes.add(WritableUtils.readEnum(in, AccessMode.class)); modes.add(WritableUtils.readEnum(in, AccessMode.class));
} }
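The same hardening pattern in isolation, with a hypothetical Color enum standing in for AccessMode and a byte array assumed to hold previously serialized data: bounding the length up front keeps a corrupted or malicious value from driving the loop.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.io.WritableUtils;

public class BoundedLengthRead {
  enum Color { RED, GREEN, BLUE }   // hypothetical enum standing in for AccessMode

  static EnumSet<Color> readColors(byte[] serialized) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(serialized));
    // Reject lengths outside [0, #constants] immediately instead of looping
    // over whatever value happened to be on the wire.
    int length = WritableUtils.readVIntInRange(in, 0,
        Color.class.getEnumConstants().length);
    EnumSet<Color> result = EnumSet.noneOf(Color.class);
    for (int i = 0; i < length; i++) {
      result.add(WritableUtils.readEnum(in, Color.class));
    }
    return result;
  }
}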


@ -0,0 +1,63 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.fs.ContentSummary;
/**
* This interface is used by the block manager to expose a
* few characteristics of a collection of Block/BlockUnderConstruction.
*/
public interface BlockCollection {
/**
* Get the last block of the collection.
* Make sure it has the right type.
*/
public <T extends BlockInfo> T getLastBlock() throws IOException;
/**
* Get content summary.
*/
public ContentSummary computeContentSummary();
/** @return the number of blocks */
public int numBlocks();
public BlockInfo[] getBlocks();
/**
* Get preferred block size for the collection
* @return preferred block size in bytes
*/
public long getPreferredBlockSize();
/**
* Get block replication for the collection
* @return block replication value
*/
public short getReplication();
/**
* Get name of collection.
*/
public String getName();
}


@ -22,18 +22,17 @@ import java.util.LinkedList;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.LightWeightGSet; import org.apache.hadoop.hdfs.util.LightWeightGSet;
/** /**
* BlockInfo class maintains for a given block * BlockInfo class maintains for a given block
* the {@link INodeFile} it is part of and datanodes where the replicas of * the {@link BlockCollection} it is part of and datanodes where the replicas of
* the block are stored. * the block are stored.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class BlockInfo extends Block implements public class BlockInfo extends Block implements
LightWeightGSet.LinkedElement { LightWeightGSet.LinkedElement {
private INodeFile inode; private BlockCollection bc;
/** For implementing {@link LightWeightGSet.LinkedElement} interface */ /** For implementing {@link LightWeightGSet.LinkedElement} interface */
private LightWeightGSet.LinkedElement nextLinkedElement; private LightWeightGSet.LinkedElement nextLinkedElement;
@ -58,13 +57,13 @@ public class BlockInfo extends Block implements
*/ */
public BlockInfo(int replication) { public BlockInfo(int replication) {
this.triplets = new Object[3*replication]; this.triplets = new Object[3*replication];
this.inode = null; this.bc = null;
} }
public BlockInfo(Block blk, int replication) { public BlockInfo(Block blk, int replication) {
super(blk); super(blk);
this.triplets = new Object[3*replication]; this.triplets = new Object[3*replication];
this.inode = null; this.bc = null;
} }
/** /**
@ -73,16 +72,16 @@ public class BlockInfo extends Block implements
* @param from BlockInfo to copy from. * @param from BlockInfo to copy from.
*/ */
protected BlockInfo(BlockInfo from) { protected BlockInfo(BlockInfo from) {
this(from, from.inode.getReplication()); this(from, from.bc.getReplication());
this.inode = from.inode; this.bc = from.bc;
} }
public INodeFile getINode() { public BlockCollection getBlockCollection() {
return inode; return bc;
} }
public void setINode(INodeFile inode) { public void setBlockCollection(BlockCollection bc) {
this.inode = inode; this.bc = bc;
} }
DatanodeDescriptor getDatanode(int index) { DatanodeDescriptor getDatanode(int index) {
@ -335,7 +334,7 @@ public class BlockInfo extends Block implements
BlockUCState s, DatanodeDescriptor[] targets) { BlockUCState s, DatanodeDescriptor[] targets) {
if(isComplete()) { if(isComplete()) {
return new BlockInfoUnderConstruction( return new BlockInfoUnderConstruction(
this, getINode().getReplication(), s, targets); this, getBlockCollection().getReplication(), s, targets);
} }
// the block is already under construction // the block is already under construction
BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this; BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;


@ -234,7 +234,7 @@ public class BlockInfoUnderConstruction extends BlockInfo {
blockRecoveryId = recoveryId; blockRecoveryId = recoveryId;
if (replicas.size() == 0) { if (replicas.size() == 0) {
NameNode.stateChangeLog.warn("BLOCK*" NameNode.stateChangeLog.warn("BLOCK*"
+ " INodeFileUnderConstruction.initLeaseRecovery:" + " BlockInfoUnderConstruction.initLeaseRecovery:"
+ " No blocks found, lease removed."); + " No blocks found, lease removed.");
} }


@ -55,8 +55,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@ -142,7 +140,7 @@ public class BlockManager {
private final long replicationRecheckInterval; private final long replicationRecheckInterval;
/** /**
* Mapping: Block -> { INode, datanodes, self ref } * Mapping: Block -> { BlockCollection, datanodes, self ref }
* Updated only in response to client-sent information. * Updated only in response to client-sent information.
*/ */
final BlocksMap blocksMap; final BlocksMap blocksMap;
@ -192,7 +190,7 @@ public class BlockManager {
public final short minReplication; public final short minReplication;
/** Default number of replicas */ /** Default number of replicas */
public final int defaultReplication; public final int defaultReplication;
/** The maximum number of entries returned by getCorruptInodes() */ /** value returned by MAX_CORRUPT_FILES_RETURNED */
final int maxCorruptFilesReturned; final int maxCorruptFilesReturned;
/** variable to enable check for enough racks */ /** variable to enable check for enough racks */
@ -384,7 +382,7 @@ public class BlockManager {
numReplicas.decommissionedReplicas(); numReplicas.decommissionedReplicas();
if (block instanceof BlockInfo) { if (block instanceof BlockInfo) {
String fileName = ((BlockInfo)block).getINode().getFullPathName(); String fileName = ((BlockInfo)block).getBlockCollection().getName();
out.print(fileName + ": "); out.print(fileName + ": ");
} }
// l: == live:, d: == decommissioned c: == corrupt e: == excess // l: == live:, d: == decommissioned c: == corrupt e: == excess
@ -454,17 +452,17 @@ public class BlockManager {
* Commit the last block of the file and mark it as complete if it has * Commit the last block of the file and mark it as complete if it has
* meets the minimum replication requirement * meets the minimum replication requirement
* *
* @param fileINode file inode * @param bc block collection
* @param commitBlock - contains client reported block length and generation * @param commitBlock - contains client reported block length and generation
* @return true if the last block is changed to committed state. * @return true if the last block is changed to committed state.
* @throws IOException if the block does not have at least a minimal number * @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes. * of replicas reported from data-nodes.
*/ */
public boolean commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode, public boolean commitOrCompleteLastBlock(MutableBlockCollection bc,
Block commitBlock) throws IOException { Block commitBlock) throws IOException {
if(commitBlock == null) if(commitBlock == null)
return false; // not committing, this is a block allocation retry return false; // not committing, this is a block allocation retry
BlockInfo lastBlock = fileINode.getLastBlock(); BlockInfo lastBlock = bc.getLastBlock();
if(lastBlock == null) if(lastBlock == null)
return false; // no blocks in file yet return false; // no blocks in file yet
if(lastBlock.isComplete()) if(lastBlock.isComplete())
@ -472,22 +470,22 @@ public class BlockManager {
final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock); final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock);
if(countNodes(lastBlock).liveReplicas() >= minReplication) if(countNodes(lastBlock).liveReplicas() >= minReplication)
completeBlock(fileINode,fileINode.numBlocks()-1, false); completeBlock(bc, bc.numBlocks()-1, false);
return b; return b;
} }
/** /**
* Convert a specified block of the file to a complete block. * Convert a specified block of the file to a complete block.
* @param fileINode file * @param bc file
* @param blkIndex block index in the file * @param blkIndex block index in the file
* @throws IOException if the block does not have at least a minimal number * @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes. * of replicas reported from data-nodes.
*/ */
private BlockInfo completeBlock(final INodeFile fileINode, private BlockInfo completeBlock(final MutableBlockCollection bc,
final int blkIndex, boolean force) throws IOException { final int blkIndex, boolean force) throws IOException {
if(blkIndex < 0) if(blkIndex < 0)
return null; return null;
BlockInfo curBlock = fileINode.getBlocks()[blkIndex]; BlockInfo curBlock = bc.getBlocks()[blkIndex];
if(curBlock.isComplete()) if(curBlock.isComplete())
return curBlock; return curBlock;
BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock; BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock;
@ -500,7 +498,7 @@ public class BlockManager {
"Cannot complete block: block has not been COMMITTED by the client"); "Cannot complete block: block has not been COMMITTED by the client");
BlockInfo completeBlock = ucBlock.convertToCompleteBlock(); BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
// replace penultimate block in file // replace penultimate block in file
fileINode.setBlock(blkIndex, completeBlock); bc.setBlock(blkIndex, completeBlock);
// Since safe-mode only counts complete blocks, and we now have // Since safe-mode only counts complete blocks, and we now have
// one more complete block, we need to adjust the total up, and // one more complete block, we need to adjust the total up, and
@ -516,12 +514,12 @@ public class BlockManager {
return blocksMap.replaceBlock(completeBlock); return blocksMap.replaceBlock(completeBlock);
} }
private BlockInfo completeBlock(final INodeFile fileINode, private BlockInfo completeBlock(final MutableBlockCollection bc,
final BlockInfo block, boolean force) throws IOException { final BlockInfo block, boolean force) throws IOException {
BlockInfo[] fileBlocks = fileINode.getBlocks(); BlockInfo[] fileBlocks = bc.getBlocks();
for(int idx = 0; idx < fileBlocks.length; idx++) for(int idx = 0; idx < fileBlocks.length; idx++)
if(fileBlocks[idx] == block) { if(fileBlocks[idx] == block) {
return completeBlock(fileINode, idx, force); return completeBlock(bc, idx, force);
} }
return block; return block;
} }
@ -531,10 +529,10 @@ public class BlockManager {
* regardless of whether enough replicas are present. This is necessary * regardless of whether enough replicas are present. This is necessary
* when tailing edit logs as a Standby. * when tailing edit logs as a Standby.
*/ */
public BlockInfo forceCompleteBlock(final INodeFile fileINode, public BlockInfo forceCompleteBlock(final MutableBlockCollection bc,
final BlockInfoUnderConstruction block) throws IOException { final BlockInfoUnderConstruction block) throws IOException {
block.commitBlock(block); block.commitBlock(block);
return completeBlock(fileINode, block, true); return completeBlock(bc, block, true);
} }
@ -548,14 +546,14 @@ public class BlockManager {
* The method returns null if there is no partial block at the end. * The method returns null if there is no partial block at the end.
* The client is supposed to allocate a new block with the next call. * The client is supposed to allocate a new block with the next call.
* *
* @param fileINode file * @param bc file
* @return the last block locations if the block is partial or null otherwise * @return the last block locations if the block is partial or null otherwise
*/ */
public LocatedBlock convertLastBlockToUnderConstruction( public LocatedBlock convertLastBlockToUnderConstruction(
INodeFileUnderConstruction fileINode) throws IOException { MutableBlockCollection bc) throws IOException {
BlockInfo oldBlock = fileINode.getLastBlock(); BlockInfo oldBlock = bc.getLastBlock();
if(oldBlock == null || if(oldBlock == null ||
fileINode.getPreferredBlockSize() == oldBlock.getNumBytes()) bc.getPreferredBlockSize() == oldBlock.getNumBytes())
return null; return null;
assert oldBlock == getStoredBlock(oldBlock) : assert oldBlock == getStoredBlock(oldBlock) :
"last block of the file is not in blocksMap"; "last block of the file is not in blocksMap";
@ -563,7 +561,7 @@ public class BlockManager {
DatanodeDescriptor[] targets = getNodes(oldBlock); DatanodeDescriptor[] targets = getNodes(oldBlock);
BlockInfoUnderConstruction ucBlock = BlockInfoUnderConstruction ucBlock =
fileINode.setLastBlock(oldBlock, targets); bc.setLastBlock(oldBlock, targets);
blocksMap.replaceBlock(ucBlock); blocksMap.replaceBlock(ucBlock);
// Remove block from replication queue. // Remove block from replication queue.
@ -583,7 +581,7 @@ public class BlockManager {
// always decrement total blocks // always decrement total blocks
-1); -1);
final long fileLength = fileINode.computeContentSummary().getLength(); final long fileLength = bc.computeContentSummary().getLength();
final long pos = fileLength - ucBlock.getNumBytes(); final long pos = fileLength - ucBlock.getNumBytes();
return createLocatedBlock(ucBlock, pos, AccessMode.WRITE); return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
} }
@ -923,8 +921,8 @@ public class BlockManager {
" does not exist. "); " does not exist. ");
} }
INodeFile inode = storedBlock.getINode(); BlockCollection bc = storedBlock.getBlockCollection();
if (inode == null) { if (bc == null) {
NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " + NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " +
"block " + storedBlock + "block " + storedBlock +
" could not be marked as corrupt as it" + " could not be marked as corrupt as it" +
@ -938,7 +936,7 @@ public class BlockManager {
// Add this replica to corruptReplicas Map // Add this replica to corruptReplicas Map
corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason); corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason);
if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) { if (countNodes(storedBlock).liveReplicas() >= bc.getReplication()) {
// the block is over-replicated so invalidate the replicas immediately // the block is over-replicated so invalidate the replicas immediately
invalidateBlock(storedBlock, node); invalidateBlock(storedBlock, node);
} else if (namesystem.isPopulatingReplQueues()) { } else if (namesystem.isPopulatingReplQueues()) {
@ -1051,7 +1049,7 @@ public class BlockManager {
int requiredReplication, numEffectiveReplicas; int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes, liveReplicaNodes; List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
DatanodeDescriptor srcNode; DatanodeDescriptor srcNode;
INodeFile fileINode = null; BlockCollection bc = null;
int additionalReplRequired; int additionalReplRequired;
int scheduledWork = 0; int scheduledWork = 0;
@ -1063,15 +1061,15 @@ public class BlockManager {
for (int priority = 0; priority < blocksToReplicate.size(); priority++) { for (int priority = 0; priority < blocksToReplicate.size(); priority++) {
for (Block block : blocksToReplicate.get(priority)) { for (Block block : blocksToReplicate.get(priority)) {
// block should belong to a file // block should belong to a file
fileINode = blocksMap.getINode(block); bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append // abandoned block or block reopened for append
if(fileINode == null || fileINode.isUnderConstruction()) { if(bc == null || bc instanceof MutableBlockCollection) {
neededReplications.remove(block, priority); // remove from neededReplications neededReplications.remove(block, priority); // remove from neededReplications
neededReplications.decrementReplicationIndex(priority); neededReplications.decrementReplicationIndex(priority);
continue; continue;
} }
requiredReplication = fileINode.getReplication(); requiredReplication = bc.getReplication();
// get a source data-node // get a source data-node
containingNodes = new ArrayList<DatanodeDescriptor>(); containingNodes = new ArrayList<DatanodeDescriptor>();
@ -1107,7 +1105,7 @@ public class BlockManager {
} else { } else {
additionalReplRequired = 1; // Needed on a new rack additionalReplRequired = 1; // Needed on a new rack
} }
work.add(new ReplicationWork(block, fileINode, srcNode, work.add(new ReplicationWork(block, bc, srcNode,
containingNodes, liveReplicaNodes, additionalReplRequired, containingNodes, liveReplicaNodes, additionalReplRequired,
priority)); priority));
} }
@ -1129,8 +1127,8 @@ public class BlockManager {
// choose replication targets: NOT HOLDING THE GLOBAL LOCK // choose replication targets: NOT HOLDING THE GLOBAL LOCK
// It is costly to extract the filename for which chooseTargets is called, // It is costly to extract the filename for which chooseTargets is called,
// so for now we pass in the Inode itself. // so for now we pass in the block collection itself.
rw.targets = blockplacement.chooseTarget(rw.fileINode, rw.targets = blockplacement.chooseTarget(rw.bc,
rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes, rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes,
excludedNodes, rw.block.getNumBytes()); excludedNodes, rw.block.getNumBytes());
} }
@ -1149,15 +1147,15 @@ public class BlockManager {
int priority = rw.priority; int priority = rw.priority;
// Recheck since global lock was released // Recheck since global lock was released
// block should belong to a file // block should belong to a file
fileINode = blocksMap.getINode(block); bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append // abandoned block or block reopened for append
if(fileINode == null || fileINode.isUnderConstruction()) { if(bc == null || bc instanceof MutableBlockCollection) {
neededReplications.remove(block, priority); // remove from neededReplications neededReplications.remove(block, priority); // remove from neededReplications
rw.targets = null; rw.targets = null;
neededReplications.decrementReplicationIndex(priority); neededReplications.decrementReplicationIndex(priority);
continue; continue;
} }
requiredReplication = fileINode.getReplication(); requiredReplication = bc.getReplication();
// do not schedule more if enough replicas is already pending // do not schedule more if enough replicas is already pending
NumberReplicas numReplicas = countNodes(block); NumberReplicas numReplicas = countNodes(block);
@ -1916,7 +1914,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
int numCurrentReplica = countLiveNodes(storedBlock); int numCurrentReplica = countLiveNodes(storedBlock);
if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
&& numCurrentReplica >= minReplication) { && numCurrentReplica >= minReplication) {
completeBlock(storedBlock.getINode(), storedBlock, false); completeBlock((MutableBlockCollection)storedBlock.getBlockCollection(), storedBlock, false);
} else if (storedBlock.isComplete()) { } else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block // check whether safe replication is reached for the block
// only complete blocks are counted towards that. // only complete blocks are counted towards that.
@ -1944,7 +1942,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
} else { } else {
storedBlock = block; storedBlock = block;
} }
if (storedBlock == null || storedBlock.getINode() == null) { if (storedBlock == null || storedBlock.getBlockCollection() == null) {
// If this block does not belong to any file, then we are done. // If this block does not belong to any file, then we are done.
NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on " NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
+ node + " size " + block.getNumBytes() + node + " size " + block.getNumBytes()
@ -1954,8 +1952,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
return block; return block;
} }
assert storedBlock != null : "Block must be stored by now"; assert storedBlock != null : "Block must be stored by now";
INodeFile fileINode = storedBlock.getINode(); BlockCollection bc = storedBlock.getBlockCollection();
assert fileINode != null : "Block must belong to a file"; assert bc != null : "Block must belong to a file";
// add block to the datanode // add block to the datanode
boolean added = node.addBlock(storedBlock); boolean added = node.addBlock(storedBlock);
@ -1981,7 +1979,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED && if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
numLiveReplicas >= minReplication) { numLiveReplicas >= minReplication) {
storedBlock = completeBlock(fileINode, storedBlock, false); storedBlock = completeBlock((MutableBlockCollection)bc, storedBlock, false);
} else if (storedBlock.isComplete()) { } else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block // check whether safe replication is reached for the block
// only complete blocks are counted towards that // only complete blocks are counted towards that
@ -1992,7 +1990,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
} }
// if file is under construction, then done for now // if file is under construction, then done for now
if (fileINode.isUnderConstruction()) { if (bc instanceof MutableBlockCollection) {
return storedBlock; return storedBlock;
} }
@ -2002,7 +2000,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
} }
// handle underReplication/overReplication // handle underReplication/overReplication
short fileReplication = fileINode.getReplication(); short fileReplication = bc.getReplication();
if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) { if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
neededReplications.remove(storedBlock, numCurrentReplica, neededReplications.remove(storedBlock, numCurrentReplica,
num.decommissionedReplicas(), fileReplication); num.decommissionedReplicas(), fileReplication);
@ -2129,8 +2127,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
* what happened with it. * what happened with it.
*/ */
private MisReplicationResult processMisReplicatedBlock(BlockInfo block) { private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
INodeFile fileINode = block.getINode(); BlockCollection bc = block.getBlockCollection();
if (fileINode == null) { if (bc == null) {
// block does not belong to any file // block does not belong to any file
addToInvalidates(block); addToInvalidates(block);
return MisReplicationResult.INVALID; return MisReplicationResult.INVALID;
@ -2141,7 +2139,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
return MisReplicationResult.UNDER_CONSTRUCTION; return MisReplicationResult.UNDER_CONSTRUCTION;
} }
// calculate current replication // calculate current replication
short expectedReplication = fileINode.getReplication(); short expectedReplication = bc.getReplication();
NumberReplicas num = countNodes(block); NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas(); int numCurrentReplica = num.liveReplicas();
// add to under-replicated queue if need to be // add to under-replicated queue if need to be
@ -2258,7 +2256,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
BlockPlacementPolicy replicator) { BlockPlacementPolicy replicator) {
assert namesystem.hasWriteLock(); assert namesystem.hasWriteLock();
// first form a rack to datanodes map and // first form a rack to datanodes map and
INodeFile inode = getINode(b); BlockCollection bc = getBlockCollection(b);
final Map<String, List<DatanodeDescriptor>> rackMap final Map<String, List<DatanodeDescriptor>> rackMap
= new HashMap<String, List<DatanodeDescriptor>>(); = new HashMap<String, List<DatanodeDescriptor>>();
for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator(); for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
@ -2298,7 +2296,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
|| (addedNode != null && !priSet.contains(addedNode))) ) { || (addedNode != null && !priSet.contains(addedNode))) ) {
cur = delNodeHint; cur = delNodeHint;
} else { // regular excessive replica removal } else { // regular excessive replica removal
cur = replicator.chooseReplicaToDelete(inode, b, replication, cur = replicator.chooseReplicaToDelete(bc, b, replication,
priSet, remains); priSet, remains);
} }
firstOne = false; firstOne = false;
@ -2379,8 +2377,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
// necessary. In that case, put block on a possibly-will- // necessary. In that case, put block on a possibly-will-
// be-replicated list. // be-replicated list.
// //
INodeFile fileINode = blocksMap.getINode(block); BlockCollection bc = blocksMap.getBlockCollection(block);
if (fileINode != null) { if (bc != null) {
namesystem.decrementSafeBlockCount(block); namesystem.decrementSafeBlockCount(block);
updateNeededReplications(block, -1, 0); updateNeededReplications(block, -1, 0);
} }
@ -2611,7 +2609,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
NumberReplicas num) { NumberReplicas num) {
int curReplicas = num.liveReplicas(); int curReplicas = num.liveReplicas();
int curExpectedReplicas = getReplication(block); int curExpectedReplicas = getReplication(block);
INodeFile fileINode = blocksMap.getINode(block); BlockCollection bc = blocksMap.getBlockCollection(block);
Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block); Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
StringBuilder nodeList = new StringBuilder(); StringBuilder nodeList = new StringBuilder();
while (nodeIter.hasNext()) { while (nodeIter.hasNext()) {
@ -2624,7 +2622,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
+ ", corrupt replicas: " + num.corruptReplicas() + ", corrupt replicas: " + num.corruptReplicas()
+ ", decommissioned replicas: " + num.decommissionedReplicas() + ", decommissioned replicas: " + num.decommissionedReplicas()
+ ", excess replicas: " + num.excessReplicas() + ", excess replicas: " + num.excessReplicas()
+ ", Is Open File: " + fileINode.isUnderConstruction() + ", Is Open File: " + (bc instanceof MutableBlockCollection)
+ ", Datanodes having this block: " + nodeList + ", Current Datanode: " + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
+ srcNode + ", Is current datanode decommissioning: " + srcNode + ", Is current datanode decommissioning: "
+ srcNode.isDecommissionInProgress()); + srcNode.isDecommissionInProgress());
@ -2639,8 +2637,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
final Iterator<? extends Block> it = srcNode.getBlockIterator(); final Iterator<? extends Block> it = srcNode.getBlockIterator();
while(it.hasNext()) { while(it.hasNext()) {
final Block block = it.next(); final Block block = it.next();
INodeFile fileINode = blocksMap.getINode(block); BlockCollection bc = blocksMap.getBlockCollection(block);
short expectedReplication = fileINode.getReplication(); short expectedReplication = bc.getReplication();
NumberReplicas num = countNodes(block); NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas(); int numCurrentReplica = num.liveReplicas();
if (numCurrentReplica > expectedReplication) { if (numCurrentReplica > expectedReplication) {
@ -2662,9 +2660,9 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
final Iterator<? extends Block> it = srcNode.getBlockIterator(); final Iterator<? extends Block> it = srcNode.getBlockIterator();
while(it.hasNext()) { while(it.hasNext()) {
final Block block = it.next(); final Block block = it.next();
INodeFile fileINode = blocksMap.getINode(block); BlockCollection bc = blocksMap.getBlockCollection(block);
if (fileINode != null) { if (bc != null) {
NumberReplicas num = countNodes(block); NumberReplicas num = countNodes(block);
int curReplicas = num.liveReplicas(); int curReplicas = num.liveReplicas();
int curExpectedReplicas = getReplication(block); int curExpectedReplicas = getReplication(block);
@ -2679,7 +2677,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) { if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
decommissionOnlyReplicas++; decommissionOnlyReplicas++;
} }
if (fileINode.isUnderConstruction()) { if (bc instanceof MutableBlockCollection) {
underReplicatedInOpenFiles++; underReplicatedInOpenFiles++;
} }
} }
@ -2782,12 +2780,11 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
/* get replication factor of a block */ /* get replication factor of a block */
private int getReplication(Block block) { private int getReplication(Block block) {
INodeFile fileINode = blocksMap.getINode(block); BlockCollection bc = blocksMap.getBlockCollection(block);
if (fileINode == null) { // block does not belong to any file if (bc == null) { // block does not belong to any file
return 0; return 0;
} }
assert !fileINode.isDirectory() : "Block cannot belong to a directory."; return bc.getReplication();
return fileINode.getReplication();
} }
@ -2859,12 +2856,12 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
return this.neededReplications.getCorruptBlockSize(); return this.neededReplications.getCorruptBlockSize();
} }
public BlockInfo addINode(BlockInfo block, INodeFile iNode) { public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) {
return blocksMap.addINode(block, iNode); return blocksMap.addBlockCollection(block, bc);
} }
public INodeFile getINode(Block b) { public BlockCollection getBlockCollection(Block b) {
return blocksMap.getINode(b); return blocksMap.getBlockCollection(b);
} }
/** @return an iterator of the datanodes. */ /** @return an iterator of the datanodes. */
@ -3003,7 +3000,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
private static class ReplicationWork { private static class ReplicationWork {
private Block block; private Block block;
private INodeFile fileINode; private BlockCollection bc;
private DatanodeDescriptor srcNode; private DatanodeDescriptor srcNode;
private List<DatanodeDescriptor> containingNodes; private List<DatanodeDescriptor> containingNodes;
@ -3014,14 +3011,14 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
private int priority; private int priority;
public ReplicationWork(Block block, public ReplicationWork(Block block,
INodeFile fileINode, BlockCollection bc,
DatanodeDescriptor srcNode, DatanodeDescriptor srcNode,
List<DatanodeDescriptor> containingNodes, List<DatanodeDescriptor> containingNodes,
List<DatanodeDescriptor> liveReplicaNodes, List<DatanodeDescriptor> liveReplicaNodes,
int additionalReplRequired, int additionalReplRequired,
int priority) { int priority) {
this.block = block; this.block = block;
this.fileINode = fileINode; this.bc = bc;
this.srcNode = srcNode; this.srcNode = srcNode;
this.containingNodes = containingNodes; this.containingNodes = containingNodes;
this.liveReplicaNodes = liveReplicaNodes; this.liveReplicaNodes = liveReplicaNodes;

View File

@ -29,7 +29,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node; import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ReflectionUtils;
@ -111,11 +110,11 @@ public abstract class BlockPlacementPolicy {
* choose <i>numOfReplicas</i> data nodes for <i>writer</i> * choose <i>numOfReplicas</i> data nodes for <i>writer</i>
* If not, return as many as we can. * If not, return as many as we can.
* The base implementation extracts the pathname of the file from the * The base implementation extracts the pathname of the file from the
* specified srcInode, but this could be a costly operation depending on the * specified srcBC, but this could be a costly operation depending on the
* file system implementation. Concrete implementations of this class should * file system implementation. Concrete implementations of this class should
* override this method to avoid this overhead. * override this method to avoid this overhead.
* *
* @param srcInode The inode of the file for which chooseTarget is being invoked. * @param srcBC block collection of file for which chooseTarget is invoked.
* @param numOfReplicas additional number of replicas wanted. * @param numOfReplicas additional number of replicas wanted.
* @param writer the writer's machine, null if not in the cluster. * @param writer the writer's machine, null if not in the cluster.
* @param chosenNodes datanodes that have been chosen as targets. * @param chosenNodes datanodes that have been chosen as targets.
@ -123,13 +122,13 @@ public abstract class BlockPlacementPolicy {
* @return array of DatanodeDescriptor instances chosen as target * @return array of DatanodeDescriptor instances chosen as target
* and sorted as a pipeline. * and sorted as a pipeline.
*/ */
DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode, DatanodeDescriptor[] chooseTarget(BlockCollection srcBC,
int numOfReplicas, int numOfReplicas,
DatanodeDescriptor writer, DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes, List<DatanodeDescriptor> chosenNodes,
HashMap<Node, Node> excludedNodes, HashMap<Node, Node> excludedNodes,
long blocksize) { long blocksize) {
return chooseTarget(srcInode.getFullPathName(), numOfReplicas, writer, return chooseTarget(srcBC.getName(), numOfReplicas, writer,
chosenNodes, excludedNodes, blocksize); chosenNodes, excludedNodes, blocksize);
} }
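As the updated javadoc notes, the default chooseTarget(BlockCollection, ...) simply forwards to the path-based overload via srcBC.getName(), which can be costly, so concrete policies are expected to override it. A hedged sketch of that override pattern follows; the types and method shapes are simplified placeholders, not the actual BlockPlacementPolicy contract.

import java.util.Collections;
import java.util.List;

// Simplified stand-ins for the real placement-policy types.
interface NamedCollection {
  String getName();                        // potentially costly path reconstruction
}

abstract class SimplePlacementPolicy {
  // Path-based overload: the "slow" entry point.
  abstract List<String> chooseTarget(String srcPath, int numOfReplicas, long blocksize);

  // Default collection-based overload falls back to the path lookup.
  List<String> chooseTarget(NamedCollection srcBC, int numOfReplicas, long blocksize) {
    return chooseTarget(srcBC.getName(), numOfReplicas, blocksize);
  }
}

class RackAwarePolicy extends SimplePlacementPolicy {
  @Override
  List<String> chooseTarget(String srcPath, int numOfReplicas, long blocksize) {
    // Placement decision would go here; the path is only needed for logging.
    return Collections.emptyList();
  }

  // Override the collection overload so the (possibly expensive) name lookup is skipped.
  @Override
  List<String> chooseTarget(NamedCollection srcBC, int numOfReplicas, long blocksize) {
    return chooseTarget("<unnamed>", numOfReplicas, blocksize);
  }
}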
@ -150,7 +149,7 @@ public abstract class BlockPlacementPolicy {
* Decide whether deleting the specified replica of the block still makes * Decide whether deleting the specified replica of the block still makes
* the block conform to the configured block placement policy. * the block conform to the configured block placement policy.
* *
* @param srcInode The inode of the file to which the block-to-be-deleted belongs * @param srcBC block collection of file to which block-to-be-deleted belongs
* @param block The block to be deleted * @param block The block to be deleted
* @param replicationFactor The required number of replicas for this block * @param replicationFactor The required number of replicas for this block
* @param existingReplicas The replica locations of this block that are present * @param existingReplicas The replica locations of this block that are present
@ -159,7 +158,7 @@ public abstract class BlockPlacementPolicy {
listed in the previous parameter. listed in the previous parameter.
* @return the replica that is the best candidate for deletion * @return the replica that is the best candidate for deletion
*/ */
abstract public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo srcInode, abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcBC,
Block block, Block block,
short replicationFactor, short replicationFactor,
Collection<DatanodeDescriptor> existingReplicas, Collection<DatanodeDescriptor> existingReplicas,

View File

@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node; import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.net.NodeBase;
@ -547,7 +546,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
} }
@Override @Override
public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode, public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
Block block, Block block,
short replicationFactor, short replicationFactor,
Collection<DatanodeDescriptor> first, Collection<DatanodeDescriptor> first,

View File

@ -20,13 +20,12 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.Iterator; import java.util.Iterator;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.GSet; import org.apache.hadoop.hdfs.util.GSet;
import org.apache.hadoop.hdfs.util.LightWeightGSet; import org.apache.hadoop.hdfs.util.LightWeightGSet;
/** /**
* This class maintains the map from a block to its metadata. * This class maintains the map from a block to its metadata.
* block's metadata currently includes INode it belongs to and * block's metadata currently includes blockCollection it belongs to and
* the datanodes that store the block. * the datanodes that store the block.
*/ */
class BlocksMap { class BlocksMap {
@ -93,21 +92,21 @@ class BlocksMap {
blocks = null; blocks = null;
} }
INodeFile getINode(Block b) { BlockCollection getBlockCollection(Block b) {
BlockInfo info = blocks.get(b); BlockInfo info = blocks.get(b);
return (info != null) ? info.getINode() : null; return (info != null) ? info.getBlockCollection() : null;
} }
/** /**
* Add block b belonging to the specified file inode to the map. * Add block b belonging to the specified block collection to the map.
*/ */
BlockInfo addINode(BlockInfo b, INodeFile iNode) { BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) {
BlockInfo info = blocks.get(b); BlockInfo info = blocks.get(b);
if (info != b) { if (info != b) {
info = b; info = b;
blocks.put(info); blocks.put(info);
} }
info.setINode(iNode); info.setBlockCollection(bc);
return info; return info;
} }
@ -121,7 +120,7 @@ class BlocksMap {
if (blockInfo == null) if (blockInfo == null)
return; return;
blockInfo.setINode(null); blockInfo.setBlockCollection(null);
for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) { for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
DatanodeDescriptor dn = blockInfo.getDatanode(idx); DatanodeDescriptor dn = blockInfo.getDatanode(idx);
dn.removeBlock(blockInfo); // remove from the list and wipe the location dn.removeBlock(blockInfo); // remove from the list and wipe the location
@ -169,7 +168,7 @@ class BlocksMap {
boolean removed = node.removeBlock(info); boolean removed = node.removeBlock(info);
if (info.getDatanode(0) == null // no datanodes left if (info.getDatanode(0) == null // no datanodes left
&& info.getINode() == null) { // does not belong to a file && info.getBlockCollection() == null) { // does not belong to a file
blocks.remove(b); // remove block from the map blocks.remove(b); // remove block from the map
} }
return removed; return removed;
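The BlocksMap changes above replace the INode back-pointer with a BlockCollection back-pointer while keeping the same add/get/remove shape. Below is a minimal sketch of that pattern using a plain HashMap and hypothetical types; the real BlocksMap uses a LightWeightGSet and stores the back-pointer on the block object itself.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins for Block, BlockInfo and BlockCollection.
final class BlockKey {
  final long id;
  BlockKey(long id) { this.id = id; }
  @Override public boolean equals(Object o) { return o instanceof BlockKey && ((BlockKey) o).id == id; }
  @Override public int hashCode() { return Long.hashCode(id); }
}

final class BlockMeta {
  Object collection;                       // back-pointer to the owning collection, may be null
  int datanodes;                           // simplified replica count
}

class SimpleBlocksMap {
  private final Map<BlockKey, BlockMeta> blocks = new HashMap<>();

  /** Add block b to the map and record the collection it belongs to. */
  BlockMeta addBlockCollection(BlockKey b, Object bc) {
    BlockMeta info = blocks.computeIfAbsent(b, k -> new BlockMeta());
    info.collection = bc;
    return info;
  }

  /** @return the owning collection, or null if the block is not mapped. */
  Object getBlockCollection(BlockKey b) {
    BlockMeta info = blocks.get(b);
    return info != null ? info.collection : null;
  }

  /** Drop the block entirely once no datanode and no collection refers to it. */
  void removeIfOrphaned(BlockKey b) {
    BlockMeta info = blocks.get(b);
    if (info != null && info.datanodes == 0 && info.collection == null) {
      blocks.remove(b);
    }
  }
}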

View File

@ -15,24 +15,30 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.fs.ContentSummary;
/** /**
* This interface is used used the pluggable block placement policy * This interface is used by the block manager to expose a
* to expose a few characteristics of an Inode. * few characteristics of a collection of Block/BlockUnderConstruction.
*/ */
@InterfaceAudience.Private public interface MutableBlockCollection extends BlockCollection {
public interface FSInodeInfo { /**
* Set block
*/
public void setBlock(int idx, BlockInfo blk);
/** /**
* a string representation of an inode * Convert the last block of the collection to an under-construction block.
* * Set its locations.
* @return the full pathname (from root) that this inode represents
*/ */
public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
public String getFullPathName() ; DatanodeDescriptor[] targets) throws IOException;
} }
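The file above turns the old FSInodeInfo interface into MutableBlockCollection, adding the two mutation hooks the block manager needs. A hedged sketch of a class that could satisfy a simplified version of this contract is shown below; the block types are placeholders, not the real BlockInfo/BlockInfoUnderConstruction.

import java.util.ArrayList;
import java.util.List;

// Placeholder block types; the real interface uses BlockInfo and BlockInfoUnderConstruction.
class SketchBlock { long numBytes; }
class SketchBlockUC extends SketchBlock { String[] targets; }

interface SketchMutableBlockCollection {
  void setBlock(int idx, SketchBlock blk);
  SketchBlockUC setLastBlock(SketchBlock lastBlock, String[] targets);
}

/** A file-under-construction-like holder implementing the mutable collection contract. */
class SketchFileUnderConstruction implements SketchMutableBlockCollection {
  private final List<SketchBlock> blocks = new ArrayList<>();

  void addBlock(SketchBlock b) { blocks.add(b); }

  @Override
  public void setBlock(int idx, SketchBlock blk) {
    blocks.set(idx, blk);                  // e.g. replace a committed block with its completed form
  }

  @Override
  public SketchBlockUC setLastBlock(SketchBlock lastBlock, String[] targets) {
    // Convert the tail block to an under-construction block and record its locations.
    // Assumes the collection is non-empty; a real implementation would check.
    SketchBlockUC uc = new SketchBlockUC();
    uc.numBytes = lastBlock.numBytes;
    uc.targets = targets;
    blocks.set(blocks.size() - 1, uc);
    return uc;
  }
}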

View File

@ -235,6 +235,9 @@ class BPServiceActor implements Runnable {
} }
void reportBadBlocks(ExtendedBlock block) { void reportBadBlocks(ExtendedBlock block) {
if (bpRegistration == null) {
return;
}
DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) }; DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) }; LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) };

View File

@ -860,7 +860,7 @@ public class DataNode extends Configured
*/ */
public String getDisplayName() { public String getDisplayName() {
// NB: our DatanodeID may not be set yet // NB: our DatanodeID may not be set yet
return hostName + ":" + getIpcPort(); return hostName + ":" + getXferPort();
} }
/** /**
@ -877,7 +877,6 @@ public class DataNode extends Configured
/** /**
* @return the datanode's IPC port * @return the datanode's IPC port
*/ */
@VisibleForTesting
public int getIpcPort() { public int getIpcPort() {
return ipcServer.getListenerAddress().getPort(); return ipcServer.getListenerAddress().getPort();
} }

View File

@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.SocketInputWrapper;
import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum;
@ -83,13 +84,30 @@ class DataXceiver extends Receiver implements Runnable {
private final DataXceiverServer dataXceiverServer; private final DataXceiverServer dataXceiverServer;
private long opStartTime; //the start time of receiving an Op private long opStartTime; //the start time of receiving an Op
private final SocketInputWrapper socketInputWrapper;
/**
* Client Name used in previous operation. Not available on first request
* on the socket.
*/
private String previousOpClientName;
public DataXceiver(Socket s, DataNode datanode, public static DataXceiver create(Socket s, DataNode dn,
DataXceiverServer dataXceiverServer) throws IOException {
SocketInputWrapper iw = NetUtils.getInputStream(s);
return new DataXceiver(s, iw, dn, dataXceiverServer);
}
private DataXceiver(Socket s,
SocketInputWrapper socketInput,
DataNode datanode,
DataXceiverServer dataXceiverServer) throws IOException { DataXceiverServer dataXceiverServer) throws IOException {
super(new DataInputStream(new BufferedInputStream( super(new DataInputStream(new BufferedInputStream(
NetUtils.getInputStream(s), HdfsConstants.SMALL_BUFFER_SIZE))); socketInput, HdfsConstants.SMALL_BUFFER_SIZE)));
this.s = s; this.s = s;
this.socketInputWrapper = socketInput;
this.isLocal = s.getInetAddress().equals(s.getLocalAddress()); this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
this.datanode = datanode; this.datanode = datanode;
this.dnConf = datanode.getDnConf(); this.dnConf = datanode.getDnConf();
@ -110,7 +128,11 @@ class DataXceiver extends Receiver implements Runnable {
*/ */
private void updateCurrentThreadName(String status) { private void updateCurrentThreadName(String status) {
StringBuilder sb = new StringBuilder(); StringBuilder sb = new StringBuilder();
sb.append("DataXceiver for client ").append(remoteAddress); sb.append("DataXceiver for client ");
if (previousOpClientName != null) {
sb.append(previousOpClientName).append(" at ");
}
sb.append(remoteAddress);
if (status != null) { if (status != null) {
sb.append(" [").append(status).append("]"); sb.append(" [").append(status).append("]");
} }
@ -128,8 +150,6 @@ class DataXceiver extends Receiver implements Runnable {
Op op = null; Op op = null;
dataXceiverServer.childSockets.add(s); dataXceiverServer.childSockets.add(s);
try { try {
int stdTimeout = s.getSoTimeout();
// We process requests in a loop, and stay around for a short timeout. // We process requests in a loop, and stay around for a short timeout.
// This optimistic behaviour allows the other end to reuse connections. // This optimistic behaviour allows the other end to reuse connections.
// Setting keepalive timeout to 0 disable this behavior. // Setting keepalive timeout to 0 disable this behavior.
@ -139,7 +159,9 @@ class DataXceiver extends Receiver implements Runnable {
try { try {
if (opsProcessed != 0) { if (opsProcessed != 0) {
assert dnConf.socketKeepaliveTimeout > 0; assert dnConf.socketKeepaliveTimeout > 0;
s.setSoTimeout(dnConf.socketKeepaliveTimeout); socketInputWrapper.setTimeout(dnConf.socketKeepaliveTimeout);
} else {
socketInputWrapper.setTimeout(dnConf.socketTimeout);
} }
op = readOp(); op = readOp();
} catch (InterruptedIOException ignored) { } catch (InterruptedIOException ignored) {
@ -160,7 +182,7 @@ class DataXceiver extends Receiver implements Runnable {
// restore normal timeout // restore normal timeout
if (opsProcessed != 0) { if (opsProcessed != 0) {
s.setSoTimeout(stdTimeout); s.setSoTimeout(dnConf.socketTimeout);
} }
opStartTime = now(); opStartTime = now();
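The DataXceiver change above applies a short keep-alive timeout only after the first op and restores the normal socket timeout before processing each op body. A simplified sketch of that per-iteration timeout pattern on a plain java.net.Socket follows; the op handling is a placeholder and the keep-alive handling is reduced to its essentials.

import java.io.IOException;
import java.io.InputStream;
import java.net.Socket;
import java.net.SocketTimeoutException;

class KeepAliveReader {
  private final int socketTimeoutMs;       // normal per-op read timeout
  private final int keepAliveTimeoutMs;    // short timeout while waiting for a reused connection

  KeepAliveReader(int socketTimeoutMs, int keepAliveTimeoutMs) {
    this.socketTimeoutMs = socketTimeoutMs;
    this.keepAliveTimeoutMs = keepAliveTimeoutMs;
  }

  void serve(Socket s) throws IOException {
    InputStream in = s.getInputStream();
    int opsProcessed = 0;
    while (true) {
      // Wait for the next op header: full timeout for the first op,
      // short keep-alive timeout for any subsequent op on the same socket.
      s.setSoTimeout(opsProcessed == 0 ? socketTimeoutMs : keepAliveTimeoutMs);
      int op;
      try {
        op = in.read();
      } catch (SocketTimeoutException idle) {
        break;                             // peer did not reuse the connection; stop quietly
      }
      if (op < 0) {
        break;                             // clean EOF
      }
      // Restore the normal timeout for the body of the op.
      s.setSoTimeout(socketTimeoutMs);
      handleOp(op);
      opsProcessed++;
    }
  }

  private void handleOp(int op) {
    // Placeholder: the real code dispatches to readBlock/writeBlock/etc.
  }
}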
@ -190,6 +212,8 @@ class DataXceiver extends Receiver implements Runnable {
final String clientName, final String clientName,
final long blockOffset, final long blockOffset,
final long length) throws IOException { final long length) throws IOException {
previousOpClientName = clientName;
OutputStream baseStream = NetUtils.getOutputStream(s, OutputStream baseStream = NetUtils.getOutputStream(s,
dnConf.socketWriteTimeout); dnConf.socketWriteTimeout);
DataOutputStream out = new DataOutputStream(new BufferedOutputStream( DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
@ -283,7 +307,8 @@ class DataXceiver extends Receiver implements Runnable {
final long maxBytesRcvd, final long maxBytesRcvd,
final long latestGenerationStamp, final long latestGenerationStamp,
DataChecksum requestedChecksum) throws IOException { DataChecksum requestedChecksum) throws IOException {
updateCurrentThreadName("Receiving block " + block + " client=" + clientname); previousOpClientName = clientname;
updateCurrentThreadName("Receiving block " + block);
final boolean isDatanode = clientname.length() == 0; final boolean isDatanode = clientname.length() == 0;
final boolean isClient = !isDatanode; final boolean isClient = !isDatanode;
final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW
@ -490,7 +515,7 @@ class DataXceiver extends Receiver implements Runnable {
final DatanodeInfo[] targets) throws IOException { final DatanodeInfo[] targets) throws IOException {
checkAccess(null, true, blk, blockToken, checkAccess(null, true, blk, blockToken,
Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY); Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
previousOpClientName = clientName;
updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk); updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
final DataOutputStream out = new DataOutputStream( final DataOutputStream out = new DataOutputStream(

View File

@ -135,6 +135,7 @@ class DataXceiverServer implements Runnable {
try { try {
s = ss.accept(); s = ss.accept();
s.setTcpNoDelay(true); s.setTcpNoDelay(true);
// Timeouts are set within DataXceiver.run()
// Make sure the xceiver count is not exceeded // Make sure the xceiver count is not exceeded
int curXceiverCount = datanode.getXceiverCount(); int curXceiverCount = datanode.getXceiverCount();
@ -144,7 +145,8 @@ class DataXceiverServer implements Runnable {
+ maxXceiverCount); + maxXceiverCount);
} }
new Daemon(datanode.threadGroup, new DataXceiver(s, datanode, this)) new Daemon(datanode.threadGroup,
DataXceiver.create(s, datanode, this))
.start(); .start();
} catch (SocketTimeoutException ignored) { } catch (SocketTimeoutException ignored) {
// wake up to see if should continue to run // wake up to see if should continue to run

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.mortbay.jetty.nio.SelectChannelConnector; import org.mortbay.jetty.nio.SelectChannelConnector;
/** /**
@ -60,10 +61,7 @@ public class SecureDataNodeStarter implements Daemon {
@Override @Override
public void init(DaemonContext context) throws Exception { public void init(DaemonContext context) throws Exception {
System.err.println("Initializing secure datanode resources"); System.err.println("Initializing secure datanode resources");
// We should only start up a secure datanode in a Kerberos-secured cluster Configuration conf = new Configuration();
Configuration conf = new Configuration(); // Skip UGI method to not log in
if(!conf.get(HADOOP_SECURITY_AUTHENTICATION).equals("kerberos"))
throw new RuntimeException("Cannot start secure datanode in unsecure cluster");
// Stash command-line arguments for regular datanode // Stash command-line arguments for regular datanode
args = context.getArguments(); args = context.getArguments();
@ -98,7 +96,8 @@ public class SecureDataNodeStarter implements Daemon {
System.err.println("Successfully obtained privileged resources (streaming port = " System.err.println("Successfully obtained privileged resources (streaming port = "
+ ss + " ) (http listener port = " + listener.getConnection() +")"); + ss + " ) (http listener port = " + listener.getConnection() +")");
if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) { if ((ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) &&
UserGroupInformation.isSecurityEnabled()) {
throw new RuntimeException("Cannot start secure datanode with unprivileged ports"); throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
} }
System.err.println("Opened streaming server at " + streamingAddr); System.err.println("Opened streaming server at " + streamingAddr);

View File

@ -309,7 +309,7 @@ public class FSDirectory implements Closeable {
INodeFile newF = (INodeFile)newNode; INodeFile newF = (INodeFile)newNode;
BlockInfo[] blocks = newF.getBlocks(); BlockInfo[] blocks = newF.getBlocks();
for (int i = 0; i < blocks.length; i++) { for (int i = 0; i < blocks.length; i++) {
newF.setBlock(i, getBlockManager().addINode(blocks[i], newF)); newF.setBlock(i, getBlockManager().addBlockCollection(blocks[i], newF));
} }
} }
} finally { } finally {
@ -346,7 +346,7 @@ public class FSDirectory implements Closeable {
fileINode.getReplication(), fileINode.getReplication(),
BlockUCState.UNDER_CONSTRUCTION, BlockUCState.UNDER_CONSTRUCTION,
targets); targets);
getBlockManager().addINode(blockInfo, fileINode); getBlockManager().addBlockCollection(blockInfo, fileINode);
fileINode.addBlock(blockInfo); fileINode.addBlock(blockInfo);
if(NameNode.stateChangeLog.isDebugEnabled()) { if(NameNode.stateChangeLog.isDebugEnabled()) {
@ -1127,7 +1127,7 @@ public class FSDirectory implements Closeable {
int index = 0; int index = 0;
for (BlockInfo b : newnode.getBlocks()) { for (BlockInfo b : newnode.getBlocks()) {
BlockInfo info = getBlockManager().addINode(b, newnode); BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
newnode.setBlock(index, info); // inode refers to the block in BlocksMap newnode.setBlock(index, info); // inode refers to the block in BlocksMap
index++; index++;
} }

View File

@ -601,7 +601,7 @@ public class FSEditLogLoader {
// OP_ADD operations as each block is allocated. // OP_ADD operations as each block is allocated.
newBI = new BlockInfo(newBlock, file.getReplication()); newBI = new BlockInfo(newBlock, file.getReplication());
} }
fsNamesys.getBlockManager().addINode(newBI, file); fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI); file.addBlock(newBI);
fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock); fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
} }

View File

@ -203,6 +203,10 @@ public abstract class FSEditLogOp {
} }
<T extends AddCloseOp> T setBlocks(Block[] blocks) { <T extends AddCloseOp> T setBlocks(Block[] blocks) {
if (blocks.length > MAX_BLOCKS) {
throw new RuntimeException("Can't have more than " + MAX_BLOCKS +
" in an AddCloseOp.");
}
this.blocks = blocks; this.blocks = blocks;
return (T)this; return (T)this;
} }
@ -296,10 +300,18 @@ public abstract class FSEditLogOp {
} }
} }
static final public int MAX_BLOCKS = 1024 * 1024 * 64;
private static Block[] readBlocks( private static Block[] readBlocks(
DataInputStream in, DataInputStream in,
int logVersion) throws IOException { int logVersion) throws IOException {
int numBlocks = in.readInt(); int numBlocks = in.readInt();
if (numBlocks < 0) {
throw new IOException("invalid negative number of blocks");
} else if (numBlocks > MAX_BLOCKS) {
throw new IOException("invalid number of blocks: " + numBlocks +
". The maximum number of blocks per file is " + MAX_BLOCKS);
}
Block[] blocks = new Block[numBlocks]; Block[] blocks = new Block[numBlocks];
for (int i = 0; i < numBlocks; i++) { for (int i = 0; i < numBlocks; i++) {
Block blk = new Block(); Block blk = new Block();
@ -579,6 +591,7 @@ public abstract class FSEditLogOp {
String trg; String trg;
String[] srcs; String[] srcs;
long timestamp; long timestamp;
final static public int MAX_CONCAT_SRC = 1024 * 1024;
private ConcatDeleteOp() { private ConcatDeleteOp() {
super(OP_CONCAT_DELETE); super(OP_CONCAT_DELETE);
@ -594,7 +607,12 @@ public abstract class FSEditLogOp {
} }
ConcatDeleteOp setSources(String[] srcs) { ConcatDeleteOp setSources(String[] srcs) {
if (srcs.length > MAX_CONCAT_SRC) {
throw new RuntimeException("ConcatDeleteOp can only have " +
MAX_CONCAT_SRC + " sources at most.");
}
this.srcs = srcs; this.srcs = srcs;
return this; return this;
} }
@ -624,8 +642,8 @@ public abstract class FSEditLogOp {
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt(); this.length = in.readInt();
if (length < 3) { // trg, srcs.., timestamp if (length < 3) { // trg, srcs.., timestamp
throw new IOException("Incorrect data format. " throw new IOException("Incorrect data format " +
+ "Concat delete operation."); "for ConcatDeleteOp.");
} }
} }
this.trg = FSImageSerialization.readString(in); this.trg = FSImageSerialization.readString(in);
@ -635,6 +653,15 @@ public abstract class FSEditLogOp {
} else { } else {
srcSize = this.length - 1 - 1; // trg and timestamp srcSize = this.length - 1 - 1; // trg and timestamp
} }
if (srcSize < 0) {
throw new IOException("Incorrect data format. "
+ "ConcatDeleteOp cannot have a negative number of data " +
" sources.");
} else if (srcSize > MAX_CONCAT_SRC) {
throw new IOException("Incorrect data format. "
+ "ConcatDeleteOp can have at most " + MAX_CONCAT_SRC +
" sources, but we tried to have " + (length - 3) + " sources.");
}
this.srcs = new String [srcSize]; this.srcs = new String [srcSize];
for(int i=0; i<srcSize;i++) { for(int i=0; i<srcSize;i++) {
srcs[i]= FSImageSerialization.readString(in); srcs[i]= FSImageSerialization.readString(in);
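Both new guards in this file (MAX_BLOCKS for AddCloseOp and MAX_CONCAT_SRC for ConcatDeleteOp) follow the same pattern: validate a length read from the edit log before allocating an array of that size. A small sketch of that defensive-read pattern is below; the limit value and record type are illustrative only.

import java.io.DataInputStream;
import java.io.IOException;

class BoundedArrayReader {
  // Illustrative cap; the edit-log code uses MAX_BLOCKS / MAX_CONCAT_SRC for this purpose.
  static final int MAX_ENTRIES = 1024 * 1024;

  static long[] readEntries(DataInputStream in) throws IOException {
    int n = in.readInt();
    if (n < 0) {
      throw new IOException("invalid negative entry count: " + n);
    }
    if (n > MAX_ENTRIES) {
      // Refuse huge counts instead of letting a corrupt log trigger an enormous allocation.
      throw new IOException("invalid entry count: " + n + " (max " + MAX_ENTRIES + ")");
    }
    long[] entries = new long[n];
    for (int i = 0; i < n; i++) {
      entries[i] = in.readLong();
    }
    return entries;
  }
}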

View File

@ -1783,24 +1783,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
"Failed to close file " + src + "Failed to close file " + src +
". Lease recovery is in progress. Try again later."); ". Lease recovery is in progress. Try again later.");
} else { } else {
BlockInfoUnderConstruction lastBlock=pendingFile.getLastBlock(); final BlockInfo lastBlock = pendingFile.getLastBlock();
if(lastBlock != null && lastBlock.getBlockUCState() == if (lastBlock != null
BlockUCState.UNDER_RECOVERY) { && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
throw new RecoveryInProgressException( throw new RecoveryInProgressException("Recovery in progress, file ["
"Recovery in progress, file [" + src + "], " + + src + "], " + "lease owner [" + lease.getHolder() + "]");
"lease owner [" + lease.getHolder() + "]"); } else {
} else { throw new AlreadyBeingCreatedException("Failed to create file ["
throw new AlreadyBeingCreatedException( + src + "] for [" + holder + "] on client [" + clientMachine
"Failed to create file [" + src + "] for [" + holder + + "], because this file is already being created by ["
"] on client [" + clientMachine + + pendingFile.getClientName() + "] on ["
"], because this file is already being created by [" + + pendingFile.getClientMachine() + "]");
pendingFile.getClientName() + "] on [" + }
pendingFile.getClientMachine() + "]"); }
}
}
} }
} }
} }
/** /**
@ -2840,7 +2837,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (storedBlock == null) { if (storedBlock == null) {
throw new IOException("Block (=" + lastblock + ") not found"); throw new IOException("Block (=" + lastblock + ") not found");
} }
INodeFile iFile = storedBlock.getINode(); INodeFile iFile = (INodeFile) storedBlock.getBlockCollection();
if (!iFile.isUnderConstruction() || storedBlock.isComplete()) { if (!iFile.isUnderConstruction() || storedBlock.isComplete()) {
throw new IOException("Unexpected block (=" + lastblock throw new IOException("Unexpected block (=" + lastblock
+ ") since the file (=" + iFile.getLocalName() + ") since the file (=" + iFile.getLocalName()
@ -4135,7 +4132,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Returns whether the given block is one pointed-to by a file. * Returns whether the given block is one pointed-to by a file.
*/ */
private boolean isValidBlock(Block b) { private boolean isValidBlock(Block b) {
return (blockManager.getINode(b) != null); return (blockManager.getBlockCollection(b) != null);
} }
// Distributed upgrade manager // Distributed upgrade manager
@ -4394,7 +4391,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
// check file inode // check file inode
INodeFile file = storedBlock.getINode(); INodeFile file = (INodeFile) storedBlock.getBlockCollection();
if (file==null || !file.isUnderConstruction()) { if (file==null || !file.isUnderConstruction()) {
throw new IOException("The file " + storedBlock + throw new IOException("The file " + storedBlock +
" belonged to does not exist or it is not under construction."); " belonged to does not exist or it is not under construction.");
@ -4556,7 +4553,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (destinationExisted && dinfo.isDir()) { if (destinationExisted && dinfo.isDir()) {
Path spath = new Path(src); Path spath = new Path(src);
Path parent = spath.getParent(); Path parent = spath.getParent();
if (isRoot(parent)) { if (parent.isRoot()) {
overwrite = parent.toString(); overwrite = parent.toString();
} else { } else {
overwrite = parent.toString() + Path.SEPARATOR; overwrite = parent.toString() + Path.SEPARATOR;
@ -4569,10 +4566,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
leaseManager.changeLease(src, dst, overwrite, replaceBy); leaseManager.changeLease(src, dst, overwrite, replaceBy);
} }
private boolean isRoot(Path path) {
return path.getParent() == null;
}
/** /**
* Serializes leases. * Serializes leases.
@ -4710,7 +4703,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
while (blkIterator.hasNext()) { while (blkIterator.hasNext()) {
Block blk = blkIterator.next(); Block blk = blkIterator.next();
INode inode = blockManager.getINode(blk); INode inode = (INodeFile) blockManager.getBlockCollection(blk);
skip++; skip++;
if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) { if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
String src = FSDirectory.getFullPathName(inode); String src = FSDirectory.getFullPathName(inode);

View File

@ -27,6 +27,8 @@ import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.SecurityUtil;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@ -34,7 +36,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.JspHelper;
@ -83,11 +84,11 @@ public class GetImageServlet extends HttpServlet {
(Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF); (Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF);
if(UserGroupInformation.isSecurityEnabled() && if(UserGroupInformation.isSecurityEnabled() &&
!isValidRequestor(request.getRemoteUser(), conf)) { !isValidRequestor(request.getUserPrincipal().getName(), conf)) {
response.sendError(HttpServletResponse.SC_FORBIDDEN, response.sendError(HttpServletResponse.SC_FORBIDDEN,
"Only Namenode and Secondary Namenode may access this servlet"); "Only Namenode and Secondary Namenode may access this servlet");
LOG.warn("Received non-NN/SNN request for image or edits from " LOG.warn("Received non-NN/SNN request for image or edits from "
+ request.getRemoteHost()); + request.getUserPrincipal().getName() + " at " + request.getRemoteHost());
return; return;
} }
@ -156,15 +157,10 @@ public class GetImageServlet extends HttpServlet {
} }
// issue a HTTP get request to download the new fsimage // issue a HTTP get request to download the new fsimage
MD5Hash downloadImageDigest = reloginIfNecessary().doAs( MD5Hash downloadImageDigest =
new PrivilegedExceptionAction<MD5Hash>() { TransferFsImage.downloadImageToStorage(
@Override
public MD5Hash run() throws Exception {
return TransferFsImage.downloadImageToStorage(
parsedParams.getInfoServer(), txid, parsedParams.getInfoServer(), txid,
nnImage.getStorage(), true); nnImage.getStorage(), true);
}
});
nnImage.saveDigestAndRenameCheckpointImage(txid, downloadImageDigest); nnImage.saveDigestAndRenameCheckpointImage(txid, downloadImageDigest);
// Now that we have a new checkpoint, we might be able to // Now that we have a new checkpoint, we might be able to
@ -176,18 +172,6 @@ public class GetImageServlet extends HttpServlet {
} }
return null; return null;
} }
// We may have lost our ticket since the last time we tried to open
// an http connection, so log in just in case.
private UserGroupInformation reloginIfNecessary() throws IOException {
// This method is only called on the NN, therefore it is safe to
// use these key values.
return UserGroupInformation.loginUserFromKeytabAndReturnUGI(
SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
NameNode.getAddress(conf).getHostName()),
conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
}
}); });
} catch (Throwable t) { } catch (Throwable t) {
@ -232,18 +216,10 @@ public class GetImageServlet extends HttpServlet {
Set<String> validRequestors = new HashSet<String>(); Set<String> validRequestors = new HashSet<String>();
validRequestors.add(
SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), NameNode
.getAddress(conf).getHostName()));
validRequestors.add( validRequestors.add(
SecurityUtil.getServerPrincipal(conf SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode
.getAddress(conf).getHostName())); .getAddress(conf).getHostName()));
validRequestors.add(
SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
SecondaryNameNode.getHttpAddress(conf).getHostName()));
validRequestors.add( validRequestors.add(
SecurityUtil.getServerPrincipal(conf SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY), .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY),
@ -251,10 +227,6 @@ public class GetImageServlet extends HttpServlet {
if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) { if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
Configuration otherNnConf = HAUtil.getConfForOtherNode(conf); Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
validRequestors.add(
SecurityUtil.getServerPrincipal(otherNnConf
.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
NameNode.getAddress(otherNnConf).getHostName()));
validRequestors.add( validRequestors.add(
SecurityUtil.getServerPrincipal(otherNnConf SecurityUtil.getServerPrincipal(otherNnConf
.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
@ -263,11 +235,11 @@ public class GetImageServlet extends HttpServlet {
for(String v : validRequestors) { for(String v : validRequestors) {
if(v != null && v.equals(remoteUser)) { if(v != null && v.equals(remoteUser)) {
if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is allowing: " + remoteUser); if(LOG.isInfoEnabled()) LOG.info("GetImageServlet allowing: " + remoteUser);
return true; return true;
} }
} }
if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is rejecting: " + remoteUser); if(LOG.isInfoEnabled()) LOG.info("GetImageServlet rejecting: " + remoteUser);
return false; return false;
} }
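The hunk above trims the image-servlet requestor check down to the NN and SNN service principals (the dedicated Kerberized-HTTPS principals are removed) and resolves the caller through request.getUserPrincipal(). Below is a minimal standalone sketch of that allow-list check, assuming only the DFSConfigKeys constants already shown in the diff; the ImageRequestorCheck class and its host parameters are illustrative, not part of the patch.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.SecurityUtil;

class ImageRequestorCheck {
  // Returns true only if remoteUser matches the NN or SNN service principal,
  // with any _HOST token in the configured principal expanded to the given host.
  static boolean isValidRequestor(String remoteUser, Configuration conf,
      String nnHost, String snnHost) throws IOException {
    Set<String> validRequestors = new HashSet<String>();
    validRequestors.add(SecurityUtil.getServerPrincipal(
        conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), nnHost));
    validRequestors.add(SecurityUtil.getServerPrincipal(
        conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY), snnHost));
    for (String v : validRequestors) {
      if (v != null && v.equals(remoteUser)) {
        return true;
      }
    }
    return false;
  }
}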
View File
@ -30,13 +30,15 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import com.google.common.primitives.SignedBytes;
/** /**
* We keep an in-memory representation of the file/block hierarchy. * We keep an in-memory representation of the file/block hierarchy.
* This is a base INode class containing common fields for file and * This is a base INode class containing common fields for file and
* directory inodes. * directory inodes.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
abstract class INode implements Comparable<byte[]>, FSInodeInfo { abstract class INode implements Comparable<byte[]> {
/* /*
* The inode name is in java UTF8 encoding; * The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this. * The name in HdfsFileStatus should keep the same encoding as this.
@ -143,8 +145,7 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
protected PermissionStatus getPermissionStatus() { protected PermissionStatus getPermissionStatus() {
return new PermissionStatus(getUserName(),getGroupName(),getFsPermission()); return new PermissionStatus(getUserName(),getGroupName(),getFsPermission());
} }
private synchronized void updatePermissionStatus( private void updatePermissionStatus(PermissionStatusFormat f, long n) {
PermissionStatusFormat f, long n) {
permission = f.combine(n, permission); permission = f.combine(n, permission);
} }
/** Get user name */ /** Get user name */
@ -263,7 +264,6 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
this.name = name; this.name = name;
} }
@Override
public String getFullPathName() { public String getFullPathName() {
// Get the full path name of this inode. // Get the full path name of this inode.
return FSDirectory.getFullPathName(this); return FSDirectory.getFullPathName(this);
@ -400,48 +400,30 @@ abstract class INode implements Comparable<byte[]>, FSInodeInfo {
} }
} }
// private static final byte[] EMPTY_BYTES = {};
// Comparable interface
// @Override
public int compareTo(byte[] o) { public final int compareTo(byte[] bytes) {
return compareBytes(name, o); final byte[] left = name == null? EMPTY_BYTES: name;
final byte[] right = bytes == null? EMPTY_BYTES: bytes;
return SignedBytes.lexicographicalComparator().compare(left, right);
} }
public boolean equals(Object o) { @Override
if (!(o instanceof INode)) { public final boolean equals(Object that) {
if (this == that) {
return true;
}
if (that == null || !(that instanceof INode)) {
return false; return false;
} }
return Arrays.equals(this.name, ((INode)o).name); return Arrays.equals(this.name, ((INode)that).name);
} }
public int hashCode() { @Override
public final int hashCode() {
return Arrays.hashCode(this.name); return Arrays.hashCode(this.name);
} }
//
// static methods
//
/**
* Compare two byte arrays.
*
* @return a negative integer, zero, or a positive integer
* as defined by {@link #compareTo(byte[])}.
*/
static int compareBytes(byte[] a1, byte[] a2) {
if (a1==a2)
return 0;
int len1 = (a1==null ? 0 : a1.length);
int len2 = (a2==null ? 0 : a2.length);
int n = Math.min(len1, len2);
byte b1, b2;
for (int i=0; i<n; i++) {
b1 = a1[i];
b2 = a2[i];
if (b1 != b2)
return b1 - b2;
}
return len1 - len2;
}
/** /**
* Create an INode; the inode's name is not set yet * Create an INode; the inode's name is not set yet
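The rewritten compareTo above delegates to Guava's signed lexicographic byte comparator instead of the hand-rolled compareBytes loop that the hunk deletes. A small self-contained sketch of that ordering, assuming Guava on the classpath; the class name and sample values are illustrative only.

import com.google.common.primitives.SignedBytes;

class ByteNameOrdering {
  private static final byte[] EMPTY_BYTES = {};

  // Null-tolerant, signed lexicographic comparison, matching the semantics of
  // the removed compareBytes (which also subtracted signed byte values).
  static int compare(byte[] left, byte[] right) {
    final byte[] l = left == null ? EMPTY_BYTES : left;
    final byte[] r = right == null ? EMPTY_BYTES : right;
    return SignedBytes.lexicographicalComparator().compare(l, r);
  }

  public static void main(String[] args) {
    System.out.println(compare(new byte[]{'a'}, new byte[]{'b'}) < 0);  // true
    System.out.println(compare(null, EMPTY_BYTES) == 0);                // true: null treated as empty
    System.out.println(compare(new byte[]{-1}, new byte[]{1}) < 0);     // true: bytes compared as signed
  }
}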
View File
@ -173,9 +173,9 @@ class INodeDirectory extends INode {
*/ */
int getExistingPathINodes(byte[][] components, INode[] existing, int getExistingPathINodes(byte[][] components, INode[] existing,
boolean resolveLink) throws UnresolvedLinkException { boolean resolveLink) throws UnresolvedLinkException {
assert compareBytes(this.name, components[0]) == 0 : assert this.compareTo(components[0]) == 0 :
"Incorrect name " + getLocalName() + " expected " + "Incorrect name " + getLocalName() + " expected "
DFSUtil.bytes2String(components[0]); + (components[0] == null? null: DFSUtil.bytes2String(components[0]));
INode curNode = this; INode curNode = this;
int count = 0; int count = 0;
@ -317,8 +317,7 @@ class INodeDirectory extends INode {
INode newNode, INode newNode,
INodeDirectory parent, INodeDirectory parent,
boolean propagateModTime boolean propagateModTime
) throws FileNotFoundException, ) throws FileNotFoundException {
UnresolvedLinkException {
// insert into the parent children list // insert into the parent children list
newNode.name = localname; newNode.name = localname;
if(parent.addChild(newNode, propagateModTime) == null) if(parent.addChild(newNode, propagateModTime) == null)
View File
@ -20,15 +20,18 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
/** I-node for closed file. */ /** I-node for closed file. */
public class INodeFile extends INode { @InterfaceAudience.Private
public class INodeFile extends INode implements BlockCollection {
static final FsPermission UMASK = FsPermission.createImmutable((short)0111); static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
//Number of bits for Block size //Number of bits for Block size
@ -128,7 +131,7 @@ public class INodeFile extends INode {
} }
for(BlockInfo bi: newlist) { for(BlockInfo bi: newlist) {
bi.setINode(this); bi.setBlockCollection(this);
} }
this.blocks = newlist; this.blocks = newlist;
} }
@ -161,12 +164,18 @@ public class INodeFile extends INode {
if(blocks != null && v != null) { if(blocks != null && v != null) {
for (BlockInfo blk : blocks) { for (BlockInfo blk : blocks) {
v.add(blk); v.add(blk);
blk.setINode(null); blk.setBlockCollection(null);
} }
} }
blocks = null; blocks = null;
return 1; return 1;
} }
public String getName() {
// Get the full path name of this inode.
return getFullPathName();
}
@Override @Override
long[] computeContentSummary(long[] summary) { long[] computeContentSummary(long[] summary) {
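The setINode-to-setBlockCollection renames above are part of decoupling the block manager from the namespace: a block now points back to an abstract block collection rather than to a concrete INodeFile. The following is a hypothetical, stripped-down illustration of that pattern, not the actual HDFS interfaces.

// Stand-in for BlockCollection: only what the back-pointer needs to expose.
interface BlockOwner {
  String getName();
}

// Stand-in for BlockInfo: keeps an owner reference instead of an INode one.
class OwnedBlock {
  private BlockOwner owner;
  void setOwner(BlockOwner owner) { this.owner = owner; }   // was setINode(...)
  BlockOwner getOwner() { return owner; }                   // was getINode()
}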
View File
@ -25,13 +25,15 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
import com.google.common.base.Joiner; import com.google.common.base.Joiner;
/** /**
* I-node for file being written. * I-node for file being written.
*/ */
public class INodeFileUnderConstruction extends INodeFile { public class INodeFileUnderConstruction extends INodeFile
implements MutableBlockCollection {
private String clientName; // lease holder private String clientName; // lease holder
private final String clientMachine; private final String clientMachine;
private final DatanodeDescriptor clientNode; // if client is a cluster node too. private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@ -154,7 +156,7 @@ public class INodeFileUnderConstruction extends INodeFile {
BlockInfoUnderConstruction ucBlock = BlockInfoUnderConstruction ucBlock =
lastBlock.convertToBlockUnderConstruction( lastBlock.convertToBlockUnderConstruction(
BlockUCState.UNDER_CONSTRUCTION, targets); BlockUCState.UNDER_CONSTRUCTION, targets);
ucBlock.setINode(this); ucBlock.setBlockCollection(this);
setBlock(numBlocks()-1, ucBlock); setBlock(numBlocks()-1, ucBlock);
return ucBlock; return ucBlock;
} }
View File
@ -174,10 +174,8 @@ public class NameNode {
DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_KEY,
DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFS_NAMENODE_KEYTAB_FILE_KEY, DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY, DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_BACKUP_ADDRESS_KEY, DFS_NAMENODE_BACKUP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
@ -229,6 +227,7 @@ public class NameNode {
private final boolean haEnabled; private final boolean haEnabled;
private final HAContext haContext; private final HAContext haContext;
protected boolean allowStaleStandbyReads; protected boolean allowStaleStandbyReads;
private Runtime runtime = Runtime.getRuntime();
/** httpServer */ /** httpServer */
@ -382,8 +381,9 @@ public class NameNode {
} }
protected void setHttpServerAddress(Configuration conf) { protected void setHttpServerAddress(Configuration conf) {
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, String hostPort = NetUtils.getHostPortString(getHttpAddress());
NetUtils.getHostPortString(getHttpAddress())); conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, hostPort);
LOG.info("Web-server up at: " + hostPort);
} }
protected void loadNamesystem(Configuration conf) throws IOException { protected void loadNamesystem(Configuration conf) throws IOException {
@ -503,11 +503,16 @@ public class NameNode {
} }
private void startTrashEmptier(Configuration conf) throws IOException { private void startTrashEmptier(Configuration conf) throws IOException {
long trashInterval long trashInterval = conf.getLong(
= conf.getLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT); CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
if(trashInterval == 0) if (trashInterval == 0) {
return; return;
} else if (trashInterval < 0) {
throw new IOException("Cannot start tresh emptier with negative interval."
+ " Set " + CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY + " to a"
+ " positive value.");
}
this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier"); this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
this.emptier.setDaemon(true); this.emptier.setDaemon(true);
this.emptier.start(); this.emptier.start();
@ -1151,23 +1156,21 @@ public class NameNode {
*/ */
public static void initializeGenericKeys(Configuration conf, public static void initializeGenericKeys(Configuration conf,
String nameserviceId, String namenodeId) { String nameserviceId, String namenodeId) {
if ((nameserviceId == null || nameserviceId.isEmpty()) && if ((nameserviceId != null && !nameserviceId.isEmpty()) ||
(namenodeId == null || namenodeId.isEmpty())) { (namenodeId != null && !namenodeId.isEmpty())) {
return; if (nameserviceId != null) {
conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
}
if (namenodeId != null) {
conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
}
DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
NAMENODE_SPECIFIC_KEYS);
DFSUtil.setGenericConf(conf, nameserviceId, null,
NAMESERVICE_SPECIFIC_KEYS);
} }
if (nameserviceId != null) {
conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
}
if (namenodeId != null) {
conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
}
DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
NAMENODE_SPECIFIC_KEYS);
DFSUtil.setGenericConf(conf, nameserviceId, null,
NAMESERVICE_SPECIFIC_KEYS);
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) { if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY)); + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@ -1262,14 +1265,37 @@ public class NameNode {
} }
return state.getServiceState(); return state.getServiceState();
} }
@VisibleForTesting
public synchronized void setRuntimeForTesting(Runtime runtime) {
this.runtime = runtime;
}
/** /**
* Class used as expose {@link NameNode} as context to {@link HAState} * Shutdown the NN immediately in an ungraceful way. Used when it would be
* unsafe for the NN to continue operating, e.g. during a failed HA state
* transition.
* *
* TODO(HA): * @param t exception which warrants the shutdown. Printed to the NN log
* When entering and exiting state, on failing to start services, * before exit.
* appropriate action is needed todo either shutdown the node or recover * @throws ServiceFailedException thrown only for testing.
* from failure. */
private synchronized void doImmediateShutdown(Throwable t)
throws ServiceFailedException {
String message = "Error encountered requiring NN shutdown. " +
"Shutting down immediately.";
try {
LOG.fatal(message, t);
} catch (Throwable ignored) {
// This is unlikely to happen, but there's nothing we can do if it does.
}
runtime.exit(1);
// This code is only reached during testing, when runtime is stubbed out.
throw new ServiceFailedException(message, t);
}
/**
* Class used to expose {@link NameNode} as context to {@link HAState}
*/ */
protected class NameNodeHAContext implements HAContext { protected class NameNodeHAContext implements HAContext {
@Override @Override
@ -1284,32 +1310,52 @@ public class NameNode {
@Override @Override
public void startActiveServices() throws IOException { public void startActiveServices() throws IOException {
namesystem.startActiveServices(); try {
startTrashEmptier(conf); namesystem.startActiveServices();
startTrashEmptier(conf);
} catch (Throwable t) {
doImmediateShutdown(t);
}
} }
@Override @Override
public void stopActiveServices() throws IOException { public void stopActiveServices() throws IOException {
if (namesystem != null) { try {
namesystem.stopActiveServices(); if (namesystem != null) {
namesystem.stopActiveServices();
}
stopTrashEmptier();
} catch (Throwable t) {
doImmediateShutdown(t);
} }
stopTrashEmptier();
} }
@Override @Override
public void startStandbyServices() throws IOException { public void startStandbyServices() throws IOException {
namesystem.startStandbyServices(conf); try {
namesystem.startStandbyServices(conf);
} catch (Throwable t) {
doImmediateShutdown(t);
}
} }
@Override @Override
public void prepareToStopStandbyServices() throws ServiceFailedException { public void prepareToStopStandbyServices() throws ServiceFailedException {
namesystem.prepareToStopStandbyServices(); try {
namesystem.prepareToStopStandbyServices();
} catch (Throwable t) {
doImmediateShutdown(t);
}
} }
@Override @Override
public void stopStandbyServices() throws IOException { public void stopStandbyServices() throws IOException {
if (namesystem != null) { try {
namesystem.stopStandbyServices(); if (namesystem != null) {
namesystem.stopStandbyServices();
}
} catch (Throwable t) {
doImmediateShutdown(t);
} }
} }
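The HAContext changes above all follow one pattern: wrap each HA state-transition call in try/catch(Throwable) and fail fast through doImmediateShutdown, with the Runtime injected so tests can stub out the exit and observe the ServiceFailedException instead. A compact sketch of that pattern, assuming org.apache.hadoop.ha.ServiceFailedException; the class and method names in the sketch are illustrative.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ha.ServiceFailedException;

class FailFastTransition {
  private static final Log LOG = LogFactory.getLog(FailFastTransition.class);
  private Runtime runtime = Runtime.getRuntime();

  // Mirrors NameNode#setRuntimeForTesting: tests inject a stubbed Runtime.
  synchronized void setRuntimeForTesting(Runtime runtime) {
    this.runtime = runtime;
  }

  synchronized void runTransition(Runnable transition) throws ServiceFailedException {
    try {
      transition.run();
    } catch (Throwable t) {
      String message = "Error encountered requiring shutdown. Shutting down immediately.";
      LOG.fatal(message, t);
      runtime.exit(1);
      // Only reached in tests, where runtime.exit(1) is stubbed out.
      throw new ServiceFailedException(message, t);
    }
  }
}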
View File
@ -17,6 +17,7 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;
@ -43,6 +44,7 @@ import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.authorize.AccessControlList;
/** /**
@ -78,127 +80,101 @@ public class NameNodeHttpServer {
conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
nn.getNameNodeAddress().getHostName()); nn.getNameNodeAddress().getHostName());
} }
public void start() throws IOException { public void start() throws IOException {
final String infoHost = bindAddress.getHostName(); final String infoHost = bindAddress.getHostName();
int infoPort = bindAddress.getPort();
if(UserGroupInformation.isSecurityEnabled()) {
String httpsUser = SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoHost);
if (httpsUser == null) {
LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY
+ " not defined in config. Starting http server as "
+ getDefaultServerPrincipal()
+ ": Kerberized SSL may be not function correctly.");
} else {
// Kerberized SSL servers must be run from the host principal...
LOG.info("Logging in as " + httpsUser + " to start http server.");
SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoHost);
}
}
UserGroupInformation ugi = UserGroupInformation.getLoginUser(); httpServer = new HttpServer("hdfs", infoHost, infoPort,
try { infoPort == 0, conf,
this.httpServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() { new AccessControlList(conf.get(DFS_ADMIN, " "))) {
@Override {
public HttpServer run() throws IOException, InterruptedException { // Add SPNEGO support to NameNode
int infoPort = bindAddress.getPort(); if (UserGroupInformation.isSecurityEnabled()) {
httpServer = new HttpServer("hdfs", infoHost, infoPort, Map<String, String> params = new HashMap<String, String>();
infoPort == 0, conf, String principalInConf = conf.get(
new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " "))) { DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
{ if (principalInConf != null && !principalInConf.isEmpty()) {
if (WebHdfsFileSystem.isEnabled(conf, LOG)) { params.put("kerberos.principal",
//add SPNEGO authentication filter for webhdfs SecurityUtil.getServerPrincipal(principalInConf, infoHost));
final String name = "SPNEGO"; String httpKeytab = conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
final String classname = AuthFilter.class.getName(); if (httpKeytab != null && !httpKeytab.isEmpty()) {
final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; params.put("kerberos.keytab", httpKeytab);
Map<String, String> params = getAuthFilterParams(conf);
defineFilter(webAppContext, name, classname, params,
new String[]{pathSpec});
LOG.info("Added filter '" + name + "' (class=" + classname + ")");
// add webhdfs packages
addJerseyResourcePackage(
NamenodeWebHdfsMethods.class.getPackage().getName()
+ ";" + Param.class.getPackage().getName(), pathSpec);
}
} }
private Map<String, String> getAuthFilterParams(Configuration conf) params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
throws IOException {
Map<String, String> params = new HashMap<String, String>();
String principalInConf = conf
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
if (principalInConf != null && !principalInConf.isEmpty()) {
params
.put(
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
SecurityUtil.getServerPrincipal(principalInConf,
infoHost));
}
String httpKeytab = conf
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
if (httpKeytab != null && !httpKeytab.isEmpty()) {
params.put(
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
httpKeytab);
}
return params;
}
};
boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false); defineFilter(webAppContext, SPNEGO_FILTER,
boolean useKrb = UserGroupInformation.isSecurityEnabled(); AuthenticationFilter.class.getName(), params, null);
if (certSSL || useKrb) {
boolean needClientAuth = conf.getBoolean(
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf
.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
Configuration sslConf = new HdfsConfiguration(false);
if (certSSL) {
sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
}
httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
useKrb);
// assume same ssl port for all datanodes
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(
conf.get(DFS_DATANODE_HTTPS_ADDRESS_KEY,
infoHost + ":" + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
datanodeSslPort.getPort());
} }
httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
nn.getNameNodeAddress());
httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, nn.getFSImage());
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
setupServlets(httpServer, conf);
httpServer.start();
// The web-server port can be ephemeral... ensure we have the correct
// info
infoPort = httpServer.getPort();
httpAddress = new InetSocketAddress(infoHost, infoPort);
LOG.info(nn.getRole() + " Web-server up at: " + httpAddress);
return httpServer;
} }
}); if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
} catch (InterruptedException e) { //add SPNEGO authentication filter for webhdfs
throw new IOException(e); final String name = "SPNEGO";
} finally { final String classname = AuthFilter.class.getName();
if(UserGroupInformation.isSecurityEnabled() && final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) { Map<String, String> params = getAuthFilterParams(conf);
// Go back to being the correct Namenode principal defineFilter(webAppContext, name, classname, params,
LOG.info("Logging back in as NameNode user following http server start"); new String[]{pathSpec});
nn.loginAsNameNodeUser(conf); LOG.info("Added filter '" + name + "' (class=" + classname + ")");
// add webhdfs packages
addJerseyResourcePackage(
NamenodeWebHdfsMethods.class.getPackage().getName()
+ ";" + Param.class.getPackage().getName(), pathSpec);
}
} }
private Map<String, String> getAuthFilterParams(Configuration conf)
throws IOException {
Map<String, String> params = new HashMap<String, String>();
String principalInConf = conf
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
if (principalInConf != null && !principalInConf.isEmpty()) {
params
.put(
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
SecurityUtil.getServerPrincipal(principalInConf,
bindAddress.getHostName()));
}
String httpKeytab = conf
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
if (httpKeytab != null && !httpKeytab.isEmpty()) {
params.put(
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
httpKeytab);
}
return params;
}
};
boolean certSSL = conf.getBoolean("dfs.https.enable", false);
if (certSSL) {
boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(infoHost + ":" + conf.get(
"dfs.https.port", infoHost + ":" + 0));
Configuration sslConf = new Configuration(false);
if (certSSL) {
sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
"ssl-server.xml"));
}
httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
// assume same ssl port for all datanodes
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
"dfs.datanode.https.address", infoHost + ":" + 50475));
httpServer.setAttribute("datanode.https.port", datanodeSslPort
.getPort());
} }
httpServer.setAttribute("name.node", nn);
httpServer.setAttribute("name.node.address", bindAddress);
httpServer.setAttribute("name.system.image", nn.getFSImage());
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
setupServlets(httpServer, conf);
httpServer.start();
httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort());
} }
public void stop() throws Exception { public void stop() throws Exception {
if (httpServer != null) { if (httpServer != null) {
httpServer.stop(); httpServer.stop();
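Both HTTP servers in this patch now attach a SPNEGO AuthenticationFilter whose parameters come straight from configuration. A minimal sketch of assembling those parameters, with the principal and keytab key names left to the caller (e.g. the DFSConfigKeys constants shown in the diff); the SpnegoParams helper itself is illustrative.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;

class SpnegoParams {
  // Builds the filter parameter map passed to defineFilter(...) for the
  // SPNEGO AuthenticationFilter.
  static Map<String, String> build(Configuration conf, String principalKey,
      String keytabKey, String host) throws IOException {
    Map<String, String> params = new HashMap<String, String>();
    String principal = conf.get(principalKey);
    if (principal != null && !principal.isEmpty()) {
      // Expand any _HOST token in the configured principal to the bind host.
      params.put("kerberos.principal",
          SecurityUtil.getServerPrincipal(principal, host));
    }
    String keytab = conf.get(keytabKey);
    if (keytab != null && !keytab.isEmpty()) {
      params.put("kerberos.keytab", keytab);
    }
    params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
    return params;
  }
}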
View File
@ -734,7 +734,7 @@ class NamenodeJspHelper {
this.inode = null; this.inode = null;
} else { } else {
this.block = new Block(blockId); this.block = new Block(blockId);
this.inode = blockManager.getINode(block); this.inode = (INodeFile) blockManager.getBlockCollection(block);
} }
} }
View File
@ -25,8 +25,10 @@ import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
import java.util.Collection; import java.util.Collection;
import java.util.Date; import java.util.Date;
import java.util.HashMap;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map;
import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.CommandLineParser;
@ -44,6 +46,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.NameNodeProxies;
@ -63,9 +66,9 @@ import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Daemon;
@ -108,7 +111,6 @@ public class SecondaryNameNode implements Runnable {
private volatile boolean shouldRun; private volatile boolean shouldRun;
private HttpServer infoServer; private HttpServer infoServer;
private int infoPort; private int infoPort;
private int imagePort;
private String infoBindAddress; private String infoBindAddress;
private Collection<URI> checkpointDirs; private Collection<URI> checkpointDirs;
@ -229,63 +231,47 @@ public class SecondaryNameNode implements Runnable {
// Initialize other scheduling parameters from the configuration // Initialize other scheduling parameters from the configuration
checkpointConf = new CheckpointConf(conf); checkpointConf = new CheckpointConf(conf);
// initialize the webserver for uploading files.
// Kerberized SSL servers must be run from the host principal...
UserGroupInformation httpUGI =
UserGroupInformation.loginUserFromKeytabAndReturnUGI(
SecurityUtil.getServerPrincipal(conf
.get(DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
infoBindAddress),
conf.get(DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
try {
infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {
@Override
public HttpServer run() throws IOException, InterruptedException {
LOG.info("Starting web server as: " +
UserGroupInformation.getCurrentUser().getUserName());
int tmpInfoPort = infoSocAddr.getPort(); // initialize the webserver for uploading files.
infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, int tmpInfoPort = infoSocAddr.getPort();
tmpInfoPort == 0, conf, infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
new AccessControlList(conf.get(DFS_ADMIN, " "))); tmpInfoPort == 0, conf,
new AccessControlList(conf.get(DFS_ADMIN, " "))) {
if(UserGroupInformation.isSecurityEnabled()) { {
SecurityUtil.initKrb5CipherSuites(); if (UserGroupInformation.isSecurityEnabled()) {
InetSocketAddress secInfoSocAddr = Map<String, String> params = new HashMap<String, String>();
NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.getInt( String principalInConf = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY, if (principalInConf != null && !principalInConf.isEmpty()) {
DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT)); params.put("kerberos.principal",
imagePort = secInfoSocAddr.getPort(); SecurityUtil.getServerPrincipal(principalInConf, infoSocAddr.getHostName()));
infoServer.addSslListener(secInfoSocAddr, conf, false, true);
} }
String httpKeytab = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
infoServer.setAttribute("secondary.name.node", SecondaryNameNode.this); if (httpKeytab != null && !httpKeytab.isEmpty()) {
infoServer.setAttribute("name.system.image", checkpointImage); params.put("kerberos.keytab", httpKeytab);
infoServer.setAttribute(JspHelper.CURRENT_CONF, conf); }
infoServer.addInternalServlet("getimage", "/getimage", params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
GetImageServlet.class, true);
infoServer.start(); defineFilter(webAppContext, SPNEGO_FILTER, AuthenticationFilter.class.getName(),
return infoServer; params, null);
} }
}); }
} catch (InterruptedException e) { };
throw new RuntimeException(e); infoServer.setAttribute("secondary.name.node", this);
} infoServer.setAttribute("name.system.image", checkpointImage);
infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
infoServer.addInternalServlet("getimage", "/getimage",
GetImageServlet.class, true);
infoServer.start();
LOG.info("Web server init done"); LOG.info("Web server init done");
// The web-server port can be ephemeral... ensure we have the correct info // The web-server port can be ephemeral... ensure we have the correct info
infoPort = infoServer.getPort(); infoPort = infoServer.getPort();
if (!UserGroupInformation.isSecurityEnabled()) {
imagePort = infoPort; conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
} LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort);
LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " + LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " +
"(" + checkpointConf.getPeriod()/60 + " min)"); "(" + checkpointConf.getPeriod() / 60 + " min)");
LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns"); LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
} }
@ -434,7 +420,7 @@ public class SecondaryNameNode implements Runnable {
throw new IOException("This is not a DFS"); throw new IOException("This is not a DFS");
} }
String configuredAddress = DFSUtil.getInfoServer(null, conf, true); String configuredAddress = DFSUtil.getInfoServer(null, conf, false);
String address = DFSUtil.substituteForWildcardAddress(configuredAddress, String address = DFSUtil.substituteForWildcardAddress(configuredAddress,
fsName.getHost()); fsName.getHost());
LOG.debug("Will connect to NameNode at HTTP address: " + address); LOG.debug("Will connect to NameNode at HTTP address: " + address);
@ -446,7 +432,7 @@ public class SecondaryNameNode implements Runnable {
* for image transfers * for image transfers
*/ */
private InetSocketAddress getImageListenAddress() { private InetSocketAddress getImageListenAddress() {
return new InetSocketAddress(infoBindAddress, imagePort); return new InetSocketAddress(infoBindAddress, infoPort);
} }
/** /**
@ -507,7 +493,7 @@ public class SecondaryNameNode implements Runnable {
/** /**
* @param argv The parameters passed to this program. * @param opts The parameters passed to this program.
* @exception Exception if the filesystem does not exist. * @exception Exception if the filesystem does not exist.
* @return 0 on success, non zero on error. * @return 0 on success, non zero on error.
*/ */
@ -709,7 +695,7 @@ public class SecondaryNameNode implements Runnable {
* Construct a checkpoint image. * Construct a checkpoint image.
* @param conf Node configuration. * @param conf Node configuration.
* @param imageDirs URIs of storage for image. * @param imageDirs URIs of storage for image.
* @param editDirs URIs of storage for edit logs. * @param editsDirs URIs of storage for edit logs.
* @throws IOException If storage cannot be accessed. * @throws IOException If storage cannot be accessed.
*/ */
CheckpointStorage(Configuration conf, CheckpointStorage(Configuration conf,
View File
@ -201,19 +201,17 @@ public class TransferFsImage {
String queryString, List<File> localPaths, String queryString, List<File> localPaths,
NNStorage dstStorage, boolean getChecksum) throws IOException { NNStorage dstStorage, boolean getChecksum) throws IOException {
byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE]; byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?");
str.append(queryString);
String str = "http://" + nnHostPort + "/getimage?" + queryString;
LOG.info("Opening connection to " + str);
// //
// open connection to remote server // open connection to remote server
// //
URL url = new URL(str.toString()); URL url = new URL(str);
// Avoid Krb bug with cross-realm hosts HttpURLConnection connection = (HttpURLConnection)
SecurityUtil.fetchServiceTicket(url); SecurityUtil.openSecureHttpConnection(url);
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) { if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
throw new HttpGetFailedException( throw new HttpGetFailedException(
"Image transfer servlet at " + url + "Image transfer servlet at " + url +
View File
@ -97,7 +97,6 @@ public class BootstrapStandby implements Tool, Configurable {
static final int ERR_CODE_LOGS_UNAVAILABLE = 6; static final int ERR_CODE_LOGS_UNAVAILABLE = 6;
public int run(String[] args) throws Exception { public int run(String[] args) throws Exception {
SecurityUtil.initKrb5CipherSuites();
parseArgs(args); parseArgs(args);
parseConfAndFindOtherNN(); parseConfAndFindOtherNN();
NameNode.checkAllowFormat(conf); NameNode.checkAllowFormat(conf);
@ -325,7 +324,7 @@ public class BootstrapStandby implements Tool, Configurable {
"Could not determine valid IPC address for other NameNode (%s)" + "Could not determine valid IPC address for other NameNode (%s)" +
", got: %s", otherNNId, otherIpcAddr); ", got: %s", otherNNId, otherIpcAddr);
otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, true); otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr, otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
otherIpcAddr.getHostName()); otherIpcAddr.getHostName());
Some files were not shown because too many files have changed in this diff.