commit 45d5b13256
Merge changes from trunk
@@ -9,6 +9,7 @@
 .project
 .settings
 target
+build
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
@@ -70,8 +70,10 @@ fi
 
 ARTIFACTS_DIR="target/artifacts"
 
-# Create staging dir for release artifacts
+# mvn clean for sanity
+run ${MVN} clean
 
+# Create staging dir for release artifacts
 run mkdir -p ${ARTIFACTS_DIR}
 
 # Create RAT report
@@ -80,10 +82,17 @@ run ${MVN} apache-rat:check
 # Create SRC and BIN tarballs for release,
 # Using 'install' goal instead of 'package' so artifacts are available
 # in the Maven local cache for the site generation
-run ${MVN} install -Pdist,docs,src,native -DskipTests -Dtar
+run ${MVN} install -Pdist,src,native -DskipTests -Dtar
 
 # Create site for release
 run ${MVN} site site:stage -Pdist -Psrc
+run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn
+run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce
+run cp ./hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html target/staging/hadoop-project/hadoop-project-dist/hadoop-common/
+run cp ./hadoop-common-project/hadoop-common/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-common/
+run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-hdfs/
+run cp ./hadoop-yarn-project/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn/
+run cp ./hadoop-mapreduce-project/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce/
 run mv target/staging/hadoop-project target/r${HADOOP_VERSION}/
 run cd target/
 run tar czf hadoop-site-${HADOOP_VERSION}.tar.gz r${HADOOP_VERSION}/*
@@ -94,14 +103,19 @@ find . -name rat.txt | xargs -I% cat % > ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSIO
 
 # Stage CHANGES.txt files
 run cp ./hadoop-common-project/hadoop-common/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-COMMON-${HADOOP_VERSION}${RC_LABEL}.txt
-run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-HDFS--${HADOOP_VERSION}${RC_LABEL}.txt
+run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-HDFS-${HADOOP_VERSION}${RC_LABEL}.txt
 run cp ./hadoop-mapreduce-project/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-MAPREDUCE-${HADOOP_VERSION}${RC_LABEL}.txt
 run cp ./hadoop-yarn-project/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-YARN-${HADOOP_VERSION}${RC_LABEL}.txt
 
-# Stage BIN tarball
+# Prepare and stage BIN tarball
+run cd hadoop-dist/target/
+run tar -xzf hadoop-${HADOOP_VERSION}.tar.gz
+run cp -r ../../target/r${HADOOP_VERSION}/* hadoop-${HADOOP_VERSION}/share/doc/hadoop/
+run tar -czf hadoop-${HADOOP_VERSION}.tar.gz hadoop-${HADOOP_VERSION}
+run cd ../..
 run mv hadoop-dist/target/hadoop-${HADOOP_VERSION}.tar.gz ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz
 
-# State SRC tarball
+# Stage SRC tarball
 run mv hadoop-dist/target/hadoop-${HADOOP_VERSION}-src.tar.gz ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-src.tar.gz
 
 # Stage SITE tarball
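The create-release.sh hunks above lean on a run helper that is defined earlier in the script and does not appear in this diff. As a rough sketch only (the project's actual helper may differ), such a wrapper typically echoes the command and aborts the release build on the first failure:

  # Hypothetical sketch of a run-style wrapper, not the script's actual definition.
  run() {
    echo "\$ $*"
    "$@"
    local status=$?
    if [[ ${status} -ne 0 ]]; then
      echo "Failed (exit ${status}): $*" 1>&2
      exit ${status}
    fi
  }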
@@ -16,7 +16,7 @@
 ulimit -n 1024
 
 ### Setup some variables.
-### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process
+### BUILD_URL is set by Hudson if it is run by patch process
 ### Read variables from properties file
 bindir=$(dirname $0)
 
@@ -36,7 +36,7 @@ BUILD_NATIVE=true
 PS=${PS:-ps}
 AWK=${AWK:-awk}
 WGET=${WGET:-wget}
-SVN=${SVN:-svn}
+GIT=${GIT:-git}
 GREP=${GREP:-grep}
 PATCH=${PATCH:-patch}
 DIFF=${DIFF:-diff}
@@ -59,13 +59,13 @@ printUsage() {
 echo "--mvn-cmd=<cmd> The 'mvn' command to use (default \$MAVEN_HOME/bin/mvn, or 'mvn')"
 echo "--ps-cmd=<cmd> The 'ps' command to use (default 'ps')"
 echo "--awk-cmd=<cmd> The 'awk' command to use (default 'awk')"
-echo "--svn-cmd=<cmd> The 'svn' command to use (default 'svn')"
+echo "--git-cmd=<cmd> The 'git' command to use (default 'git')"
 echo "--grep-cmd=<cmd> The 'grep' command to use (default 'grep')"
 echo "--patch-cmd=<cmd> The 'patch' command to use (default 'patch')"
 echo "--diff-cmd=<cmd> The 'diff' command to use (default 'diff')"
 echo "--findbugs-home=<path> Findbugs home directory (default FINDBUGS_HOME environment variable)"
 echo "--forrest-home=<path> Forrest home directory (default FORREST_HOME environment variable)"
-echo "--dirty-workspace Allow the local SVN workspace to have uncommitted changes"
+echo "--dirty-workspace Allow the local git workspace to have uncommitted changes"
 echo "--run-tests Run all tests below the base directory"
 echo "--build-native=<bool> If true, then build native components (default 'true')"
 echo
@@ -107,8 +107,8 @@ parseArgs() {
 --wget-cmd=*)
 WGET=${i#*=}
 ;;
---svn-cmd=*)
-SVN=${i#*=}
+--git-cmd=*)
+GIT=${i#*=}
 ;;
 --grep-cmd=*)
 GREP=${i#*=}
@@ -197,7 +197,7 @@ checkout () {
 echo ""
 ### When run by a developer, if the workspace contains modifications, do not continue
 ### unless the --dirty-workspace option was set
-status=`$SVN stat --ignore-externals | sed -e '/^X[ ]*/D'`
+status=`$GIT status --porcelain`
 if [[ $JENKINS == "false" ]] ; then
 if [[ "$status" != "" && -z $DIRTY_WORKSPACE ]] ; then
 echo "ERROR: can't run in a workspace that contains the following modifications"
@@ -207,10 +207,12 @@ checkout () {
 echo
 else
 cd $BASEDIR
-$SVN revert -R .
-rm -rf `$SVN status --no-ignore`
-$SVN update
+$GIT reset --hard
+$GIT clean -xdf
+$GIT checkout trunk
+$GIT pull --rebase
 fi
+GIT_REVISION=`git rev-parse --verify --short HEAD`
 return $?
 }
 
@@ -229,10 +231,10 @@ downloadPatch () {
 echo "$defect patch is being downloaded at `date` from"
 echo "$patchURL"
 $WGET -q -O $PATCH_DIR/patch $patchURL
-VERSION=${SVN_REVISION}_${defect}_PATCH-${patchNum}
+VERSION=${GIT_REVISION}_${defect}_PATCH-${patchNum}
 JIRA_COMMENT="Here are the results of testing the latest attachment
 $patchURL
-against trunk revision ${SVN_REVISION}."
+against trunk revision ${GIT_REVISION}."
 
 ### Copy in any supporting files needed by this process
 cp -r $SUPPORT_DIR/lib/* ./lib
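For context on the svn-to-git switch above: git status --porcelain prints one line per modified or untracked file and nothing at all on a clean tree, so the script can treat any non-empty output as a dirty workspace. A standalone sketch of the same flow, under the assumption that it runs from the repository root:

  # Sketch of the git-based checkout logic that replaces the old svn calls.
  status=$(git status --porcelain)
  if [[ -n "${status}" ]]; then
    echo "ERROR: workspace has uncommitted changes:" 1>&2
    echo "${status}" 1>&2
    exit 1
  fi
  git checkout trunk
  git pull --rebase
  GIT_REVISION=$(git rev-parse --verify --short HEAD)
  echo "testing against trunk revision ${GIT_REVISION}"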
@@ -23,6 +23,14 @@
 </formats>
 <includeBaseDirectory>true</includeBaseDirectory>
 <fileSets>
+<fileSet>
+<directory>.</directory>
+<includes>
+<include>LICENCE.txt</include>
+<include>README.txt</include>
+<include>NOTICE.txt</include>
+</includes>
+</fileSet>
 <fileSet>
 <directory>.</directory>
 <useDefaultExcludes>true</useDefaultExcludes>
@@ -61,6 +61,16 @@
 <groupId>org.mortbay.jetty</groupId>
 <artifactId>jetty</artifactId>
 <scope>test</scope>
+</dependency>
+<dependency>
+<groupId>org.apache.tomcat.embed</groupId>
+<artifactId>tomcat-embed-core</artifactId>
+<scope>test</scope>
+</dependency>
+<dependency>
+<groupId>org.apache.tomcat.embed</groupId>
+<artifactId>tomcat-embed-logging-juli</artifactId>
+<scope>test</scope>
 </dependency>
 <dependency>
 <groupId>javax.servlet</groupId>
@@ -519,9 +519,7 @@ public class AuthenticationFilter implements Filter {
 StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE)
 .append("=");
 if (token != null && token.length() > 0) {
-sb.append("\"")
-.append(token)
-.append("\"");
+sb.append(token);
 }
 sb.append("; Version=1");
 
@@ -13,7 +13,22 @@
 */
 package org.apache.hadoop.security.authentication.client;
 
+import org.apache.catalina.deploy.FilterDef;
+import org.apache.catalina.deploy.FilterMap;
+import org.apache.catalina.startup.Tomcat;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.http.HttpResponse;
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.Credentials;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.client.params.AuthPolicy;
+import org.apache.http.entity.InputStreamEntity;
+import org.apache.http.impl.auth.SPNegoSchemeFactory;
+import org.apache.http.impl.client.SystemDefaultHttpClient;
+import org.apache.http.util.EntityUtils;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.FilterHolder;
@@ -24,16 +39,19 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.OutputStreamWriter;
-import java.io.BufferedReader;
 import java.io.InputStreamReader;
 import java.io.Writer;
 import java.net.HttpURLConnection;
 import java.net.ServerSocket;
 import java.net.URL;
+import java.security.Principal;
 import java.util.Properties;
 import org.junit.Assert;
 
@@ -41,10 +59,18 @@ public class AuthenticatorTestCase {
 private Server server;
 private String host = null;
 private int port = -1;
+private boolean useTomcat = false;
+private Tomcat tomcat = null;
 Context context;
 
 private static Properties authenticatorConfig;
 
+public AuthenticatorTestCase() {}
+
+public AuthenticatorTestCase(boolean useTomcat) {
+this.useTomcat = useTomcat;
+}
+
 protected static void setAuthenticationHandlerConfig(Properties config) {
 authenticatorConfig = config;
 }
@@ -80,7 +106,19 @@ public class AuthenticatorTestCase {
 }
 }
 
+protected int getLocalPort() throws Exception {
+ServerSocket ss = new ServerSocket(0);
+int ret = ss.getLocalPort();
+ss.close();
+return ret;
+}
+
 protected void start() throws Exception {
+if (useTomcat) startTomcat();
+else startJetty();
+}
+
+protected void startJetty() throws Exception {
 server = new Server(0);
 context = new Context();
 context.setContextPath("/foo");
@@ -88,16 +126,42 @@ public class AuthenticatorTestCase {
 context.addFilter(new FilterHolder(TestFilter.class), "/*", 0);
 context.addServlet(new ServletHolder(TestServlet.class), "/bar");
 host = "localhost";
-ServerSocket ss = new ServerSocket(0);
-port = ss.getLocalPort();
-ss.close();
+port = getLocalPort();
 server.getConnectors()[0].setHost(host);
 server.getConnectors()[0].setPort(port);
 server.start();
 System.out.println("Running embedded servlet container at: http://" + host + ":" + port);
 }
 
+protected void startTomcat() throws Exception {
+tomcat = new Tomcat();
+File base = new File(System.getProperty("java.io.tmpdir"));
+org.apache.catalina.Context ctx =
+tomcat.addContext("/foo",base.getAbsolutePath());
+FilterDef fd = new FilterDef();
+fd.setFilterClass(TestFilter.class.getName());
+fd.setFilterName("TestFilter");
+FilterMap fm = new FilterMap();
+fm.setFilterName("TestFilter");
+fm.addURLPattern("/*");
+fm.addServletName("/bar");
+ctx.addFilterDef(fd);
+ctx.addFilterMap(fm);
+tomcat.addServlet(ctx, "/bar", TestServlet.class.getName());
+ctx.addServletMapping("/bar", "/bar");
+host = "localhost";
+port = getLocalPort();
+tomcat.setHostname(host);
+tomcat.setPort(port);
+tomcat.start();
+}
+
 protected void stop() throws Exception {
+if (useTomcat) stopTomcat();
+else stopJetty();
+}
+
+protected void stopJetty() throws Exception {
 try {
 server.stop();
 } catch (Exception e) {
@@ -109,6 +173,18 @@ public class AuthenticatorTestCase {
 }
 }
 
+protected void stopTomcat() throws Exception {
+try {
+tomcat.stop();
+} catch (Exception e) {
+}
+
+try {
+tomcat.destroy();
+} catch (Exception e) {
+}
+}
+
 protected String getBaseURL() {
 return "http://" + host + ":" + port + "/foo/bar";
 }
@@ -165,4 +241,57 @@ public class AuthenticatorTestCase {
 }
 }
 
+private SystemDefaultHttpClient getHttpClient() {
+final SystemDefaultHttpClient httpClient = new SystemDefaultHttpClient();
+httpClient.getAuthSchemes().register(AuthPolicy.SPNEGO, new SPNegoSchemeFactory(true));
+Credentials use_jaas_creds = new Credentials() {
+public String getPassword() {
+return null;
+}
+
+public Principal getUserPrincipal() {
+return null;
+}
+};
+
+httpClient.getCredentialsProvider().setCredentials(
+AuthScope.ANY, use_jaas_creds);
+return httpClient;
+}
+
+private void doHttpClientRequest(HttpClient httpClient, HttpUriRequest request) throws Exception {
+HttpResponse response = null;
+try {
+response = httpClient.execute(request);
+final int httpStatus = response.getStatusLine().getStatusCode();
+Assert.assertEquals(HttpURLConnection.HTTP_OK, httpStatus);
+} finally {
+if (response != null) EntityUtils.consumeQuietly(response.getEntity());
+}
+}
+
+protected void _testAuthenticationHttpClient(Authenticator authenticator, boolean doPost) throws Exception {
+start();
+try {
+SystemDefaultHttpClient httpClient = getHttpClient();
+doHttpClientRequest(httpClient, new HttpGet(getBaseURL()));
+
+// Always do a GET before POST to trigger the SPNego negotiation
+if (doPost) {
+HttpPost post = new HttpPost(getBaseURL());
+byte [] postBytes = POST.getBytes();
+ByteArrayInputStream bis = new ByteArrayInputStream(postBytes);
+InputStreamEntity entity = new InputStreamEntity(bis, postBytes.length);
+
+// Important that the entity is not repeatable -- this means if
+// we have to renegotiate (e.g. b/c the cookie wasn't handled properly)
+// the test will fail.
+Assert.assertFalse(entity.isRepeatable());
+post.setEntity(entity);
+doHttpClientRequest(httpClient, post);
+}
+} finally {
+stop();
+}
+}
 }
@@ -20,16 +20,36 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHand
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.runner.RunWith;
 import org.junit.Test;
 
 import java.io.File;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Properties;
 import java.util.concurrent.Callable;
 
+@RunWith(Parameterized.class)
 public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
+private boolean useTomcat = false;
+
+public TestKerberosAuthenticator(boolean useTomcat) {
+this.useTomcat = useTomcat;
+}
+
+@Parameterized.Parameters
+public static Collection booleans() {
+return Arrays.asList(new Object[][] {
+{ false },
+{ true }
+});
+}
+
 @Before
 public void setup() throws Exception {
 // create keytab
@@ -53,7 +73,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 @Test(timeout=60000)
 public void testFallbacktoPseudoAuthenticator() throws Exception {
-AuthenticatorTestCase auth = new AuthenticatorTestCase();
+AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
 Properties props = new Properties();
 props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
 props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
@@ -63,7 +83,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 @Test(timeout=60000)
 public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception {
-AuthenticatorTestCase auth = new AuthenticatorTestCase();
+AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
 Properties props = new Properties();
 props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
 props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
@@ -73,7 +93,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 @Test(timeout=60000)
 public void testNotAuthenticated() throws Exception {
-AuthenticatorTestCase auth = new AuthenticatorTestCase();
+AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
 AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
 auth.start();
 try {
@@ -89,7 +109,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 @Test(timeout=60000)
 public void testAuthentication() throws Exception {
-final AuthenticatorTestCase auth = new AuthenticatorTestCase();
+final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
 AuthenticatorTestCase.setAuthenticationHandlerConfig(
 getAuthenticationHandlerConfiguration());
 KerberosTestUtils.doAsClient(new Callable<Void>() {
@@ -103,7 +123,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 @Test(timeout=60000)
 public void testAuthenticationPost() throws Exception {
-final AuthenticatorTestCase auth = new AuthenticatorTestCase();
+final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
 AuthenticatorTestCase.setAuthenticationHandlerConfig(
 getAuthenticationHandlerConfiguration());
 KerberosTestUtils.doAsClient(new Callable<Void>() {
@@ -114,4 +134,32 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 }
 });
 }
+
+@Test(timeout=60000)
+public void testAuthenticationHttpClient() throws Exception {
+final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+AuthenticatorTestCase.setAuthenticationHandlerConfig(
+getAuthenticationHandlerConfiguration());
+KerberosTestUtils.doAsClient(new Callable<Void>() {
+@Override
+public Void call() throws Exception {
+auth._testAuthenticationHttpClient(new KerberosAuthenticator(), false);
+return null;
+}
+});
+}
+
+@Test(timeout=60000)
+public void testAuthenticationHttpClientPost() throws Exception {
+final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+AuthenticatorTestCase.setAuthenticationHandlerConfig(
+getAuthenticationHandlerConfiguration());
+KerberosTestUtils.doAsClient(new Callable<Void>() {
+@Override
+public Void call() throws Exception {
+auth._testAuthenticationHttpClient(new KerberosAuthenticator(), true);
+return null;
+}
+});
+}
 }
@@ -125,6 +125,12 @@ Trunk (Unreleased)
 
 HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
 
+HADOOP-11013. CLASSPATH handling should be consolidated, debuggable (aw)
+
+HADOOP-11041. VersionInfo specifies subversion (Tsuyoshi OZAWA via aw)
+
+HADOOP-10373 create tools/hadoop-amazon for aws/EMR support (stevel)
+
 BUG FIXES
 
 HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -232,9 +238,6 @@ Trunk (Unreleased)
 HADOOP-8813. Add InterfaceAudience and InterfaceStability annotations
 to RPC Server and Client classes. (Brandon Li via suresh)
 
-HADOOP-8815. RandomDatum needs to override hashCode().
-(Brandon Li via suresh)
-
 HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
 required context item is not configured
 (Brahma Reddy Battula via harsh)
@@ -323,62 +326,16 @@ Trunk (Unreleased)
 
 HADOOP-10996. Stop violence in the *_HOME (aw)
 
+HADOOP-10748. HttpServer2 should not load JspServlet. (wheat9)
+
+HADOOP-11033. shell scripts ignore JAVA_HOME on OS X. (aw)
+
 OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)
 
 HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
 
-BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
-HADOOP-10734. Implement high-performance secure random number sources.
-(Yi Liu via Colin Patrick McCabe)
-
-HADOOP-10603. Crypto input and output streams implementing Hadoop stream
-interfaces. (Yi Liu and Charles Lamb)
-
-HADOOP-10628. Javadoc and few code style improvement for Crypto
-input and output streams. (Yi Liu via clamb)
-
-HADOOP-10632. Minor improvements to Crypto input and output streams.
-(Yi Liu)
-
-HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
-
-HADOOP-10653. Add a new constructor for CryptoInputStream that
-receives current position of wrapped stream. (Yi Liu)
-
-HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
-stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
-
-HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[].
-(wang via yliu)
-
-HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL.
-(Yi Liu via cmccabe)
-
-HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
-format. (Yi Liu)
-
-HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
-JCE if non native support. (Yi Liu)
-
-HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
-openssl versions (cmccabe)
-
-HADOOP-10853. Refactor get instance of CryptoCodec and support create via
-algorithm/mode/padding. (Yi Liu)
-
-HADOOP-10919. Copy command should preserve raw.* namespace
-extended attributes. (clamb)
-
-HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
-
-HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
-
-HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not
-loaded. (umamahesh)
-
 Release 2.6.0 - UNRELEASED
 
 INCOMPATIBLE CHANGES
@@ -516,6 +473,29 @@ Release 2.6.0 - UNRELEASED
 
 HADOOP-10998. Fix bash tab completion code to work (Jim Hester via aw)
 
+HADOOP-10880. Move HTTP delegation tokens out of URL querystring to
+a header. (tucu)
+
+HADOOP-11005. Fix HTTP content type for ReconfigurationServlet.
+(Lei Xu via wang)
+
+HADOOP-10814. Update Tomcat version used by HttpFS and KMS to latest
+6.x version. (rkanter via tucu)
+
+HADOOP-10994. KeyProviderCryptoExtension should use CryptoCodec for
+generation/decryption of keys. (tucu)
+
+HADOOP-11021. Configurable replication factor in the hadoop archive
+command. (Zhe Zhang via wang)
+
+HADOOP-11030. Define a variable jackson.version instead of using constant
+at multiple places. (Juan Yu via kasha)
+
+HADOOP-10990. Add missed NFSv3 request and response classes (brandonli)
+
+HADOOP-10863. KMS should have a blacklist for decrypting EEKs.
+(asuresh via tucu)
+
 OPTIMIZATIONS
 
 HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
@@ -568,6 +548,8 @@ Release 2.6.0 - UNRELEASED
 schedules incoming calls and multiplexes outgoing calls. (Chris Li via
 Arpit Agarwal)
 
+HADOOP-10833. Remove unused cache in UserProvider. (Benoy Antony)
+
 BUG FIXES
 
 HADOOP-10781. Unportable getgrouplist() usage breaks FreeBSD (Dmitry
@@ -699,6 +681,68 @@ Release 2.6.0 - UNRELEASED
 HADOOP-10989. Work around buggy getgrouplist() implementations on Linux that
 return 0 on failure. (cnauroth)
 
+HADOOP-8815. RandomDatum needs to override hashCode().
+(Brandon Li via suresh)
+
+BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+HADOOP-10734. Implement high-performance secure random number sources.
+(Yi Liu via Colin Patrick McCabe)
+
+HADOOP-10603. Crypto input and output streams implementing Hadoop stream
+interfaces. (Yi Liu and Charles Lamb)
+
+HADOOP-10628. Javadoc and few code style improvement for Crypto
+input and output streams. (Yi Liu via clamb)
+
+HADOOP-10632. Minor improvements to Crypto input and output streams.
+(Yi Liu)
+
+HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
+
+HADOOP-10653. Add a new constructor for CryptoInputStream that
+receives current position of wrapped stream. (Yi Liu)
+
+HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
+stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
+
+HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[].
+(wang via yliu)
+
+HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL.
+(Yi Liu via cmccabe)
+
+HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
+format. (Yi Liu)
+
+HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
+JCE if non native support. (Yi Liu)
+
+HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
+openssl versions (cmccabe)
+
+HADOOP-10853. Refactor get instance of CryptoCodec and support create via
+algorithm/mode/padding. (Yi Liu)
+
+HADOOP-10919. Copy command should preserve raw.* namespace
+extended attributes. (clamb)
+
+HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
+
+HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
+
+HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not
+loaded. (umamahesh)
+--
+
+HADOOP-10911. hadoop.auth cookie after HADOOP-10710 still not proper
+according to RFC2109. (gchanan via tucu)
+
+HADOOP-11036. Add build directory to .gitignore (Tsuyoshi OZAWA via aw)
+
+HADOOP-11012. hadoop fs -text of zero-length file causes EOFException
+(Eric Payne via jlowe)
+
 Release 2.5.1 - UNRELEASED
 
 INCOMPATIBLE CHANGES
|
||||||
NEW FEATURES
|
NEW FEATURES
|
||||||
|
|
||||||
IMPROVEMENTS
|
IMPROVEMENTS
|
||||||
|
|
||||||
|
HADOOP-10956. Fix create-release script to include docs and necessary txt
|
||||||
|
files. (kasha)
|
||||||
|
|
||||||
OPTIMIZATIONS
|
OPTIMIZATIONS
|
||||||
|
|
||||||
BUG FIXES
|
BUG FIXES
|
||||||
|
|
||||||
|
HADOOP-11001. Fix test-patch to work with the git repo. (kasha)
|
||||||
|
|
||||||
Release 2.5.0 - 2014-08-11
|
Release 2.5.0 - 2014-08-11
|
||||||
|
|
||||||
INCOMPATIBLE CHANGES
|
INCOMPATIBLE CHANGES
|
||||||
|
|
|
@@ -224,6 +224,10 @@
 <scope>compile</scope>
 </dependency>
 
+<dependency>
+<groupId>org.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</dependency>
 <dependency>
 <groupId>org.apache.zookeeper</groupId>
 <artifactId>zookeeper</artifactId>
@@ -114,6 +114,7 @@ case ${COMMAND} in
 ;;
 archive)
 CLASS=org.apache.hadoop.tools.HadoopArchives
+hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
 hadoop_add_classpath "${TOOL_PATH}"
 ;;
 checknative)
@@ -136,10 +137,12 @@ case ${COMMAND} in
 ;;
 distch)
 CLASS=org.apache.hadoop.tools.DistCh
+hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
 hadoop_add_classpath "${TOOL_PATH}"
 ;;
 distcp)
 CLASS=org.apache.hadoop.tools.DistCp
+hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
 hadoop_add_classpath "${TOOL_PATH}"
 ;;
 fs)
@@ -168,11 +171,11 @@ case ${COMMAND} in
 esac
 
 # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
 HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 
 hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
 
 hadoop_finalize
-export CLASSPATH
 hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
 
@@ -129,6 +129,11 @@ while [[ -z "${_hadoop_common_done}" ]]; do
 hadoop_exit_with_usage 1
 fi
 ;;
+--debug)
+shift
+# shellcheck disable=SC2034
+HADOOP_SHELL_SCRIPT_DEBUG=true
+;;
 --help|-help|-h|help|--h|--\?|-\?|\?)
 hadoop_exit_with_usage 0
 ;;
@@ -21,6 +21,13 @@ function hadoop_error
 echo "$*" 1>&2
 }
 
+function hadoop_debug
+{
+if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
+echo "DEBUG: $*" 1>&2
+fi
+}
+
 function hadoop_bootstrap_init
 {
 # NOTE: This function is not user replaceable.
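The new hadoop_debug helper prints only when HADOOP_SHELL_SCRIPT_DEBUG is set, which is exactly what the --debug option added to hadoop-config.sh in the previous hunk does. A small, self-contained usage sketch (illustrative only):

  # The function as added above, exercised outside the Hadoop scripts.
  function hadoop_debug
  {
    if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
      echo "DEBUG: $*" 1>&2
    fi
  }

  HADOOP_SHELL_SCRIPT_DEBUG=true                  # what --debug sets
  hadoop_debug "Initial CLASSPATH=/etc/hadoop"    # printed to stderr
  unset HADOOP_SHELL_SCRIPT_DEBUG
  hadoop_debug "suppressed"                       # prints nothing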
@@ -62,6 +69,7 @@ function hadoop_bootstrap_init
 
 # defaults
 export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
 }
 
 function hadoop_find_confdir
@@ -80,6 +88,8 @@ function hadoop_find_confdir
 conf_dir="etc/hadoop"
 fi
 export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}"
+
+hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
 }
 
 function hadoop_exec_hadoopenv
@@ -105,6 +115,7 @@ function hadoop_basic_init
 
 # CLASSPATH initially contains $HADOOP_CONF_DIR
 CLASSPATH="${HADOOP_CONF_DIR}"
+hadoop_debug "Initial CLASSPATH=${HADOOP_CONF_DIR}"
 
 if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
 [[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then
@@ -116,19 +127,19 @@ function hadoop_basic_init
 
 # define HADOOP_HDFS_HOME
 if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
 [[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
 export HADOOP_HDFS_HOME="${HADOOP_PREFIX}"
 fi
 
 # define HADOOP_YARN_HOME
 if [[ -z "${HADOOP_YARN_HOME}" ]] &&
 [[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
 export HADOOP_YARN_HOME="${HADOOP_PREFIX}"
 fi
 
 # define HADOOP_MAPRED_HOME
 if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
 [[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
 export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
 fi
 
@@ -274,6 +285,9 @@ function hadoop_add_param
 if [[ ! ${!1} =~ $2 ]] ; then
 # shellcheck disable=SC2086
 eval $1="'${!1} $3'"
+hadoop_debug "$1 accepted $3"
+else
+hadoop_debug "$1 declined $3"
 fi
 }
 
@@ -283,8 +297,8 @@ function hadoop_add_classpath
 # $1 = directory, file, wildcard, whatever to add
 # $2 = before or after, which determines where in the
 # classpath this object should go. default is after
-# return 0 = success
-# return 1 = failure (duplicate, doesn't exist, whatever)
+# return 0 = success (added or duplicate)
+# return 1 = failure (doesn't exist, whatever)
 
 # However, with classpath (& JLP), we can do dedupe
 # along with some sanity checking (e.g., missing directories)
@@ -295,23 +309,29 @@ function hadoop_add_classpath
 if [[ $1 =~ ^.*\*$ ]]; then
 local mp=$(dirname "$1")
 if [[ ! -d "${mp}" ]]; then
+hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
 return 1
 fi
 
 # no wildcard in the middle, so check existence
 # (doesn't matter *what* it is)
 elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then
+hadoop_debug "Rejected CLASSPATH: $1 (does not exist)"
 return 1
 fi
-
 if [[ -z "${CLASSPATH}" ]]; then
 CLASSPATH=$1
+hadoop_debug "Initial CLASSPATH=$1"
 elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then
 if [[ "$2" = "before" ]]; then
 CLASSPATH="$1:${CLASSPATH}"
+hadoop_debug "Prepend CLASSPATH: $1"
 else
 CLASSPATH+=:$1
+hadoop_debug "Append CLASSPATH: $1"
 fi
+else
+hadoop_debug "Dupe CLASSPATH: $1"
 fi
 return 0
 }
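The dedupe in hadoop_add_classpath hinges on the ":${CLASSPATH}:" != *":$1:"* test: wrapping both sides in colons makes an entry match only as a whole path element, never as a substring of another entry. A stripped-down sketch of just that containment check (not the real function):

  # Colon-delimited containment test, as used for CLASSPATH dedupe above.
  contains_entry() {
    local list=$1 entry=$2
    [[ ":${list}:" == *":${entry}:"* ]]
  }

  CLASSPATH="/etc/hadoop:/opt/hadoop/lib/a.jar"
  contains_entry "${CLASSPATH}" "/opt/hadoop/lib/a.jar" && echo "duplicate, skip"
  contains_entry "${CLASSPATH}" "/opt/hadoop/lib" || echo "missing, append"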
@@ -331,14 +351,20 @@ function hadoop_add_colonpath
 if [[ -z "${!1}" ]]; then
 # shellcheck disable=SC2086
 eval $1="'$2'"
+hadoop_debug "Initial colonpath($1): $2"
 elif [[ "$3" = "before" ]]; then
 # shellcheck disable=SC2086
 eval $1="'$2:${!1}'"
+hadoop_debug "Prepend colonpath($1): $2"
 else
 # shellcheck disable=SC2086
 eval $1+="'$2'"
+hadoop_debug "Append colonpath($1): $2"
 fi
+return 0
 fi
+hadoop_debug "Rejected colonpath($1): $2"
+return 1
 }
 
 function hadoop_add_javalibpath
@@ -397,6 +423,7 @@ function hadoop_add_to_classpath_hdfs
 
 function hadoop_add_to_classpath_yarn
 {
+local i
 #
 # get all of the yarn jars+config in the path
 #
@@ -459,7 +486,7 @@ function hadoop_add_to_classpath_userpath
 local i
 local j
 let c=0
 
 if [[ -n "${HADOOP_CLASSPATH}" ]]; then
 # I wonder if Java runs on VMS.
 for i in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
@@ -490,10 +517,12 @@ function hadoop_os_tricks
 # examples for OS X and Linux. Vendors, replace this with your special sauce.
 case ${HADOOP_OS_TYPE} in
 Darwin)
-if [[ -x /usr/libexec/java_home ]]; then
-export JAVA_HOME="$(/usr/libexec/java_home)"
-else
-export JAVA_HOME=/Library/Java/Home
+if [[ -z "${JAVA_HOME}" ]]; then
+if [[ -x /usr/libexec/java_home ]]; then
+export JAVA_HOME="$(/usr/libexec/java_home)"
+else
+export JAVA_HOME=/Library/Java/Home
+fi
 fi
 ;;
 Linux)
@@ -715,6 +744,11 @@ function hadoop_java_exec
 local command=$1
 local class=$2
 shift 2
+
+hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
+hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+
+export CLASSPATH
 #shellcheck disable=SC2086
 exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
 }
@@ -727,6 +761,11 @@ function hadoop_start_daemon
 local command=$1
 local class=$2
 shift 2
+
+hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
+hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+
+export CLASSPATH
 #shellcheck disable=SC2086
 exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
 }
@@ -807,6 +846,9 @@ function hadoop_start_secure_daemon
 # note that shellcheck will throw a
 # bogus for-our-use-case 2086 here.
 # it doesn't properly support multi-line situations
 
+hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
+hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+
 exec "${jsvc}" \
 "-Dproc_${daemonname}" \
@@ -23,6 +23,7 @@ this="$bin/$script"
 
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+# shellcheck disable=SC2034
 HADOOP_NEW_CONFIG=true
 . "$HADOOP_LIBEXEC_DIR/hadoop-config.sh"
 
@@ -33,10 +34,10 @@ fi
 CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
 
 # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
-HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 
-hadoop_add_param HADOOP_OPTS Xmx "$JAVA_HEAP_MAX"
+hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
 
 hadoop_finalize
-export CLASSPATH
 hadoop_java_exec rcc "${CLASS}" "$@"
@@ -200,6 +200,7 @@ public class ReconfigurationServlet extends HttpServlet {
 protected void doGet(HttpServletRequest req, HttpServletResponse resp)
 throws ServletException, IOException {
 LOG.info("GET");
+resp.setContentType("text/html");
 PrintWriter out = resp.getWriter();
 
 Reconfigurable reconf = getReconfigurable(req);
@@ -214,6 +215,7 @@ public class ReconfigurationServlet extends HttpServlet {
 protected void doPost(HttpServletRequest req, HttpServletResponse resp)
 throws ServletException, IOException {
 LOG.info("POST");
+resp.setContentType("text/html");
 PrintWriter out = resp.getWriter();
 
 Reconfigurable reconf = getReconfigurable(req);
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.PerformanceAdvisory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -48,7 +49,7 @@ public abstract class CryptoCodec implements Configurable {
 *
 * @param conf
 * the configuration
-* @param CipherSuite
+* @param cipherSuite
 * algorithm/mode/padding
 * @return CryptoCodec the codec object. Null value will be returned if no
 * crypto codec classes with cipher suite configured.
@@ -66,15 +67,18 @@ public abstract class CryptoCodec implements Configurable {
 CryptoCodec c = ReflectionUtils.newInstance(klass, conf);
 if (c.getCipherSuite().getName().equals(cipherSuite.getName())) {
 if (codec == null) {
-LOG.debug("Using crypto codec {}.", klass.getName());
+PerformanceAdvisory.LOG.debug("Using crypto codec {}.",
+klass.getName());
 codec = c;
 }
 } else {
-LOG.warn("Crypto codec {} doesn't meet the cipher suite {}.",
+PerformanceAdvisory.LOG.debug(
+"Crypto codec {} doesn't meet the cipher suite {}.",
 klass.getName(), cipherSuite.getName());
 }
 } catch (Exception e) {
-LOG.warn("Crypto codec {} is not available.", klass.getName());
+PerformanceAdvisory.LOG.debug("Crypto codec {} is not available.",
+klass.getName());
 }
 }
 
@ -108,7 +112,8 @@ public abstract class CryptoCodec implements Configurable {
|
||||||
cipherSuite.getConfigSuffix();
|
cipherSuite.getConfigSuffix();
|
||||||
String codecString = conf.get(configName);
|
String codecString = conf.get(configName);
|
||||||
if (codecString == null) {
|
if (codecString == null) {
|
||||||
LOG.warn("No crypto codec classes with cipher suite configured.");
|
PerformanceAdvisory.LOG.debug(
|
||||||
|
"No crypto codec classes with cipher suite configured.");
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
for (String c : Splitter.on(',').trimResults().omitEmptyStrings().
|
for (String c : Splitter.on(',').trimResults().omitEmptyStrings().
|
||||||
|
@ -117,9 +122,9 @@ public abstract class CryptoCodec implements Configurable {
|
||||||
Class<?> cls = conf.getClassByName(c);
|
Class<?> cls = conf.getClassByName(c);
|
||||||
result.add(cls.asSubclass(CryptoCodec.class));
|
result.add(cls.asSubclass(CryptoCodec.class));
|
||||||
} catch (ClassCastException e) {
|
} catch (ClassCastException e) {
|
||||||
LOG.warn("Class " + c + " is not a CryptoCodec.");
|
PerformanceAdvisory.LOG.debug("Class {} is not a CryptoCodec.", c);
|
||||||
} catch (ClassNotFoundException e) {
|
} catch (ClassNotFoundException e) {
|
||||||
LOG.warn("Crypto codec " + c + " not found.");
|
PerformanceAdvisory.LOG.debug("Crypto codec {} not found.", c);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 /**
  * OpenSSL cipher using JNI.
@@ -82,6 +83,7 @@ public final class OpensslCipher {
     String loadingFailure = null;
     try {
       if (!NativeCodeLoader.buildSupportsOpenssl()) {
+        PerformanceAdvisory.LOG.debug("Build does not support openssl");
         loadingFailure = "build does not support openssl.";
       } else {
         initIDs();
@@ -108,6 +108,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
   private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
 
   private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
+    super(conf);
     this.uri = uri;
     path = ProviderUtils.unnestUri(uri);
     fs = path.getFileSystem(conf);
@@ -56,6 +56,8 @@ public abstract class KeyProvider {
       "hadoop.security.key.default.bitlength";
   public static final int DEFAULT_BITLENGTH = 128;
 
+  private final Configuration conf;
+
   /**
    * The combination of both the key version name and the key material.
    */
@@ -353,6 +355,24 @@ public abstract class KeyProvider {
     }
   }
 
+  /**
+   * Constructor.
+   *
+   * @param conf configuration for the provider
+   */
+  public KeyProvider(Configuration conf) {
+    this.conf = new Configuration(conf);
+  }
+
+  /**
+   * Return the provider configuration.
+   *
+   * @return the provider configuration
+   */
+  public Configuration getConf() {
+    return conf;
+  }
+
   /**
    * A helper function to create an options object.
    * @param conf the configuration to use
@@ -19,6 +19,7 @@
 package org.apache.hadoop.crypto.key;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.security.GeneralSecurityException;
 import java.security.SecureRandom;
 
@@ -29,6 +30,9 @@ import javax.crypto.spec.SecretKeySpec;
 import com.google.common.base.Preconditions;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.Decryptor;
+import org.apache.hadoop.crypto.Encryptor;
 
 /**
  * A KeyProvider with Cryptographic Extensions specifically for generating
@@ -239,18 +243,25 @@ public class KeyProviderCryptoExtension extends
       Preconditions.checkNotNull(encryptionKey,
           "No KeyVersion exists for key '%s' ", encryptionKeyName);
       // Generate random bytes for new key and IV
-      Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
+      CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
       final byte[] newKey = new byte[encryptionKey.getMaterial().length];
-      RANDOM.get().nextBytes(newKey);
-      final byte[] iv = new byte[cipher.getBlockSize()];
-      RANDOM.get().nextBytes(iv);
+      cc.generateSecureRandom(newKey);
+      final byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()];
+      cc.generateSecureRandom(iv);
       // Encryption key IV is derived from new key's IV
       final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv);
-      // Encrypt the new key
-      cipher.init(Cipher.ENCRYPT_MODE,
-          new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
-          new IvParameterSpec(encryptionIV));
-      final byte[] encryptedKey = cipher.doFinal(newKey);
+      Encryptor encryptor = cc.createEncryptor();
+      encryptor.init(encryptionKey.getMaterial(), encryptionIV);
+      int keyLen = newKey.length;
+      ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
+      ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
+      bbIn.put(newKey);
+      bbIn.flip();
+      encryptor.encrypt(bbIn, bbOut);
+      bbOut.flip();
+      byte[] encryptedKey = new byte[keyLen];
+      bbOut.get(encryptedKey);
       return new EncryptedKeyVersion(encryptionKeyName,
           encryptionKey.getVersionName(), iv,
           new KeyVersion(encryptionKey.getName(), EEK, encryptedKey));
@@ -274,19 +285,25 @@ public class KeyProviderCryptoExtension extends
           KeyProviderCryptoExtension.EEK,
           encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
       );
-      final byte[] encryptionKeyMaterial = encryptionKey.getMaterial();
       // Encryption key IV is determined from encrypted key's IV
       final byte[] encryptionIV =
           EncryptedKeyVersion.deriveIV(encryptedKeyVersion.getEncryptedKeyIv());
-      // Init the cipher with encryption key parameters
-      Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
-      cipher.init(Cipher.DECRYPT_MODE,
-          new SecretKeySpec(encryptionKeyMaterial, "AES"),
-          new IvParameterSpec(encryptionIV));
-      // Decrypt the encrypted key
+      CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
+      Decryptor decryptor = cc.createDecryptor();
+      decryptor.init(encryptionKey.getMaterial(), encryptionIV);
       final KeyVersion encryptedKV =
           encryptedKeyVersion.getEncryptedKeyVersion();
-      final byte[] decryptedKey = cipher.doFinal(encryptedKV.getMaterial());
+      int keyLen = encryptedKV.getMaterial().length;
+      ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
+      ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
+      bbIn.put(encryptedKV.getMaterial());
+      bbIn.flip();
+      decryptor.decrypt(bbIn, bbOut);
+      bbOut.flip();
+      byte[] decryptedKey = new byte[keyLen];
+      bbOut.get(decryptedKey);
       return new KeyVersion(encryptionKey.getName(), EK, decryptedKey);
     }
 
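The two hunks above replace direct javax.crypto.Cipher calls with Hadoop's CryptoCodec API (an Encryptor/Decryptor pair working over direct ByteBuffers). Below is a minimal, self-contained sketch of that round trip, not part of the commit; it only uses calls that appear in the diff, and the key length, payload, and class name are assumptions made for the example.

    // Illustrative sketch only; not part of this commit.
    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CryptoCodec;
    import org.apache.hadoop.crypto.Decryptor;
    import org.apache.hadoop.crypto.Encryptor;

    public class CryptoCodecRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        CryptoCodec cc = CryptoCodec.getInstance(conf);
        byte[] key = new byte[16];   // assumed AES-128 key length
        byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()];
        cc.generateSecureRandom(key);
        cc.generateSecureRandom(iv);

        byte[] plain = "example payload".getBytes(StandardCharsets.UTF_8);
        ByteBuffer bbIn = ByteBuffer.allocateDirect(plain.length);
        ByteBuffer bbOut = ByteBuffer.allocateDirect(plain.length);
        bbIn.put(plain);
        bbIn.flip();

        // Encrypt: same pattern as generateEncryptedKey() above.
        Encryptor encryptor = cc.createEncryptor();
        encryptor.init(key, iv);
        encryptor.encrypt(bbIn, bbOut);
        bbOut.flip();
        byte[] encrypted = new byte[plain.length];
        bbOut.get(encrypted);

        // Decrypt with the same key/IV: same pattern as decryptEncryptedKey() above.
        ByteBuffer dIn = ByteBuffer.allocateDirect(encrypted.length);
        ByteBuffer dOut = ByteBuffer.allocateDirect(encrypted.length);
        dIn.put(encrypted);
        dIn.flip();
        Decryptor decryptor = cc.createDecryptor();
        decryptor.init(key, iv);
        decryptor.decrypt(dIn, dOut);
        dOut.flip();
        byte[] decrypted = new byte[encrypted.length];
        dOut.get(decrypted);
        System.out.println(new String(decrypted, StandardCharsets.UTF_8));
      }
    }

In CTR mode the ciphertext has the same length as the plaintext, which is why the diff (and this sketch) can size both direct buffers and the output array from the input length.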
@@ -40,6 +40,7 @@ public abstract class KeyProviderExtension
   private E extension;
 
   public KeyProviderExtension(KeyProvider keyProvider, E extensions) {
+    super(keyProvider.getConf());
     this.keyProvider = keyProvider;
     this.extension = extensions;
   }
@@ -44,7 +44,8 @@ public class UserProvider extends KeyProvider {
   private final Credentials credentials;
   private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
 
-  private UserProvider() throws IOException {
+  private UserProvider(Configuration conf) throws IOException {
+    super(conf);
     user = UserGroupInformation.getCurrentUser();
     credentials = user.getCredentials();
   }
@@ -145,7 +146,7 @@ public class UserProvider extends KeyProvider {
     public KeyProvider createProvider(URI providerName,
         Configuration conf) throws IOException {
       if (SCHEME_NAME.equals(providerName.getScheme())) {
-        return new UserProvider();
+        return new UserProvider(conf);
       }
       return null;
     }
@@ -283,6 +283,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
   }
 
   public KMSClientProvider(URI uri, Configuration conf) throws IOException {
+    super(conf);
     Path path = ProviderUtils.unnestUri(uri);
     URL url = path.toUri().toURL();
     kmsUrl = createServiceURL(url);
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 /**
  * OpenSSL secure random using JNI.
@@ -67,6 +68,8 @@ public class OpensslSecureRandom extends Random {
 
   public OpensslSecureRandom() {
     if (!nativeEnabled) {
+      PerformanceAdvisory.LOG.debug("Build does not support openssl, " +
+          "falling back to Java SecureRandom.");
       fallback = new java.security.SecureRandom();
     }
   }
@@ -381,7 +381,8 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
                           long blockSize,
                           Progressable progress)
       throws IOException {
-      super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
+      super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
+          fs.getBytesPerSum()));
       int bytesPerSum = fs.getBytesPerSum();
       this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize,
                                          replication, blockSize, progress);
@@ -405,10 +406,11 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     }
 
     @Override
-    protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
+    protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
+        int ckoff, int cklen)
     throws IOException {
       datas.write(b, offset, len);
-      sums.write(checksum);
+      sums.write(checksum, ckoff, cklen);
     }
 
     @Override
@@ -337,7 +337,8 @@ public abstract class ChecksumFs extends FilterFs {
         final short replication, final long blockSize,
         final Progressable progress, final ChecksumOpt checksumOpt,
         final boolean createParent) throws IOException {
-      super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
+      super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
+          fs.getBytesPerSum()));
 
       // checksumOpt is passed down to the raw fs. Unless it implements
       // checksum impelemts internally, checksumOpt will be ignored.
@@ -370,10 +371,11 @@ public abstract class ChecksumFs extends FilterFs {
     }
 
     @Override
-    protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
+    protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
+        int ckoff, int cklen)
       throws IOException {
       datas.write(b, offset, len);
-      sums.write(checksum);
+      sums.write(checksum, ckoff, cklen);
     }
 
     @Override
@@ -18,13 +18,14 @@
 
 package org.apache.hadoop.fs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.DataChecksum;
+
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.zip.Checksum;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
 /**
  * This is a generic output stream for generating checksums for
  * data before it is written to the underlying stream
@@ -33,7 +34,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Unstable
 abstract public class FSOutputSummer extends OutputStream {
   // data checksum
-  private Checksum sum;
+  private final DataChecksum sum;
   // internal buffer for storing data before it is checksumed
   private byte buf[];
   // internal buffer for storing checksum
@@ -41,18 +42,24 @@ abstract public class FSOutputSummer extends OutputStream {
   // The number of valid bytes in the buffer.
   private int count;
 
-  protected FSOutputSummer(Checksum sum, int maxChunkSize, int checksumSize) {
+  // We want this value to be a multiple of 3 because the native code checksums
+  // 3 chunks simultaneously. The chosen value of 9 strikes a balance between
+  // limiting the number of JNI calls and flushing to the underlying stream
+  // relatively frequently.
+  private static final int BUFFER_NUM_CHUNKS = 9;
+
+  protected FSOutputSummer(DataChecksum sum) {
     this.sum = sum;
-    this.buf = new byte[maxChunkSize];
-    this.checksum = new byte[checksumSize];
+    this.buf = new byte[sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS];
+    this.checksum = new byte[sum.getChecksumSize() * BUFFER_NUM_CHUNKS];
     this.count = 0;
   }
 
   /* write the data chunk in <code>b</code> staring at <code>offset</code> with
-   * a length of <code>len</code>, and its checksum
+   * a length of <code>len > 0</code>, and its checksum
    */
-  protected abstract void writeChunk(byte[] b, int offset, int len, byte[] checksum)
-  throws IOException;
+  protected abstract void writeChunk(byte[] b, int bOffset, int bLen,
+      byte[] checksum, int checksumOffset, int checksumLen) throws IOException;
 
   /**
    * Check if the implementing OutputStream is closed and should no longer
@@ -66,7 +73,6 @@ abstract public class FSOutputSummer extends OutputStream {
   /** Write one byte */
   @Override
   public synchronized void write(int b) throws IOException {
-    sum.update(b);
     buf[count++] = (byte)b;
     if(count == buf.length) {
       flushBuffer();
@@ -111,18 +117,17 @@ abstract public class FSOutputSummer extends OutputStream {
    */
   private int write1(byte b[], int off, int len) throws IOException {
     if(count==0 && len>=buf.length) {
-      // local buffer is empty and user data has one chunk
-      // checksum and output data
+      // local buffer is empty and user buffer size >= local buffer size, so
+      // simply checksum the user buffer and send it directly to the underlying
+      // stream
       final int length = buf.length;
-      sum.update(b, off, length);
-      writeChecksumChunk(b, off, length, false);
+      writeChecksumChunks(b, off, length);
       return length;
     }
 
     // copy user data to local buffer
     int bytesToCopy = buf.length-count;
     bytesToCopy = (len<bytesToCopy) ? len : bytesToCopy;
-    sum.update(b, off, bytesToCopy);
     System.arraycopy(b, off, buf, count, bytesToCopy);
     count += bytesToCopy;
     if (count == buf.length) {
@@ -136,22 +141,45 @@ abstract public class FSOutputSummer extends OutputStream {
    * the underlying output stream.
    */
   protected synchronized void flushBuffer() throws IOException {
-    flushBuffer(false);
+    flushBuffer(false, true);
   }
 
-  /* Forces any buffered output bytes to be checksumed and written out to
-   * the underlying output stream. If keep is true, then the state of
-   * this object remains intact.
+  /* Forces buffered output bytes to be checksummed and written out to
+   * the underlying output stream. If there is a trailing partial chunk in the
+   * buffer,
+   * 1) flushPartial tells us whether to flush that chunk
+   * 2) if flushPartial is true, keep tells us whether to keep that chunk in the
+   * buffer (if flushPartial is false, it is always kept in the buffer)
+   *
+   * Returns the number of bytes that were flushed but are still left in the
+   * buffer (can only be non-zero if keep is true).
    */
-  protected synchronized void flushBuffer(boolean keep) throws IOException {
-    if (count != 0) {
-      int chunkLen = count;
+  protected synchronized int flushBuffer(boolean keep,
+      boolean flushPartial) throws IOException {
+    int bufLen = count;
+    int partialLen = bufLen % sum.getBytesPerChecksum();
+    int lenToFlush = flushPartial ? bufLen : bufLen - partialLen;
+    if (lenToFlush != 0) {
+      writeChecksumChunks(buf, 0, lenToFlush);
+      if (!flushPartial || keep) {
+        count = partialLen;
+        System.arraycopy(buf, bufLen - count, buf, 0, count);
+      } else {
         count = 0;
-      writeChecksumChunk(buf, 0, chunkLen, keep);
-      if (keep) {
-        count = chunkLen;
       }
     }
+
+    // total bytes left minus unflushed bytes left
+    return count - (bufLen - lenToFlush);
+  }
+
+  /**
+   * Checksums all complete data chunks and flushes them to the underlying
+   * stream. If there is a trailing partial chunk, it is not flushed and is
+   * maintained in the buffer.
+   */
+  public void flush() throws IOException {
+    flushBuffer(false, false);
   }
 
   /**
@@ -161,18 +189,18 @@ abstract public class FSOutputSummer extends OutputStream {
     return count;
   }
 
-  /** Generate checksum for the data chunk and output data chunk & checksum
-   * to the underlying output stream. If keep is true then keep the
-   * current checksum intact, do not reset it.
+  /** Generate checksums for the given data chunks and output chunks & checksums
+   * to the underlying output stream.
    */
-  private void writeChecksumChunk(byte b[], int off, int len, boolean keep)
+  private void writeChecksumChunks(byte b[], int off, int len)
   throws IOException {
-    int tempChecksum = (int)sum.getValue();
-    if (!keep) {
-      sum.reset();
+    sum.calculateChunkedSums(b, off, len, checksum, 0);
+    for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
+      int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
+      int ckOffset = i / sum.getBytesPerChecksum() * sum.getChecksumSize();
+      writeChunk(b, off + i, chunkLen, checksum, ckOffset,
+          sum.getChecksumSize());
     }
-    int2byte(tempChecksum, checksum);
-    writeChunk(b, off, len, checksum);
   }
 
   /**
@@ -196,9 +224,14 @@ abstract public class FSOutputSummer extends OutputStream {
   /**
    * Resets existing buffer with a new one of the specified size.
    */
-  protected synchronized void resetChecksumChunk(int size) {
-    sum.reset();
+  protected synchronized void setChecksumBufSize(int size) {
     this.buf = new byte[size];
+    this.checksum = new byte[((size - 1) / sum.getBytesPerChecksum() + 1) *
+        sum.getChecksumSize()];
     this.count = 0;
   }
+
+  protected synchronized void resetChecksumBufSize() {
+    setChecksumBufSize(sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS);
+  }
 }
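The new writeChecksumChunks() above computes one checksum per bytesPerChecksum-sized data chunk and packs the checksums contiguously into the checksum buffer. A standalone sketch of that offset arithmetic follows; it is not part of the commit, and the chunk size, checksum size, and buffer length are illustrative assumptions (512-byte chunks with 4-byte CRC32 values are only an example).

    // Illustrative sketch only; mirrors the chunk/offset loop in writeChecksumChunks().
    public class ChunkOffsets {
      public static void main(String[] args) {
        int bytesPerChecksum = 512;   // assumed data chunk size
        int checksumSize = 4;         // assumed per-chunk checksum size in bytes
        int len = 1300;               // assumed buffer length: two full chunks plus a partial one

        for (int i = 0; i < len; i += bytesPerChecksum) {
          int chunkLen = Math.min(bytesPerChecksum, len - i);
          int ckOffset = i / bytesPerChecksum * checksumSize;
          System.out.println("data[" + i + ".." + (i + chunkLen) + ") -> checksum["
              + ckOffset + ".." + (ckOffset + checksumSize) + ")");
        }
      }
    }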
@@ -232,6 +232,10 @@ class Globber {
             }
           }
           for (FileStatus child : children) {
+            if (componentIdx < components.size() - 1) {
+              // Don't try to recurse into non-directories.  See HADOOP-10957.
+              if (!child.isDirectory()) continue;
+            }
             // Set the child path based on the parent path.
             child.setPath(new Path(candidate.getPath(),
                     child.getPath().getName()));
@@ -249,8 +253,8 @@ class Globber {
               new Path(candidate.getPath(), component));
           if (childStatus != null) {
             newCandidates.add(childStatus);
          }
         }
       }
       candidates = newCandidates;
     }
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.shell;
 
 import java.io.ByteArrayOutputStream;
+import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.LinkedList;
@@ -126,8 +127,17 @@ class Display extends FsCommand {
     protected InputStream getInputStream(PathData item) throws IOException {
       FSDataInputStream i = (FSDataInputStream)super.getInputStream(item);
 
+      // Handle 0 and 1-byte files
+      short leadBytes;
+      try {
+        leadBytes = i.readShort();
+      } catch (EOFException e) {
+        i.seek(0);
+        return i;
+      }
+
       // Check type of stream first
-      switch(i.readShort()) {
+      switch(leadBytes) {
         case 0x1f8b: { // RFC 1952
           // Must be gzip
           i.seek(0);
@@ -44,6 +44,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -415,6 +416,17 @@ public final class HttpServer2 implements FilterContainer {
   private static WebAppContext createWebAppContext(String name,
       Configuration conf, AccessControlList adminsAcl, final String appDir) {
     WebAppContext ctx = new WebAppContext();
+    ctx.setDefaultsDescriptor(null);
+    ServletHolder holder = new ServletHolder(new DefaultServlet());
+    Map<String, String> params = ImmutableMap. <String, String> builder()
+        .put("acceptRanges", "true")
+        .put("dirAllowed", "false")
+        .put("gzip", "true")
+        .put("useFileMappedBuffer", "true")
+        .build();
+    holder.setInitParameters(params);
+    ctx.setWelcomeFiles(new String[] {"index.html"});
+    ctx.addServlet(holder, "/");
     ctx.setDisplayName(name);
     ctx.setContextPath("/");
     ctx.setWar(appDir + "/" + name);
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.PerformanceAdvisory;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -196,7 +197,7 @@ public class NativeIO {
         // This can happen if the user has an older version of libhadoop.so
         // installed - in this case we can continue without native IO
         // after warning
-        LOG.error("Unable to initialize NativeIO libraries", t);
+        PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
       }
     }
   }
@@ -574,7 +575,7 @@ public class NativeIO {
         // This can happen if the user has an older version of libhadoop.so
         // installed - in this case we can continue without native IO
         // after warning
-        LOG.error("Unable to initialize NativeIO libraries", t);
+        PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
       }
     }
   }
@@ -593,7 +594,7 @@ public class NativeIO {
         // This can happen if the user has an older version of libhadoop.so
         // installed - in this case we can continue without native IO
         // after warning
-        LOG.error("Unable to initialize NativeIO libraries", t);
+        PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
       }
     }
   }
@@ -88,6 +88,7 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.htrace.Trace;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -694,6 +695,9 @@ public class Client {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Connecting to "+server);
       }
+      if (Trace.isTracing()) {
+        Trace.addTimelineAnnotation("IPC client connecting to " + server);
+      }
       short numRetries = 0;
       Random rand = null;
       while (true) {
@@ -758,6 +762,10 @@ public class Client {
       // update last activity time
       touch();
 
+      if (Trace.isTracing()) {
+        Trace.addTimelineAnnotation("IPC client connected to " + server);
+      }
+
       // start the receiver thread after the socket connection has been set
       // up
       start();
@@ -48,6 +48,9 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.Time;
+import org.htrace.Sampler;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
@@ -191,6 +194,16 @@ public class ProtobufRpcEngine implements RpcEngine {
             + method.getName() + "]");
       }
 
+      TraceScope traceScope = null;
+      // if Tracing is on then start a new span for this rpc.
+      // guard it in the if statement to make sure there isn't
+      // any extra string manipulation.
+      if (Trace.isTracing()) {
+        traceScope = Trace.startSpan(
+            method.getDeclaringClass().getCanonicalName() +
+            "." + method.getName());
+      }
+
       RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
 
       if (LOG.isTraceEnabled()) {
@@ -212,8 +225,13 @@ public class ProtobufRpcEngine implements RpcEngine {
               remoteId + ": " + method.getName() +
                 " {" + e + "}");
         }
+        if (Trace.isTracing()) {
+          traceScope.getSpan().addTimelineAnnotation(
+              "Call got exception: " + e.getMessage());
+        }
         throw new ServiceException(e);
+      } finally {
+        if (traceScope != null) traceScope.close();
       }
 
       if (LOG.isDebugEnabled()) {
@@ -79,6 +79,7 @@ import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseMessageWrapper;
@@ -115,6 +116,10 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.htrace.Span;
+import org.htrace.Trace;
+import org.htrace.TraceInfo;
+import org.htrace.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;
@@ -506,6 +511,7 @@ public abstract class Server {
     private ByteBuffer rpcResponse;       // the response for this call
     private final RPC.RpcKind rpcKind;
     private final byte[] clientId;
+    private final Span traceSpan; // the tracing span on the server side
 
     public Call(int id, int retryCount, Writable param,
         Connection connection) {
@@ -515,6 +521,11 @@ public abstract class Server {
 
     public Call(int id, int retryCount, Writable param, Connection connection,
         RPC.RpcKind kind, byte[] clientId) {
+      this(id, retryCount, param, connection, kind, clientId, null);
+    }
+
+    public Call(int id, int retryCount, Writable param, Connection connection,
+        RPC.RpcKind kind, byte[] clientId, Span span) {
       this.callId = id;
       this.retryCount = retryCount;
       this.rpcRequest = param;
@@ -523,6 +534,7 @@ public abstract class Server {
       this.rpcResponse = null;
       this.rpcKind = kind;
       this.clientId = clientId;
+      this.traceSpan = span;
     }
 
     @Override
@@ -1921,9 +1933,18 @@ public abstract class Server {
             RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST, err);
       }
 
+      Span traceSpan = null;
+      if (header.hasTraceInfo()) {
+        // If the incoming RPC included tracing info, always continue the trace
+        TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
+            header.getTraceInfo().getParentId());
+        traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach();
+      }
+
       Call call = new Call(header.getCallId(), header.getRetryCount(),
-          rpcRequest, this, ProtoUtil.convert(header.getRpcKind()), header
-              .getClientId().toByteArray());
+          rpcRequest, this, ProtoUtil.convert(header.getRpcKind()),
+          header.getClientId().toByteArray(), traceSpan);
 
       callQueue.put(call);              // queue the call; maybe blocked here
       incRpcCount();                    // Increment the rpc count
     }
@@ -2067,6 +2088,7 @@ public abstract class Server {
       ByteArrayOutputStream buf =
         new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
       while (running) {
+        TraceScope traceScope = null;
         try {
           final Call call = callQueue.take(); // pop the queue; maybe blocked here
           if (LOG.isDebugEnabled()) {
@@ -2083,6 +2105,10 @@ public abstract class Server {
           Writable value = null;
 
           CurCall.set(call);
+          if (call.traceSpan != null) {
+            traceScope = Trace.continueSpan(call.traceSpan);
+          }
+
           try {
             // Make the call as the user via Subject.doAs, thus associating
             // the call with the Subject
@@ -2156,9 +2182,22 @@ public abstract class Server {
         } catch (InterruptedException e) {
           if (running) {                          // unexpected -- log it
             LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
+            if (Trace.isTracing()) {
+              traceScope.getSpan().addTimelineAnnotation("unexpectedly interrupted: " +
+                  StringUtils.stringifyException(e));
+            }
           }
         } catch (Exception e) {
           LOG.info(Thread.currentThread().getName() + " caught an exception", e);
+          if (Trace.isTracing()) {
+            traceScope.getSpan().addTimelineAnnotation("Exception: " +
+                StringUtils.stringifyException(e));
+          }
+        } finally {
+          if (traceScope != null) {
+            traceScope.close();
+          }
+          IOUtils.cleanup(LOG, traceScope);
         }
       }
       LOG.debug(Thread.currentThread().getName() + ": exiting");
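Every tracing change in the RPC hunks above follows the same shape: check Trace.isTracing(), start a span, and close the TraceScope in a finally block. The sketch below, which is not from the commit, shows that pattern in isolation using only the org.htrace calls that appear in the diff; the span name and the wrapped work are assumptions for the example.

    // Illustrative sketch only; shows the guard/start/close pattern the RPC hunks add.
    import org.htrace.Trace;
    import org.htrace.TraceScope;

    public class TracePattern {
      static void tracedCall(Runnable work) {
        TraceScope traceScope = null;
        if (Trace.isTracing()) {
          // Span name is an assumed example; the diff uses declaring class + method name.
          traceScope = Trace.startSpan("ExampleProtocol.exampleMethod");
        }
        try {
          work.run();
        } finally {
          if (traceScope != null) {
            traceScope.close();
          }
        }
      }

      public static void main(String[] args) {
        tracedCall(new Runnable() {
          @Override
          public void run() {
            System.out.println("doing traced work");
          }
        });
      }
    }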
@@ -41,6 +41,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.*;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
 
 /** An RpcEngine implementation for Writable data. */
 @InterfaceStability.Evolving
@@ -227,9 +229,19 @@ public class WritableRpcEngine implements RpcEngine {
       if (LOG.isDebugEnabled()) {
         startTime = Time.now();
       }
-      ObjectWritable value = (ObjectWritable)
-        client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
+      TraceScope traceScope = null;
+      if (Trace.isTracing()) {
+        traceScope = Trace.startSpan(
+            method.getDeclaringClass().getCanonicalName() +
+            "." + method.getName());
+      }
+      ObjectWritable value;
+      try {
+        value = (ObjectWritable)
+          client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
+      } finally {
+        if (traceScope != null) traceScope.close();
+      }
       if (LOG.isDebugEnabled()) {
         long callTime = Time.now() - startTime;
         LOG.debug("Call: " + method.getName() + " " + callTime);
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 public class JniBasedUnixGroupsMappingWithFallback implements
     GroupMappingServiceProvider {
@@ -37,7 +38,7 @@ public class JniBasedUnixGroupsMappingWithFallback implements
     if (NativeCodeLoader.isNativeCodeLoaded()) {
       this.impl = new JniBasedUnixGroupsMapping();
     } else {
-      LOG.debug("Falling back to shell based");
+      PerformanceAdvisory.LOG.debug("Falling back to shell based");
       this.impl = new ShellBasedUnixGroupsMapping();
     }
     if (LOG.isDebugEnabled()){
@@ -21,9 +21,7 @@ package org.apache.hadoop.security.alias;
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -41,8 +39,6 @@ public class UserProvider extends CredentialProvider {
   public static final String SCHEME_NAME = "user";
   private final UserGroupInformation user;
   private final Credentials credentials;
-  private final Map<String, CredentialEntry> cache = new HashMap<String,
-      CredentialEntry>();
 
   private UserProvider() throws IOException {
     user = UserGroupInformation.getCurrentUser();
@@ -86,7 +82,6 @@ public class UserProvider extends CredentialProvider {
       throw new IOException("Credential " + name +
           " does not exist in " + this);
     }
-    cache.remove(name);
   }
 
   @Override
@@ -221,7 +221,13 @@ public class AccessControlList implements Writable {
     return groups;
   }
 
-  public boolean isUserAllowed(UserGroupInformation ugi) {
+  /**
+   * Checks if a user represented by the provided {@link UserGroupInformation}
+   * is a member of the Access Control List
+   * @param ugi UserGroupInformation to check if contained in the ACL
+   * @return true if ugi is member of the list
+   */
+  public final boolean isUserInList(UserGroupInformation ugi) {
     if (allAllowed || users.contains(ugi.getShortUserName())) {
       return true;
     } else {
@@ -234,6 +240,10 @@ public class AccessControlList implements Writable {
     return false;
   }
 
+  public boolean isUserAllowed(UserGroupInformation ugi) {
+    return isUserInList(ugi);
+  }
+
   /**
    * Returns descriptive way of users and groups that are part of this ACL.
    * Use {@link #getAclString()} to get the exact String that can be given to
@@ -125,6 +125,8 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
     }
   }
 
+  private boolean useQueryStringforDelegationToken = false;
+
   /**
    * Creates an <code>DelegationTokenAuthenticatedURL</code>.
    * <p/>
@@ -170,6 +172,34 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
     super(obtainDelegationTokenAuthenticator(authenticator), connConfigurator);
   }
 
+  /**
+   * Sets if delegation token should be transmitted in the URL query string.
+   * By default it is transmitted using the
+   * {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
+   * <p/>
+   * This method is provided to enable WebHDFS backwards compatibility.
+   *
+   * @param useQueryString <code>TRUE</code> if the token is transmitted in the
+   * URL query string, <code>FALSE</code> if the delegation token is transmitted
+   * using the {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP
+   * header.
+   */
+  @Deprecated
+  protected void setUseQueryStringForDelegationToken(boolean useQueryString) {
+    useQueryStringforDelegationToken = useQueryString;
+  }
+
+  /**
+   * Returns if delegation token is transmitted as a HTTP header.
+   *
+   * @return <code>TRUE</code> if the token is transmitted in the URL query
+   * string, <code>FALSE</code> if the delegation token is transmitted using the
+   * {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
+   */
+  public boolean useQueryStringForDelegationToken() {
+    return useQueryStringforDelegationToken;
+  }
+
   /**
    * Returns an authenticated {@link HttpURLConnection}, it uses a Delegation
    * Token only if the given auth token is an instance of {@link Token} and
@@ -235,23 +265,41 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
    * @throws IOException if an IO error occurred.
    * @throws AuthenticationException if an authentication exception occurred.
    */
+  @SuppressWarnings("unchecked")
   public HttpURLConnection openConnection(URL url, Token token, String doAs)
       throws IOException, AuthenticationException {
     Preconditions.checkNotNull(url, "url");
     Preconditions.checkNotNull(token, "token");
     Map<String, String> extraParams = new HashMap<String, String>();
-    // delegation token
-    Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
-    if (!creds.getAllTokens().isEmpty()) {
-      InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
-          url.getPort());
-      Text service = SecurityUtil.buildTokenService(serviceAddr);
-      org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dt =
-          creds.getToken(service);
-      if (dt != null) {
-        extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
-            dt.encodeToUrlString());
+    org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dToken
+        = null;
+    // if we have valid auth token, it takes precedence over a delegation token
+    // and we don't even look for one.
+    if (!token.isSet()) {
+      // delegation token
+      Credentials creds = UserGroupInformation.getCurrentUser().
+          getCredentials();
+      if (!creds.getAllTokens().isEmpty()) {
+        InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
+            url.getPort());
+        Text service = SecurityUtil.buildTokenService(serviceAddr);
+        dToken = creds.getToken(service);
+        if (dToken != null) {
+          if (useQueryStringForDelegationToken()) {
+            // delegation token will go in the query string, injecting it
+            extraParams.put(
+                KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
+                dToken.encodeToUrlString());
+          } else {
+            // delegation token will go as request header, setting it in the
+            // auth-token to ensure no authentication handshake is triggered
+            // (if we have a delegation token, we are authenticated)
+            // the delegation token header is injected in the connection request
+            // at the end of this method.
+            token.delegationToken = (org.apache.hadoop.security.token.Token
+                <AbstractDelegationTokenIdentifier>) dToken;
+          }
+        }
       }
     }
@@ -261,7 +309,14 @@ public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
     }
 
     url = augmentURL(url, extraParams);
-    return super.openConnection(url, token);
+    HttpURLConnection conn = super.openConnection(url, token);
+    if (!token.isSet() && !useQueryStringForDelegationToken() && dToken != null) {
+      // injecting the delegation token header in the connection request
+      conn.setRequestProperty(
+          DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
+          dToken.encodeToUrlString());
+    }
+    return conn;
   }
 
   /**
@ -331,8 +331,7 @@ public abstract class DelegationTokenAuthenticationHandler
|
||||||
HttpServletResponse response)
|
HttpServletResponse response)
|
||||||
throws IOException, AuthenticationException {
|
throws IOException, AuthenticationException {
|
||||||
AuthenticationToken token;
|
AuthenticationToken token;
|
||||||
String delegationParam = ServletUtils.getParameter(request,
|
String delegationParam = getDelegationToken(request);
|
||||||
KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
|
|
||||||
if (delegationParam != null) {
|
if (delegationParam != null) {
|
||||||
try {
|
try {
|
||||||
Token<DelegationTokenIdentifier> dt =
|
Token<DelegationTokenIdentifier> dt =
|
||||||
|
@@ -356,4 +355,15 @@ public abstract class DelegationTokenAuthenticationHandler
     return token;
   }

+  private String getDelegationToken(HttpServletRequest request)
+      throws IOException {
+    String dToken = request.getHeader(
+        DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER);
+    if (dToken == null) {
+      dToken = ServletUtils.getParameter(request,
+          KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
+    }
+    return dToken;
+  }
+
 }
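Clients that do not use DelegationTokenAuthenticatedURL can supply the token directly; a hedged snippet (host, port and encodedToken are placeholders):

    // Either transport is accepted by getDelegationToken(request) above.
    HttpURLConnection conn =
        (HttpURLConnection) new URL("http://host:8080/path").openConnection(); // placeholder endpoint
    conn.setRequestProperty("X-Hadoop-Delegation-Token", encodedToken); // header form
    // or append "?delegation=" + encodedToken to the URL            // query-string form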
@@ -56,6 +56,9 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {

   public static final String OP_PARAM = "op";

+  public static final String DELEGATION_TOKEN_HEADER =
+      "X-Hadoop-Delegation-Token";
+
   public static final String DELEGATION_PARAM = "delegation";
   public static final String TOKEN_PARAM = "token";
   public static final String RENEWER_PARAM = "renewer";
@@ -101,15 +104,23 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
     authenticator.setConnectionConfigurator(configurator);
   }

-  private boolean hasDelegationToken(URL url) {
-    String queryStr = url.getQuery();
-    return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
+  private boolean hasDelegationToken(URL url, AuthenticatedURL.Token token) {
+    boolean hasDt = false;
+    if (token instanceof DelegationTokenAuthenticatedURL.Token) {
+      hasDt = ((DelegationTokenAuthenticatedURL.Token) token).
+          getDelegationToken() != null;
+    }
+    if (!hasDt) {
+      String queryStr = url.getQuery();
+      hasDt = (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
+    }
+    return hasDt;
   }

   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
       throws IOException, AuthenticationException {
-    if (!hasDelegationToken(url)) {
+    if (!hasDelegationToken(url, token)) {
       authenticator.authenticate(url, token);
     }
   }
@@ -0,0 +1,153 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.tracing;

import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.htrace.HTraceConfiguration;
import org.htrace.SpanReceiver;
import org.htrace.Trace;

/**
 * This class provides functions for reading the names of SpanReceivers from
 * the Hadoop configuration, adding those SpanReceivers to the Tracer,
 * and closing those SpanReceivers when appropriate.
 * This class does nothing If no SpanReceiver is configured.
 */
@InterfaceAudience.Private
public class SpanReceiverHost {
  public static final String SPAN_RECEIVERS_CONF_KEY = "hadoop.trace.spanreceiver.classes";
  private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
  private Collection<SpanReceiver> receivers = new HashSet<SpanReceiver>();
  private boolean closed = false;

  private static enum SingletonHolder {
    INSTANCE;
    Object lock = new Object();
    SpanReceiverHost host = null;
  }

  public static SpanReceiverHost getInstance(Configuration conf) {
    if (SingletonHolder.INSTANCE.host != null) {
      return SingletonHolder.INSTANCE.host;
    }
    synchronized (SingletonHolder.INSTANCE.lock) {
      if (SingletonHolder.INSTANCE.host != null) {
        return SingletonHolder.INSTANCE.host;
      }
      SpanReceiverHost host = new SpanReceiverHost();
      host.loadSpanReceivers(conf);
      SingletonHolder.INSTANCE.host = host;
      ShutdownHookManager.get().addShutdownHook(new Runnable() {
          public void run() {
            SingletonHolder.INSTANCE.host.closeReceivers();
          }
        }, 0);
      return SingletonHolder.INSTANCE.host;
    }
  }

  /**
   * Reads the names of classes specified in the
   * "hadoop.trace.spanreceiver.classes" property and instantiates and registers
   * them with the Tracer as SpanReceiver's.
   *
   * The nullary constructor is called during construction, but if the classes
   * specified implement the Configurable interface, setConfiguration() will be
   * called on them. This allows SpanReceivers to use values from the Hadoop
   * configuration.
   */
  public void loadSpanReceivers(Configuration conf) {
    Class<?> implClass = null;
    String[] receiverNames = conf.getTrimmedStrings(SPAN_RECEIVERS_CONF_KEY);
    if (receiverNames == null || receiverNames.length == 0) {
      return;
    }
    for (String className : receiverNames) {
      className = className.trim();
      try {
        implClass = Class.forName(className);
        receivers.add(loadInstance(implClass, conf));
        LOG.info("SpanReceiver " + className + " was loaded successfully.");
      } catch (ClassNotFoundException e) {
        LOG.warn("Class " + className + " cannot be found.", e);
      } catch (IOException e) {
        LOG.warn("Load SpanReceiver " + className + " failed.", e);
      }
    }
    for (SpanReceiver rcvr : receivers) {
      Trace.addReceiver(rcvr);
    }
  }

  private SpanReceiver loadInstance(Class<?> implClass, Configuration conf)
      throws IOException {
    SpanReceiver impl;
    try {
      Object o = ReflectionUtils.newInstance(implClass, conf);
      impl = (SpanReceiver)o;
      impl.configure(wrapHadoopConf(conf));
    } catch (SecurityException e) {
      throw new IOException(e);
    } catch (IllegalArgumentException e) {
      throw new IOException(e);
    } catch (RuntimeException e) {
      throw new IOException(e);
    }
    return impl;
  }

  private static HTraceConfiguration wrapHadoopConf(final Configuration conf) {
    return new HTraceConfiguration() {
      public static final String HTRACE_CONF_PREFIX = "hadoop.";

      @Override
      public String get(String key) {
        return conf.get(HTRACE_CONF_PREFIX + key);
      }

      @Override
      public String get(String key, String defaultValue) {
        return conf.get(HTRACE_CONF_PREFIX + key, defaultValue);
      }
    };
  }

  /**
   * Calls close() on all SpanReceivers created by this SpanReceiverHost.
   */
  public synchronized void closeReceivers() {
    if (closed) return;
    closed = true;
    for (SpanReceiver rcvr : receivers) {
      try {
        rcvr.close();
      } catch (IOException e) {
        LOG.warn("Unable to close SpanReceiver correctly: " + e.getMessage(), e);
      }
    }
  }
}
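As a rough illustration of the receiver contract this host loads reflectively (the package and class name below are hypothetical, not part of this change), a receiver only needs receiveSpan(), configure() and close():

    package org.example.tracing; // hypothetical package, for illustration only

    import java.io.IOException;
    import org.htrace.HTraceConfiguration;
    import org.htrace.Span;
    import org.htrace.SpanReceiver;

    // Minimal sketch of a SpanReceiver that just prints span descriptions;
    // SpanReceiverHost instantiates it via reflection and calls configure().
    public class LoggingSpanReceiver implements SpanReceiver {
      @Override
      public void configure(HTraceConfiguration conf) {
        // keys are looked up with the "hadoop." prefix added by wrapHadoopConf()
      }

      @Override
      public void receiveSpan(Span span) {
        System.out.println("span: " + span.getDescription());
      }

      @Override
      public void close() throws IOException {
        // flush/close any underlying sink here
      }
    }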
@@ -339,6 +339,7 @@ public class DataChecksum implements Checksum {
       byte[] data, int dataOff, int dataLen,
       byte[] checksums, int checksumsOff, String fileName,
       long basePos) throws ChecksumException {
+    if (type.size == 0) return;

     if (NativeCrc32.isAvailable()) {
       NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, type.id,
@@ -421,6 +422,7 @@ public class DataChecksum implements Checksum {
   public void calculateChunkedSums(
       byte[] data, int dataOffset, int dataLength,
       byte[] sums, int sumsOffset) {
+    if (type.size == 0) return;

     if (NativeCrc32.isAvailable()) {
       NativeCrc32.calculateChunkedSumsByteArray(bytesPerChecksum, type.id,
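An illustrative snippet of why the early return matters (a sketch, not part of the change): with the NULL checksum type the per-chunk checksum size is 0, so both verify and calculate become no-ops instead of reaching the native code path:

    // NULL checksums carry no data; the new guard skips all computation.
    DataChecksum nullSum =
        DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
    byte[] data = new byte[1024];
    byte[] sums = new byte[0];
    nullSum.calculateChunkedSums(data, 0, data.length, sums, 0); // returns immediately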
@@ -42,7 +42,7 @@ class NativeCrc32 {
    * modified.
    *
    * @param bytesPerSum the chunk size (eg 512 bytes)
-   * @param checksumType the DataChecksum type constant
+   * @param checksumType the DataChecksum type constant (NULL is not supported)
    * @param sums the DirectByteBuffer pointing at the beginning of the
    *              stored checksums
    * @param data the DirectByteBuffer pointing at the beginning of the
@@ -0,0 +1,25 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.hadoop.util;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PerformanceAdvisory {
  public static final Logger LOG =
      LoggerFactory.getLogger(PerformanceAdvisory.class);
}
@@ -27,6 +27,8 @@ import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformation
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.*;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.htrace.Span;
+import org.htrace.Trace;

 import com.google.protobuf.ByteString;

@@ -165,6 +167,15 @@ public abstract class ProtoUtil {
     RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
     result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
         .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));

+    // Add tracing info if we are currently tracing.
+    if (Trace.isTracing()) {
+      Span s = Trace.currentSpan();
+      result.setTraceInfo(RPCTraceInfoProto.newBuilder()
+          .setParentId(s.getSpanId())
+          .setTraceId(s.getTraceId()).build());
+    }
+
     return result.build();
   }
 }
@@ -170,7 +170,8 @@ public class VersionInfo {
   public static void main(String[] args) {
     LOG.debug("version: "+ getVersion());
     System.out.println("Hadoop " + getVersion());
-    System.out.println("Subversion " + getUrl() + " -r " + getRevision());
+    System.out.println("Source code repository " + getUrl() + " -r " +
+        getRevision());
     System.out.println("Compiled by " + getUser() + " on " + getDate());
     System.out.println("Compiled with protoc " + getProtocVersion());
     System.out.println("From source with checksum " + getSrcChecksum());
@@ -1,10 +1,11 @@
 Package: libhadoop
-Authors: Arun C Murthy <arunc@yahoo-inc.com>

 MOTIVATION

-The libhadoop package contains the native code for any of hadoop (http://hadoop.apache.org/core).
+The libhadoop package contains the native code for Apache Hadoop (http://hadoop.apache.org/).

 IMPROVEMENTS

-Any suggestions for improvements or patched should be sent to core-dev@hadoop.apache.org. Please go through http://wiki.apache.org/hadoop/HowToContribute for more information on how to contribute.
+Any suggestions for improvements or patched should be sent to common-dev@hadoop.apache.org.
+
+Please see http://wiki.apache.org/hadoop/HowToContribute for more information on how to contribute.
@@ -53,6 +53,18 @@ enum RpcKindProto {


+/**
+ * Used to pass through the information necessary to continue
+ * a trace after an RPC is made. All we need is the traceid
+ * (so we know the overarching trace this message is a part of), and
+ * the id of the current span when this message was sent, so we know
+ * what span caused the new span we will create when this message is received.
+ */
+message RPCTraceInfoProto {
+  optional int64 traceId = 1;
+  optional int64 parentId = 2;
+}
+
 message RpcRequestHeaderProto { // the header for the RpcRequest
   enum OperationProto {
     RPC_FINAL_PACKET = 0; // The final RPC Packet

@@ -67,6 +79,7 @@ message RpcRequestHeaderProto { // the header for the RpcRequest
   // clientId + callId uniquely identifies a request
   // retry count, 1 means this is the first retry
   optional sint32 retryCount = 5 [default = -1];
+  optional RPCTraceInfoProto traceInfo = 6; // tracing info
 }
@@ -0,0 +1,169 @@
~~ Licensed under the Apache License, Version 2.0 (the "License");
~~ you may not use this file except in compliance with the License.
~~ You may obtain a copy of the License at
~~
~~   http://www.apache.org/licenses/LICENSE-2.0
~~
~~ Unless required by applicable law or agreed to in writing, software
~~ distributed under the License is distributed on an "AS IS" BASIS,
~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~~ See the License for the specific language governing permissions and
~~ limitations under the License. See accompanying LICENSE file.

  ---
  Hadoop Distributed File System-${project.version} - Enabling Dapper-like Tracing
  ---
  ---
  ${maven.build.timestamp}

Enabling Dapper-like Tracing in HDFS

%{toc|section=1|fromDepth=0}

* {Dapper-like Tracing in HDFS}

** HTrace

  {{{https://issues.apache.org/jira/browse/HDFS-5274}HDFS-5274}}
  added support for tracing requests through HDFS,
  using the open source tracing library, {{{https://github.com/cloudera/htrace}HTrace}}.
  Setting up tracing is quite simple, however it requires some very minor changes to your client code.

** SpanReceivers

  The tracing system works by collecting information in structs called 'Spans'.
  It is up to you to choose how you want to receive this information
  by implementing the SpanReceiver interface, which defines one method:

+----
public void receiveSpan(Span span);
+----

  Configure what SpanReceivers you'd like to use
  by putting a comma separated list of the fully-qualified class name of
  classes implementing SpanReceiver
  in <<<hdfs-site.xml>>> property: <<<hadoop.trace.spanreceiver.classes>>>.

+----
  <property>
    <name>hadoop.trace.spanreceiver.classes</name>
    <value>org.htrace.impl.LocalFileSpanReceiver</value>
  </property>
  <property>
    <name>hadoop.local-file-span-receiver.path</name>
    <value>/var/log/hadoop/htrace.out</value>
  </property>
+----

** Setting up ZipkinSpanReceiver

  Instead of implementing SpanReceiver by yourself,
  you can use <<<ZipkinSpanReceiver>>> which uses
  {{{https://github.com/twitter/zipkin}Zipkin}}
  for collecting and dispalying tracing data.

  In order to use <<<ZipkinSpanReceiver>>>,
  you need to download and setup {{{https://github.com/twitter/zipkin}Zipkin}} first.

  you also need to add the jar of <<<htrace-zipkin>>> to the classpath of Hadoop on each node.
  Here is example setup procedure.

+----
  $ git clone https://github.com/cloudera/htrace
  $ cd htrace/htrace-zipkin
  $ mvn compile assembly:single
  $ cp target/htrace-zipkin-*-jar-with-dependencies.jar $HADOOP_HOME/share/hadoop/hdfs/lib/
+----

  The sample configuration for <<<ZipkinSpanReceiver>>> is shown below.
  By adding these to <<<hdfs-site.xml>>> of NameNode and DataNodes,
  <<<ZipkinSpanReceiver>>> is initialized on the startup.
  You also need this configuration on the client node in addition to the servers.

+----
  <property>
    <name>hadoop.trace.spanreceiver.classes</name>
    <value>org.htrace.impl.ZipkinSpanReceiver</value>
  </property>
  <property>
    <name>hadoop.zipkin.collector-hostname</name>
    <value>192.168.1.2</value>
  </property>
  <property>
    <name>hadoop.zipkin.collector-port</name>
    <value>9410</value>
  </property>
+----

** Turning on tracing by HTrace API

  In order to turn on Dapper-like tracing,
  you will need to wrap the traced logic with <<tracing span>> as shown below.
  When there is running tracing spans,
  the tracing information is propagated to servers along with RPC requests.

  In addition, you need to initialize <<<SpanReceiver>>> once per process.

+----
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.htrace.Sampler;
import org.htrace.Trace;
import org.htrace.TraceScope;

...

    SpanReceiverHost.getInstance(new HdfsConfiguration());

...

    TraceScope ts = Trace.startSpan("Gets", Sampler.ALWAYS);
    try {
      ... // traced logic
    } finally {
      if (ts != null) ts.close();
    }
+----

** Sample code for tracing

  The <<<TracingFsShell.java>>> shown below is the wrapper of FsShell
  which start tracing span before invoking HDFS shell command.

+----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.util.ToolRunner;
import org.htrace.Sampler;
import org.htrace.Trace;
import org.htrace.TraceScope;

public class TracingFsShell {
  public static void main(String argv[]) throws Exception {
    Configuration conf = new Configuration();
    FsShell shell = new FsShell();
    conf.setQuietMode(false);
    shell.setConf(conf);
    int res = 0;
    SpanReceiverHost.init(new HdfsConfiguration());
    TraceScope ts = null;
    try {
      ts = Trace.startSpan("FsShell", Sampler.ALWAYS);
      res = ToolRunner.run(shell, argv);
    } finally {
      shell.close();
      if (ts != null) ts.close();
    }
    System.exit(res);
  }
}
+----

  You can compile and execute this code as shown below.

+----
$ javac -cp `hadoop classpath` TracingFsShell.java
$ HADOOP_CLASSPATH=. hdfs TracingFsShell -put sample.txt /tmp/
+----
@@ -19,6 +19,7 @@ package org.apache.hadoop.crypto.key;

 import java.util.Date;

+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.junit.Assert;
 import org.junit.Test;

@@ -32,6 +33,7 @@ public class TestCachingKeyProvider {
     KeyProvider mockProv = Mockito.mock(KeyProvider.class);
     Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
     Mockito.when(mockProv.getCurrentKey(Mockito.eq("k2"))).thenReturn(null);
+    Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
     KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);

     // asserting caching

@@ -58,6 +60,7 @@ public class TestCachingKeyProvider {
     Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0")))
         .thenReturn(mockKey);
     Mockito.when(mockProv.getKeyVersion(Mockito.eq("k2@0"))).thenReturn(null);
+    Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
     KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);

     // asserting caching

@@ -88,6 +91,7 @@ public class TestCachingKeyProvider {
     KeyProvider mockProv = Mockito.mock(KeyProvider.class);
     Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(mockMeta);
     Mockito.when(mockProv.getMetadata(Mockito.eq("k2"))).thenReturn(null);
+    Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
     KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);

     // asserting caching

@@ -112,6 +116,7 @@ public class TestCachingKeyProvider {
     KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
     KeyProvider mockProv = Mockito.mock(KeyProvider.class);
     Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
+    Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
     KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
     Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
     Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));

@@ -134,6 +139,7 @@ public class TestCachingKeyProvider {
         .thenReturn(mockKey);
     Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(
         new KMSClientProvider.KMSMetadata("c", 0, "l", null, new Date(), 1));
+    Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
     KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
     Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
     Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
@@ -159,6 +159,10 @@ public class TestKeyProvider {
     private int size;
     private byte[] material;

+    public MyKeyProvider(Configuration conf) {
+      super(conf);
+    }
+
     @Override
     public KeyVersion getKeyVersion(String versionName)
         throws IOException {

@@ -216,7 +220,7 @@ public class TestKeyProvider {

   @Test
   public void testMaterialGeneration() throws Exception {
-    MyKeyProvider kp = new MyKeyProvider();
+    MyKeyProvider kp = new MyKeyProvider(new Configuration());
     KeyProvider.Options options = new KeyProvider.Options(new Configuration());
     options.setCipher(CIPHER);
     options.setBitLength(128);

@@ -225,10 +229,19 @@ public class TestKeyProvider {
     Assert.assertEquals(CIPHER, kp.algorithm);
     Assert.assertNotNull(kp.material);

-    kp = new MyKeyProvider();
+    kp = new MyKeyProvider(new Configuration());
     kp.rollNewVersion("hello");
     Assert.assertEquals(128, kp.size);
     Assert.assertEquals(CIPHER, kp.algorithm);
     Assert.assertNotNull(kp.material);
   }

+  @Test
+  public void testConfiguration() throws Exception {
+    Configuration conf = new Configuration(false);
+    conf.set("a", "A");
+    MyKeyProvider kp = new MyKeyProvider(conf);
+    Assert.assertEquals("A", kp.getConf().get("a"));
+  }
+
 }
@@ -29,13 +29,18 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;

 public class TestKeyProviderDelegationTokenExtension {

   public static abstract class MockKeyProvider extends
       KeyProvider implements DelegationTokenExtension {
+
+    public MockKeyProvider() {
+      super(new Configuration(false));
+    }
   }

   @Test
   public void testCreateExtension() throws Exception {
     Configuration conf = new Configuration();

@@ -50,9 +55,11 @@ public class TestKeyProviderDelegationTokenExtension {
     Assert.assertNull(kpDTE1.addDelegationTokens("user", credentials));

     MockKeyProvider mock = mock(MockKeyProvider.class);
+    Mockito.when(mock.getConf()).thenReturn(new Configuration());
     when(mock.addDelegationTokens("renewer", credentials)).thenReturn(
-        new Token<?>[] { new Token(null, null, new Text("kind"), new Text(
-            "service")) });
+        new Token<?>[]{new Token(null, null, new Text("kind"), new Text(
+            "service"))}
+    );
     KeyProviderDelegationTokenExtension kpDTE2 =
         KeyProviderDelegationTokenExtension
             .createKeyProviderDelegationTokenExtension(mock);
@@ -42,29 +42,14 @@ public class TestTextCommand {
     System.getProperty("test.build.data", "build/test/data/") + "/testText";
   private static final String AVRO_FILENAME =
     new Path(TEST_ROOT_DIR, "weather.avro").toUri().getPath();
+  private static final String TEXT_FILENAME =
+    new Path(TEST_ROOT_DIR, "testtextfile.txt").toUri().getPath();

   /**
    * Tests whether binary Avro data files are displayed correctly.
    */
   @Test (timeout = 30000)
   public void testDisplayForAvroFiles() throws Exception {
-    // Create a small Avro data file on the local file system.
-    createAvroFile(generateWeatherAvroBinaryData());
-
-    // Prepare and call the Text command's protected getInputStream method
-    // using reflection.
-    Configuration conf = new Configuration();
-    URI localPath = new URI(AVRO_FILENAME);
-    PathData pathData = new PathData(localPath, conf);
-    Display.Text text = new Display.Text();
-    text.setConf(conf);
-    Method method = text.getClass().getDeclaredMethod(
-      "getInputStream", PathData.class);
-    method.setAccessible(true);
-    InputStream stream = (InputStream) method.invoke(text, pathData);
-    String output = inputStreamToString(stream);
-
-    // Check the output.
     String expectedOutput =
       "{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" +
       System.getProperty("line.separator") +

@@ -77,18 +62,72 @@ public class TestTextCommand {
       "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" +
       System.getProperty("line.separator");

+    String output = readUsingTextCommand(AVRO_FILENAME,
+                                         generateWeatherAvroBinaryData());
     assertEquals(expectedOutput, output);
   }

+  /**
+   * Tests that a zero-length file is displayed correctly.
+   */
+  @Test (timeout = 30000)
+  public void testEmptyTextFil() throws Exception {
+    byte[] emptyContents = { };
+    String output = readUsingTextCommand(TEXT_FILENAME, emptyContents);
+    assertTrue("".equals(output));
+  }
+
+  /**
+   * Tests that a one-byte file is displayed correctly.
+   */
+  @Test (timeout = 30000)
+  public void testOneByteTextFil() throws Exception {
+    byte[] oneByteContents = { 'x' };
+    String output = readUsingTextCommand(TEXT_FILENAME, oneByteContents);
+    assertTrue(new String(oneByteContents).equals(output));
+  }
+
+  /**
+   * Tests that a one-byte file is displayed correctly.
+   */
+  @Test (timeout = 30000)
+  public void testTwoByteTextFil() throws Exception {
+    byte[] twoByteContents = { 'x', 'y' };
+    String output = readUsingTextCommand(TEXT_FILENAME, twoByteContents);
+    assertTrue(new String(twoByteContents).equals(output));
+  }
+
+  // Create a file on the local file system and read it using
+  // the Display.Text class.
+  private String readUsingTextCommand(String fileName, byte[] fileContents)
+          throws Exception {
+    createFile(fileName, fileContents);
+
+    // Prepare and call the Text command's protected getInputStream method
+    // using reflection.
+    Configuration conf = new Configuration();
+    URI localPath = new URI(fileName);
+    PathData pathData = new PathData(localPath, conf);
+    Display.Text text = new Display.Text() {
+      @Override
+      public InputStream getInputStream(PathData item) throws IOException {
+        return super.getInputStream(item);
+      }
+    };
+    text.setConf(conf);
+    InputStream stream = (InputStream) text.getInputStream(pathData);
+    return inputStreamToString(stream);
+  }
+
   private String inputStreamToString(InputStream stream) throws IOException {
     StringWriter writer = new StringWriter();
     IOUtils.copy(stream, writer);
     return writer.toString();
   }

-  private void createAvroFile(byte[] contents) throws IOException {
+  private void createFile(String fileName, byte[] contents) throws IOException {
     (new File(TEST_ROOT_DIR)).mkdir();
-    File file = new File(AVRO_FILENAME);
+    File file = new File(fileName);
     file.createNewFile();
     FileOutputStream stream = new FileOutputStream(file);
     stream.write(contents);
@@ -284,11 +284,13 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {

   @Test
   public void testAuthenticate() throws Exception {
-    testValidDelegationToken();
-    testInvalidDelegationToken();
+    testValidDelegationTokenQueryString();
+    testValidDelegationTokenHeader();
+    testInvalidDelegationTokenQueryString();
+    testInvalidDelegationTokenHeader();
   }

-  private void testValidDelegationToken() throws Exception {
+  private void testValidDelegationTokenQueryString() throws Exception {
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
     Token<DelegationTokenIdentifier> dToken =

@@ -307,7 +309,26 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
     Assert.assertTrue(token.isExpired());
   }

-  private void testInvalidDelegationToken() throws Exception {
+  private void testValidDelegationTokenHeader() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Token<DelegationTokenIdentifier> dToken =
+        handler.getTokenManager().createToken(
+            UserGroupInformation.getCurrentUser(), "user");
+    Mockito.when(request.getHeader(Mockito.eq(
+        DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
+        dToken.encodeToUrlString());
+
+    AuthenticationToken token = handler.authenticate(request, response);
+    Assert.assertEquals(UserGroupInformation.getCurrentUser().
+        getShortUserName(), token.getUserName());
+    Assert.assertEquals(0, token.getExpires());
+    Assert.assertEquals(handler.getType(),
+        token.getType());
+    Assert.assertTrue(token.isExpired());
+  }
+
+  private void testInvalidDelegationTokenQueryString() throws Exception {
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
     Mockito.when(request.getQueryString()).thenReturn(

@@ -323,4 +344,21 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
     }
   }

+  private void testInvalidDelegationTokenHeader() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+    Mockito.when(request.getHeader(Mockito.eq(
+        DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
+        "invalid");
+
+    try {
+      handler.authenticate(request, response);
+      Assert.fail();
+    } catch (AuthenticationException ex) {
+      //NOP
+    } catch (Exception ex) {
+      Assert.fail();
+    }
+  }
+
 }
@@ -149,6 +149,15 @@ public class TestWebDelegationToken {
         throws ServletException, IOException {
       resp.setStatus(HttpServletResponse.SC_OK);
       resp.getWriter().write("ping");
+      if (req.getHeader(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER)
+          != null) {
+        resp.setHeader("UsingHeader", "true");
+      }
+      if (req.getQueryString() != null &&
+          req.getQueryString().contains(
+              DelegationTokenAuthenticator.DELEGATION_PARAM + "=")) {
+        resp.setHeader("UsingQueryString", "true");
+      }
     }

     @Override

@@ -314,7 +323,20 @@ public class TestWebDelegationToken {
   }

   @Test
-  public void testDelegationTokenAuthenticatorCalls() throws Exception {
+  public void testDelegationTokenAuthenticatorCallsWithHeader()
+      throws Exception {
+    testDelegationTokenAuthenticatorCalls(false);
+  }
+
+  @Test
+  public void testDelegationTokenAuthenticatorCallsWithQueryString()
+      throws Exception {
+    testDelegationTokenAuthenticatorCalls(true);
+  }
+
+
+  private void testDelegationTokenAuthenticatorCalls(final boolean useQS)
+      throws Exception {
     final Server jetty = createJettyServer();
     Context context = new Context();
     context.setContextPath("/foo");

@@ -324,14 +346,15 @@ public class TestWebDelegationToken {

     try {
       jetty.start();
-      URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
+      final URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
       URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
       URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");

       DelegationTokenAuthenticatedURL.Token token =
           new DelegationTokenAuthenticatedURL.Token();
-      DelegationTokenAuthenticatedURL aUrl =
+      final DelegationTokenAuthenticatedURL aUrl =
           new DelegationTokenAuthenticatedURL();
+      aUrl.setUseQueryStringForDelegationToken(useQS);

       try {
         aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);

@@ -379,6 +402,27 @@ public class TestWebDelegationToken {
         Assert.assertTrue(ex.getMessage().contains("401"));
       }

+      aUrl.getDelegationToken(authURL, token, "foo");
+
+      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+      ugi.addToken(token.getDelegationToken());
+      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          HttpURLConnection conn = aUrl.openConnection(nonAuthURL, new DelegationTokenAuthenticatedURL.Token());
+          Assert.assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+          if (useQS) {
+            Assert.assertNull(conn.getHeaderField("UsingHeader"));
+            Assert.assertNotNull(conn.getHeaderField("UsingQueryString"));
+          } else {
+            Assert.assertNotNull(conn.getHeaderField("UsingHeader"));
+            Assert.assertNull(conn.getHeaderField("UsingQueryString"));
+          }
+          return null;
+        }
+      });
+
+
     } finally {
       jetty.stop();
     }
@@ -34,7 +34,6 @@
   <description>Apache Hadoop KMS</description>

   <properties>
-    <tomcat.version>6.0.36</tomcat.version>
     <kms.tomcat.dist.dir>
       ${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/kms/tomcat
     </kms.tomcat.dist.dir>
@@ -26,10 +26,10 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;

 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.DefaultValue;

@@ -73,29 +73,14 @@ public class KMS {
     kmsAudit= KMSWebApp.getKMSAudit();
   }

-  private static final String UNAUTHORIZED_MSG_WITH_KEY =
-      "User:%s not allowed to do '%s' on '%s'";
-
-  private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
-      "User:%s not allowed to do '%s'";
-
   private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi,
       KMSOp operation) throws AccessControlException {
-    assertAccess(aclType, ugi, operation, null);
+    KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, null);
   }

-  private void assertAccess(KMSACLs.Type aclType,
-      UserGroupInformation ugi, KMSOp operation, String key)
-      throws AccessControlException {
-    if (!KMSWebApp.getACLs().hasAccess(aclType, ugi)) {
-      KMSWebApp.getUnauthorizedCallsMeter().mark();
-      kmsAudit.unauthorized(ugi, operation, key);
-      throw new AuthorizationException(String.format(
-          (key != null) ? UNAUTHORIZED_MSG_WITH_KEY
-                        : UNAUTHORIZED_MSG_WITHOUT_KEY,
-          ugi.getShortUserName(), operation, key));
-    }
+  private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi,
+      KMSOp operation, String key) throws AccessControlException {
+    KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, key);
   }

   private static KeyProvider.KeyVersion removeKeyMaterial(
@@ -19,8 +19,11 @@ package org.apache.hadoop.crypto.key.kms.server;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -39,14 +42,23 @@ import java.util.concurrent.TimeUnit;
 public class KMSACLs implements Runnable {
   private static final Logger LOG = LoggerFactory.getLogger(KMSACLs.class);

+  private static final String UNAUTHORIZED_MSG_WITH_KEY =
+      "User:%s not allowed to do '%s' on '%s'";
+
+  private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
+      "User:%s not allowed to do '%s'";
+
   public enum Type {
     CREATE, DELETE, ROLLOVER, GET, GET_KEYS, GET_METADATA,
     SET_KEY_MATERIAL, GENERATE_EEK, DECRYPT_EEK;

-    public String getConfigKey() {
+    public String getAclConfigKey() {
       return KMSConfiguration.CONFIG_PREFIX + "acl." + this.toString();
     }
+
+    public String getBlacklistConfigKey() {
+      return KMSConfiguration.CONFIG_PREFIX + "blacklist." + this.toString();
+    }
   }

   public static final String ACL_DEFAULT = AccessControlList.WILDCARD_ACL_VALUE;

@@ -54,6 +66,7 @@ public class KMSACLs implements Runnable {
   public static final int RELOADER_SLEEP_MILLIS = 1000;

   private volatile Map<Type, AccessControlList> acls;
+  private volatile Map<Type, AccessControlList> blacklistedAcls;
   private ScheduledExecutorService executorService;
   private long lastReload;

@@ -70,12 +83,20 @@ public class KMSACLs implements Runnable {

   private void setACLs(Configuration conf) {
     Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
+    Map<Type, AccessControlList> tempBlacklist = new HashMap<Type, AccessControlList>();
     for (Type aclType : Type.values()) {
-      String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
+      String aclStr = conf.get(aclType.getAclConfigKey(), ACL_DEFAULT);
       tempAcls.put(aclType, new AccessControlList(aclStr));
+      String blacklistStr = conf.get(aclType.getBlacklistConfigKey());
+      if (blacklistStr != null) {
+        // Only add if blacklist is present
+        tempBlacklist.put(aclType, new AccessControlList(blacklistStr));
+        LOG.info("'{}' Blacklist '{}'", aclType, blacklistStr);
+      }
       LOG.info("'{}' ACL '{}'", aclType, aclStr);
     }
     acls = tempAcls;
+    blacklistedAcls = tempBlacklist;
   }

   @Override

@@ -109,12 +130,38 @@ public class KMSACLs implements Runnable {
     lastReload = System.currentTimeMillis();
     Configuration conf = KMSConfiguration.getACLsConf();
     // triggering the resource loading.
-    conf.get(Type.CREATE.getConfigKey());
+    conf.get(Type.CREATE.getAclConfigKey());
     return conf;
   }

+  /**
+   * First Check if user is in ACL for the KMS operation, if yes, then
+   * return true if user is not present in any configured blacklist for
+   * the operation
+   * @param type KMS Operation
+   * @param ugi UserGroupInformation of user
+   * @return true is user has access
+   */
   public boolean hasAccess(Type type, UserGroupInformation ugi) {
-    return acls.get(type).isUserAllowed(ugi);
+    boolean access = acls.get(type).isUserAllowed(ugi);
+    if (access) {
+      AccessControlList blacklist = blacklistedAcls.get(type);
+      access = (blacklist == null) || !blacklist.isUserInList(ugi);
+    }
+    return access;
+  }
+
+  public void assertAccess(KMSACLs.Type aclType,
+      UserGroupInformation ugi, KMSOp operation, String key)
+      throws AccessControlException {
+    if (!KMSWebApp.getACLs().hasAccess(aclType, ugi)) {
+      KMSWebApp.getUnauthorizedCallsMeter().mark();
+      KMSWebApp.getKMSAudit().unauthorized(ugi, operation, key);
+      throw new AuthorizationException(String.format(
+          (key != null) ? UNAUTHORIZED_MSG_WITH_KEY
+                        : UNAUTHORIZED_MSG_WITHOUT_KEY,
+          ugi.getShortUserName(), operation, key));
+    }
   }

 }
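A standalone sketch of the new ACL-then-blacklist semantics, using only classes referenced above (the user and lists are made-up values, not from this change):

    // A user allowed by a wildcard ACL is still rejected if blacklisted.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("hdfs");
    AccessControlList acl = new AccessControlList("*");         // everyone passes the ACL
    AccessControlList blacklist = new AccessControlList("hdfs,foo");
    boolean access = acl.isUserAllowed(ugi)
        && !blacklist.isUserInList(ugi);                         // false for "hdfs"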
@@ -274,8 +274,13 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   KMS ACLs configuration are defined in the KMS <<<etc/hadoop/kms-acls.xml>>>
   configuration file. This file is hot-reloaded when it changes.

-  KMS supports a fine grained access control via a set ACL
-  configuration properties:
+  KMS supports both fine grained access control and a blacklist for KMS
+  operations via a set of ACL configuration properties.
+
+  A user accessing KMS is first checked for inclusion in the Access Control
+  List for the requested operation and then checked for exclusion in the
+  Black list for the operation before access is granted.

+---+
 <property>
@@ -288,6 +293,16 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   </description>
 </property>

+<property>
+  <name>hadoop.kms.blacklist.CREATE</name>
+  <value>hdfs,foo</value>
+  <description>
+    Blacklist for create-key operations.
+    If the user is in the Blacklist, the key material is not returned
+    as part of the response.
+  </description>
+</property>
+
 <property>
   <name>hadoop.kms.acl.DELETE</name>
   <value>*</value>
@@ -296,6 +311,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   </description>
 </property>

+<property>
+  <name>hadoop.kms.blacklist.DELETE</name>
+  <value>hdfs,foo</value>
+  <description>
+    Blacklist for delete-key operations.
+  </description>
+</property>
+
 <property>
   <name>hadoop.kms.acl.ROLLOVER</name>
   <value>*</value>
@@ -306,6 +329,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   </description>
 </property>

+<property>
+  <name>hadoop.kms.blacklist.ROLLOVER</name>
+  <value>hdfs,foo</value>
+  <description>
+    Blacklist for rollover-key operations.
+  </description>
+</property>
+
 <property>
   <name>hadoop.kms.acl.GET</name>
   <value>*</value>
@@ -314,6 +345,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   </description>
 </property>

+<property>
+  <name>hadoop.kms.blacklist.GET</name>
+  <value>hdfs,foo</value>
+  <description>
+    Blacklist for get-key-version and get-current-key operations.
+  </description>
+</property>
+
 <property>
   <name>hadoop.kms.acl.GET_KEYS</name>
   <value>*</value>
@@ -322,6 +361,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   </description>
 </property>

+<property>
+  <name>hadoop.kms.blacklist.GET_KEYS</name>
+  <value>hdfs,foo</value>
+  <description>
+    Blacklist for get-keys operations.
+  </description>
+</property>
+
 <property>
   <name>hadoop.kms.acl.GET_METADATA</name>
   <value>*</value>
@@ -330,6 +377,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   </description>
 </property>

+<property>
+  <name>hadoop.kms.blacklist.GET_METADATA</name>
+  <value>hdfs,foo</value>
+  <description>
+    Blacklist for get-key-metadata and get-keys-metadata operations.
+  </description>
+</property>
+
 <property>
   <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
   <value>*</value>
@@ -339,6 +394,15 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   </description>
 </property>

+<property>
+  <name>hadoop.kms.blacklist.SET_KEY_MATERIAL</name>
+  <value>hdfs,foo</value>
+  <description>
+    Complementary Blacklist for the CREATE and ROLLOVER operations that
+    prevents the listed users from providing the key material when creating
+    or rolling a key.
+  </description>
+</property>
+
 <property>
   <name>hadoop.kms.acl.GENERATE_EEK</name>
   <value>*</value>
@@ -348,6 +412,15 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   </description>
 </property>

+<property>
+  <name>hadoop.kms.blacklist.GENERATE_EEK</name>
+  <value>hdfs,foo</value>
+  <description>
+    Blacklist for generateEncryptedKey CryptoExtension operations.
+  </description>
+</property>
+
 <property>
   <name>hadoop.kms.acl.DECRYPT_EEK</name>
   <value>*</value>
@@ -357,6 +430,17 @@ $ keytool -genkey -alias tomcat -keyalg RSA
   </description>
 </property>
-</configuration>
+
+<property>
+  <name>hadoop.kms.blacklist.DECRYPT_EEK</name>
+  <value>hdfs,foo</value>
+  <description>
+    Blacklist for decrypt EncryptedKey CryptoExtension operations.
+  </description>
+</property>
+
+</configuration>

+---+

 ** KMS Delegation Token Configuration
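The blacklist properties documented above live next to their ACL counterparts in kms-acls.xml. As a hedged illustration of how the two keys pair up for a single operation, the sketch below simply sets the equivalent key/value pairs on a Hadoop Configuration; the user names are invented, and in a real deployment the values belong in kms-acls.xml rather than in code.

import org.apache.hadoop.conf.Configuration;

public class KmsAclKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // ACL: "client" and "hdfs" may decrypt encrypted keys...
    conf.set("hadoop.kms.acl.DECRYPT_EEK", "client,hdfs");
    // ...blacklist: "hdfs" is excluded again, so only "client" keeps access.
    conf.set("hadoop.kms.blacklist.DECRYPT_EEK", "hdfs");

    System.out.println(conf.get("hadoop.kms.acl.DECRYPT_EEK"));
    System.out.println(conf.get("hadoop.kms.blacklist.DECRYPT_EEK"));
  }
}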
@@ -268,6 +268,8 @@ public class TestKMS {
     List<String> principals = new ArrayList<String>();
     principals.add("HTTP/localhost");
     principals.add("client");
+    principals.add("hdfs");
+    principals.add("otheradmin");
     principals.add("client/host");
     principals.add("client1");
     for (KMSACLs.Type type : KMSACLs.Type.values()) {
@@ -621,12 +623,12 @@ public class TestKMS {
     conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");

     for (KMSACLs.Type type : KMSACLs.Type.values()) {
-      conf.set(type.getConfigKey(), type.toString());
+      conf.set(type.getAclConfigKey(), type.toString());
     }
-    conf.set(KMSACLs.Type.CREATE.getConfigKey(),
+    conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
         KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");

-    conf.set(KMSACLs.Type.ROLLOVER.getConfigKey(),
+    conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
         KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");

     writeConf(testDir, conf);
@@ -884,7 +886,7 @@ public class TestKMS {

         // test ACL reloading
         Thread.sleep(10); // to ensure the ACLs file modifiedTime is newer
-        conf.set(KMSACLs.Type.CREATE.getConfigKey(), "foo");
+        conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "foo");
         writeConf(testDir, conf);
         Thread.sleep(1000);

@@ -914,6 +916,92 @@ public class TestKMS {
     });
   }

+  @Test
+  public void testKMSBlackList() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set("hadoop.security.authentication", "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+    File testDir = getTestDir();
+    conf = createBaseKMSConf(testDir);
+    conf.set("hadoop.kms.authentication.type", "kerberos");
+    conf.set("hadoop.kms.authentication.kerberos.keytab",
+        keytab.getAbsolutePath());
+    conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
+    conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
+    for (KMSACLs.Type type : KMSACLs.Type.values()) {
+      conf.set(type.getAclConfigKey(), " ");
+    }
+    conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client,hdfs,otheradmin");
+    conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(), "client,hdfs,otheradmin");
+    conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(), "client,hdfs,otheradmin");
+    conf.set(KMSACLs.Type.DECRYPT_EEK.getBlacklistConfigKey(), "hdfs,otheradmin");
+
+    writeConf(testDir, conf);
+
+    runServer(null, null, testDir, new KMSCallable() {
+      @Override
+      public Void call() throws Exception {
+        final Configuration conf = new Configuration();
+        conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
+        final URI uri = createKMSUri(getKMSUrl());
+
+        doAs("client", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KMSClientProvider kp = new KMSClientProvider(uri, conf);
+              KeyProvider.KeyVersion kv = kp.createKey("ck0",
+                  new KeyProvider.Options(conf));
+              EncryptedKeyVersion eek =
+                  kp.generateEncryptedKey("ck0");
+              kp.decryptEncryptedKey(eek);
+              Assert.assertNull(kv.getMaterial());
+            } catch (Exception ex) {
+              Assert.fail(ex.getMessage());
+            }
+            return null;
+          }
+        });
+
+        doAs("hdfs", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KMSClientProvider kp = new KMSClientProvider(uri, conf);
+              KeyProvider.KeyVersion kv = kp.createKey("ck1",
+                  new KeyProvider.Options(conf));
+              EncryptedKeyVersion eek =
+                  kp.generateEncryptedKey("ck1");
+              kp.decryptEncryptedKey(eek);
+              Assert.fail("admin user must not be allowed to decrypt !!");
+            } catch (Exception ex) {
+            }
+            return null;
+          }
+        });
+
+        doAs("otheradmin", new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            try {
+              KMSClientProvider kp = new KMSClientProvider(uri, conf);
+              KeyProvider.KeyVersion kv = kp.createKey("ck2",
+                  new KeyProvider.Options(conf));
+              EncryptedKeyVersion eek =
+                  kp.generateEncryptedKey("ck2");
+              kp.decryptEncryptedKey(eek);
+              Assert.fail("admin user must not be allowed to decrypt !!");
+            } catch (Exception ex) {
+            }
+            return null;
+          }
+        });
+
+        return null;
+      }
+    });
+  }
+
   @Test
   public void testServicePrincipalACLs() throws Exception {
     Configuration conf = new Configuration();
@@ -927,9 +1015,9 @@ public class TestKMS {
     conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
     conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
     for (KMSACLs.Type type : KMSACLs.Type.values()) {
-      conf.set(type.getConfigKey(), " ");
+      conf.set(type.getAclConfigKey(), " ");
     }
-    conf.set(KMSACLs.Type.CREATE.getConfigKey(), "client");
+    conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client");

     writeConf(testDir, conf);
@@ -37,7 +37,7 @@ public class TestKMSACLs {
   public void testCustom() {
     Configuration conf = new Configuration(false);
     for (KMSACLs.Type type : KMSACLs.Type.values()) {
-      conf.set(type.getConfigKey(), type.toString() + " ");
+      conf.set(type.getAclConfigKey(), type.toString() + " ");
     }
     KMSACLs acls = new KMSACLs(conf);
     for (KMSACLs.Type type : KMSACLs.Type.values()) {
@@ -53,9 +53,19 @@ public class Nfs3FileAttributes {
    * For Hadoop, currently this field is always zero.
    */
  public static class Specdata3 {
-    final static int specdata1 = 0;
-    final static int specdata2 = 0;
+    final int specdata1;
+    final int specdata2;
+
+    public Specdata3() {
+      specdata1 = 0;
+      specdata2 = 0;
+    }
+
+    public Specdata3(int specdata1, int specdata2) {
+      this.specdata1 = specdata1;
+      this.specdata2 = specdata2;
+    }

     public int getSpecdata1() {
       return specdata1;
     }
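With the two-argument constructor added above, Specdata3 can now carry real device numbers instead of the old hard-coded zeros. A small sketch using only the constructors and getters shown in this hunk; the major/minor values 8 and 1 are arbitrary.

import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes.Specdata3;

public class Specdata3Sketch {
  public static void main(String[] args) {
    Specdata3 zero = new Specdata3();     // old behaviour: both fields are zero
    Specdata3 dev = new Specdata3(8, 1);  // e.g. device numbers for a MKNOD request

    System.out.println(zero.getSpecdata1() + "," + zero.getSpecdata2()); // 0,0
    System.out.println(dev.getSpecdata1() + "," + dev.getSpecdata2());   // 8,1
  }
}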
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
  * ACCESS3 Request
  */
 public class ACCESS3Request extends RequestWithHandle {
-  public ACCESS3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static ACCESS3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new ACCESS3Request(handle);
+  }
+
+  public ACCESS3Request(FileHandle handle) {
+    super(handle);
+  }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
   }
 }
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
@@ -28,10 +29,17 @@ public class COMMIT3Request extends RequestWithHandle {
   private final long offset;
   private final int count;

-  public COMMIT3Request(XDR xdr) throws IOException {
-    super(xdr);
-    offset = xdr.readHyper();
-    count = xdr.readInt();
+  public static COMMIT3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long offset = xdr.readHyper();
+    int count = xdr.readInt();
+    return new COMMIT3Request(handle, offset, count);
+  }
+
+  public COMMIT3Request(FileHandle handle, long offset, int count) {
+    super(handle);
+    this.offset = offset;
+    this.count = count;
   }

   public long getOffset() {
@@ -41,4 +49,11 @@ public class COMMIT3Request extends RequestWithHandle {
   public int getCount() {
     return this.count;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeLongAsHyper(offset);
+    xdr.writeInt(count);
+  }
 }
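The COMMIT3Request change above is the template for the rest of this patch: the XDR-consuming constructor becomes a static deserialize() factory, a plain constructor takes the already-parsed fields, and serialize() writes them back in the same order. A hedged usage sketch follows; the file-handle id, offset and count are arbitrary, and the no-arg XDR constructor from the oncrpc package is an assumption.

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request;
import org.apache.hadoop.oncrpc.XDR;

public class Commit3RequestSketch {
  public static void main(String[] args) {
    // Build the request directly instead of parsing it out of an XDR stream.
    COMMIT3Request commit = new COMMIT3Request(new FileHandle(1L), 4096L, 512);

    // serialize() writes handle, offset and count, mirroring what
    // deserialize(XDR) reads back on the other side.
    XDR xdr = new XDR();
    commit.serialize(xdr);

    System.out.println("offset=" + commit.getOffset()
        + " count=" + commit.getCount());
  }
}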
@@ -29,8 +29,8 @@ import org.apache.hadoop.oncrpc.XDR;
 public class CREATE3Request extends RequestWithHandle {
   private final String name;
   private final int mode;
-  private SetAttr3 objAttr = null;
-  private long verf;
+  private final SetAttr3 objAttr;
+  private long verf = 0;

   public CREATE3Request(FileHandle handle, String name, int mode,
       SetAttr3 objAttr, long verf) {
@@ -41,12 +41,12 @@ public class CREATE3Request extends RequestWithHandle {
     this.verf = verf;
   }

-  public CREATE3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
-    mode = xdr.readInt();
-    objAttr = new SetAttr3();
+  public static CREATE3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    int mode = xdr.readInt();
+    SetAttr3 objAttr = new SetAttr3();
+    long verf = 0;
     if ((mode == Nfs3Constant.CREATE_UNCHECKED)
         || (mode == Nfs3Constant.CREATE_GUARDED)) {
       objAttr.deserialize(xdr);
@@ -55,6 +55,7 @@ public class CREATE3Request extends RequestWithHandle {
     } else {
       throw new IOException("Wrong create mode:" + mode);
     }
+    return new CREATE3Request(handle, name, mode, objAttr, verf);
   }

   public String getName() {
@@ -81,4 +82,5 @@ public class CREATE3Request extends RequestWithHandle {
     xdr.writeInt(mode);
     objAttr.serialize(xdr);
   }
+
 }
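CREATE3Request keeps its existing five-argument constructor (shown as context above), so callers can build an unchecked create directly. A hedged sketch, assuming Nfs3Constant.CREATE_UNCHECKED and a default SetAttr3; the handle id and file name are made up.

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;

public class Create3RequestSketch {
  public static void main(String[] args) {
    // UNCHECKED create: attributes are sent along and the exclusive-create
    // verifier stays 0, matching the deserialize() branch above.
    CREATE3Request create = new CREATE3Request(new FileHandle(2L), "file1",
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0L);

    System.out.println("creating " + create.getName());
  }
}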
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
  * FSINFO3 Request
  */
 public class FSINFO3Request extends RequestWithHandle {
-  public FSINFO3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static FSINFO3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new FSINFO3Request(handle);
+  }
+
+  public FSINFO3Request(FileHandle handle) {
+    super(handle);
+  }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
   }
 }
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
  * FSSTAT3 Request
  */
 public class FSSTAT3Request extends RequestWithHandle {
-  public FSSTAT3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static FSSTAT3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new FSSTAT3Request(handle);
+  }
+
+  public FSSTAT3Request(FileHandle handle) {
+    super(handle);
+  }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
   }
 }
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
  * GETATTR3 Request
  */
 public class GETATTR3Request extends RequestWithHandle {
-  public GETATTR3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static GETATTR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new GETATTR3Request(handle);
+  }
+
+  public GETATTR3Request(FileHandle handle) {
+    super(handle);
+  }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
   }
 }
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.nfs.nfs3.request;
+
+import java.io.IOException;
+
+import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.oncrpc.XDR;
+
+/**
+ * LINK3 Request
+ */
+public class LINK3Request extends RequestWithHandle {
+  private final FileHandle fromDirHandle;
+  private final String fromName;
+
+  public LINK3Request(FileHandle handle, FileHandle fromDirHandle,
+      String fromName) {
+    super(handle);
+    this.fromDirHandle = fromDirHandle;
+    this.fromName = fromName;
+  }
+
+  public static LINK3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    FileHandle fromDirHandle = readHandle(xdr);
+    String fromName = xdr.readString();
+    return new LINK3Request(handle, fromDirHandle, fromName);
+  }
+
+  public FileHandle getFromDirHandle() {
+    return fromDirHandle;
+  }
+
+  public String getFromName() {
+    return fromName;
+  }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    fromDirHandle.serialize(xdr);
+    xdr.writeInt(fromName.length());
+    xdr.writeFixedOpaque(fromName.getBytes(), fromName.length());
+  }
+}
@@ -35,9 +35,10 @@ public class LOOKUP3Request extends RequestWithHandle {
     this.name = name;
   }

-  public LOOKUP3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
+  public static LOOKUP3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    return new LOOKUP3Request(handle, name);
   }

   public String getName() {
@@ -51,7 +52,7 @@ public class LOOKUP3Request extends RequestWithHandle {
   @Override
   @VisibleForTesting
   public void serialize(XDR xdr) {
-    super.serialize(xdr);
+    handle.serialize(xdr);
     xdr.writeInt(name.getBytes().length);
     xdr.writeFixedOpaque(name.getBytes());
   }
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
@@ -28,13 +29,20 @@ public class MKDIR3Request extends RequestWithHandle {
   private final String name;
   private final SetAttr3 objAttr;

-  public MKDIR3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
-    objAttr = new SetAttr3();
+  public static MKDIR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    SetAttr3 objAttr = new SetAttr3();
     objAttr.deserialize(xdr);
+    return new MKDIR3Request(handle, name, objAttr);
+  }
+
+  public MKDIR3Request(FileHandle handle, String name, SetAttr3 objAttr) {
+    super(handle);
+    this.name = name;
+    this.objAttr = objAttr;
   }

   public String getName() {
     return name;
   }
@@ -42,4 +50,12 @@ public class MKDIR3Request extends RequestWithHandle {
   public SetAttr3 getObjAttr() {
     return objAttr;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeInt(name.getBytes().length);
+    xdr.writeFixedOpaque(name.getBytes());
+    objAttr.serialize(xdr);
+  }
 }
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.nfs.nfs3.request;
+
+import java.io.IOException;
+
+import org.apache.hadoop.nfs.NfsFileType;
+import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes.Specdata3;
+import org.apache.hadoop.oncrpc.XDR;
+
+/**
+ * MKNOD3 Request
+ */
+public class MKNOD3Request extends RequestWithHandle {
+  private final String name;
+  private int type;
+  private SetAttr3 objAttr = null;
+  private Specdata3 spec = null;
+
+  public MKNOD3Request(FileHandle handle, String name, int type,
+      SetAttr3 objAttr, Specdata3 spec) {
+    super(handle);
+    this.name = name;
+    this.type = type;
+    this.objAttr = objAttr;
+    this.spec = spec;
+  }
+
+  public static MKNOD3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    int type = xdr.readInt();
+    SetAttr3 objAttr = new SetAttr3();
+    Specdata3 spec = null;
+    if (type == NfsFileType.NFSCHR.toValue()
+        || type == NfsFileType.NFSBLK.toValue()) {
+      objAttr.deserialize(xdr);
+      spec = new Specdata3(xdr.readInt(), xdr.readInt());
+    } else if (type == NfsFileType.NFSSOCK.toValue()
+        || type == NfsFileType.NFSFIFO.toValue()) {
+      objAttr.deserialize(xdr);
+    }
+    return new MKNOD3Request(handle, name, type, objAttr, spec);
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public int getType() {
+    return type;
+  }
+
+  public SetAttr3 getObjAttr() {
+    return objAttr;
+  }
+
+  public Specdata3 getSpec() {
+    return spec;
+  }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeInt(name.length());
+    xdr.writeFixedOpaque(name.getBytes(), name.length());
+    objAttr.serialize(xdr);
+    if (spec != null) {
+      xdr.writeInt(spec.getSpecdata1());
+      xdr.writeInt(spec.getSpecdata2());
+    }
+  }
+}
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.nfs.nfs3.request;
+
+import java.io.IOException;
+
+import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.oncrpc.XDR;
+
+/**
+ * An NFS request that uses {@link FileHandle} to identify a file.
+ */
+public abstract class NFS3Request {
+
+  /**
+   * Deserialize a handle from an XDR object
+   */
+  static FileHandle readHandle(XDR xdr) throws IOException {
+    FileHandle handle = new FileHandle();
+    if (!handle.deserialize(xdr)) {
+      throw new IOException("can't deserialize file handle");
+    }
+    return handle;
+  }
+
+  /**
+   * Subclass should implement. Usually handle is the first to be serialized
+   */
+  public abstract void serialize(XDR xdr);
+}
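NFS3Request centralizes the handle parsing that every request class used to duplicate. The sketch below shows the pattern its subclasses follow; the class itself is hypothetical and is declared in the same package because readHandle() and the RequestWithHandle constructor are package-private.

package org.apache.hadoop.nfs.nfs3.request;

import java.io.IOException;

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;

// Hypothetical request with one extra int field, illustrating the
// deserialize-factory plus serialize() pairing used by the real classes.
public class EXAMPLE3Request extends RequestWithHandle {
  private final int flags;

  public EXAMPLE3Request(FileHandle handle, int flags) {
    super(handle);
    this.flags = flags;
  }

  public static EXAMPLE3Request deserialize(XDR xdr) throws IOException {
    FileHandle handle = readHandle(xdr); // provided by NFS3Request
    int flags = xdr.readInt();
    return new EXAMPLE3Request(handle, flags);
  }

  @Override
  public void serialize(XDR xdr) {
    handle.serialize(xdr); // handle first, as the base-class javadoc suggests
    xdr.writeInt(flags);
  }
}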
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
  * PATHCONF3 Request
  */
 public class PATHCONF3Request extends RequestWithHandle {
-  public PATHCONF3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static PATHCONF3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new PATHCONF3Request(handle);
+  }
+
+  public PATHCONF3Request(FileHandle handle) {
+    super(handle);
+  }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
   }
 }
@@ -31,10 +31,11 @@ public class READ3Request extends RequestWithHandle {
   private final long offset;
   private final int count;

-  public READ3Request(XDR xdr) throws IOException {
-    super(xdr);
-    offset = xdr.readHyper();
-    count = xdr.readInt();
+  public static READ3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long offset = xdr.readHyper();
+    int count = xdr.readInt();
+    return new READ3Request(handle, offset, count);
   }

   @VisibleForTesting
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
@@ -29,13 +30,22 @@ public class READDIR3Request extends RequestWithHandle {
   private final long cookieVerf;
   private final int count;

-  public READDIR3Request(XDR xdr) throws IOException {
-    super(xdr);
-    cookie = xdr.readHyper();
-    cookieVerf = xdr.readHyper();
-    count = xdr.readInt();
+  public static READDIR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long cookie = xdr.readHyper();
+    long cookieVerf = xdr.readHyper();
+    int count = xdr.readInt();
+    return new READDIR3Request(handle, cookie, cookieVerf, count);
+  }
+
+  public READDIR3Request(FileHandle handle, long cookie, long cookieVerf,
+      int count) {
+    super(handle);
+    this.cookie = cookie;
+    this.cookieVerf = cookieVerf;
+    this.count = count;
   }

   public long getCookie() {
     return this.cookie;
   }
@@ -47,4 +57,12 @@ public class READDIR3Request extends RequestWithHandle {
   public long getCount() {
     return this.count;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeLongAsHyper(cookie);
+    xdr.writeLongAsHyper(cookieVerf);
+    xdr.writeInt(count);
+  }
 }
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
@@ -30,14 +31,25 @@ public class READDIRPLUS3Request extends RequestWithHandle {
   private final int dirCount;
   private final int maxCount;

-  public READDIRPLUS3Request(XDR xdr) throws IOException {
-    super(xdr);
-    cookie = xdr.readHyper();
-    cookieVerf = xdr.readHyper();
-    dirCount = xdr.readInt();
-    maxCount = xdr.readInt();
+  public static READDIRPLUS3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long cookie = xdr.readHyper();
+    long cookieVerf = xdr.readHyper();
+    int dirCount = xdr.readInt();
+    int maxCount = xdr.readInt();
+    return new READDIRPLUS3Request(handle, cookie, cookieVerf, dirCount,
+        maxCount);
+  }
+
+  public READDIRPLUS3Request(FileHandle handle, long cookie, long cookieVerf,
+      int dirCount, int maxCount) {
+    super(handle);
+    this.cookie = cookie;
+    this.cookieVerf = cookieVerf;
+    this.dirCount = dirCount;
+    this.maxCount = maxCount;
   }

   public long getCookie() {
     return this.cookie;
   }
@@ -53,4 +65,13 @@ public class READDIRPLUS3Request extends RequestWithHandle {
   public int getMaxCount() {
     return maxCount;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeLongAsHyper(cookie);
+    xdr.writeLongAsHyper(cookieVerf);
+    xdr.writeInt(dirCount);
+    xdr.writeInt(maxCount);
+  }
 }
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
@@ -26,7 +27,17 @@ import org.apache.hadoop.oncrpc.XDR;
  */
 public class READLINK3Request extends RequestWithHandle {

-  public READLINK3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static READLINK3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new READLINK3Request(handle);
+  }
+
+  public READLINK3Request(FileHandle handle) {
+    super(handle);
+  }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
   }
 }
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
@@ -27,12 +28,25 @@ import org.apache.hadoop.oncrpc.XDR;
 public class REMOVE3Request extends RequestWithHandle {
   private final String name;

-  public REMOVE3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
+  public static REMOVE3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    return new REMOVE3Request(handle, name);
+  }
+
+  public REMOVE3Request(FileHandle handle, String name) {
+    super(handle);
+    this.name = name;
   }

   public String getName() {
     return this.name;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeInt(name.getBytes().length);
+    xdr.writeFixedOpaque(name.getBytes());
+  }
 }
@@ -25,23 +25,26 @@ import org.apache.hadoop.oncrpc.XDR;
 /**
  * RENAME3 Request
  */
-public class RENAME3Request {
+public class RENAME3Request extends NFS3Request {
   private final FileHandle fromDirHandle;
   private final String fromName;
   private final FileHandle toDirHandle;
   private final String toName;

-  public RENAME3Request(XDR xdr) throws IOException {
-    fromDirHandle = new FileHandle();
-    if (!fromDirHandle.deserialize(xdr)) {
-      throw new IOException("can't deserialize file handle");
-    }
-    fromName = xdr.readString();
-    toDirHandle = new FileHandle();
-    if (!toDirHandle.deserialize(xdr)) {
-      throw new IOException("can't deserialize file handle");
-    }
-    toName = xdr.readString();
+  public static RENAME3Request deserialize(XDR xdr) throws IOException {
+    FileHandle fromDirHandle = readHandle(xdr);
+    String fromName = xdr.readString();
+    FileHandle toDirHandle = readHandle(xdr);
+    String toName = xdr.readString();
+    return new RENAME3Request(fromDirHandle, fromName, toDirHandle, toName);
+  }
+
+  public RENAME3Request(FileHandle fromDirHandle, String fromName,
+      FileHandle toDirHandle, String toName) {
+    this.fromDirHandle = fromDirHandle;
+    this.fromName = fromName;
+    this.toDirHandle = toDirHandle;
+    this.toName = toName;
   }

   public FileHandle getFromDirHandle() {
@@ -59,4 +62,14 @@ public class RENAME3Request {
   public String getToName() {
     return toName;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    fromDirHandle.serialize(xdr);
+    xdr.writeInt(fromName.getBytes().length);
+    xdr.writeFixedOpaque(fromName.getBytes());
+    toDirHandle.serialize(xdr);
+    xdr.writeInt(toName.getBytes().length);
+    xdr.writeFixedOpaque(toName.getBytes());
+  }
 }
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;

 import java.io.IOException;

+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
@@ -27,12 +28,25 @@ import org.apache.hadoop.oncrpc.XDR;
 public class RMDIR3Request extends RequestWithHandle {
   private final String name;

-  public RMDIR3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
+  public static RMDIR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    return new RMDIR3Request(handle, name);
+  }
+
+  public RMDIR3Request(FileHandle handle, String name) {
+    super(handle);
+    this.name = name;
   }

   public String getName() {
     return this.name;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeInt(name.getBytes().length);
+    xdr.writeFixedOpaque(name.getBytes());
+  }
 }
@@ -17,33 +17,19 @@
  */
 package org.apache.hadoop.nfs.nfs3.request;

-import java.io.IOException;
-
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.oncrpc.XDR;

 /**
  * An NFS request that uses {@link FileHandle} to identify a file.
  */
-public class RequestWithHandle {
+public abstract class RequestWithHandle extends NFS3Request {
   protected final FileHandle handle;

   RequestWithHandle(FileHandle handle) {
     this.handle = handle;
   }

-  RequestWithHandle(XDR xdr) throws IOException {
-    handle = new FileHandle();
-    if (!handle.deserialize(xdr)) {
-      throw new IOException("can't deserialize file handle");
-    }
-  }
-
   public FileHandle getHandle() {
     return this.handle;
   }
-
-  public void serialize(XDR xdr) {
-    handle.serialize(xdr);
-  }
 }
@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 import java.io.IOException;

 import org.apache.hadoop.nfs.NfsTime;
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;

 /**
@@ -38,16 +39,26 @@ public class SETATTR3Request extends RequestWithHandle {
   private final boolean check;
   private final NfsTime ctime;

-  public SETATTR3Request(XDR xdr) throws IOException {
-    super(xdr);
-    attr = new SetAttr3();
+  public static SETATTR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    SetAttr3 attr = new SetAttr3();
     attr.deserialize(xdr);
-    check = xdr.readBoolean();
+    boolean check = xdr.readBoolean();
+    NfsTime ctime;
     if (check) {
       ctime = NfsTime.deserialize(xdr);
     } else {
       ctime = null;
     }
+    return new SETATTR3Request(handle, attr, check, ctime);
+  }
+
+  public SETATTR3Request(FileHandle handle, SetAttr3 attr, boolean check,
+      NfsTime ctime) {
+    super(handle);
+    this.attr = attr;
+    this.check = check;
+    this.ctime = ctime;
   }

   public SetAttr3 getAttr() {
@@ -61,4 +72,14 @@ public class SETATTR3Request extends RequestWithHandle {
   public NfsTime getCtime() {
     return ctime;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    attr.serialize(xdr);
+    xdr.writeBoolean(check);
+    if (check) {
+      ctime.serialize(xdr);
+    }
+  }
 }
|
@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
|
||||||
|
import org.apache.hadoop.nfs.nfs3.FileHandle;
|
||||||
import org.apache.hadoop.oncrpc.XDR;
|
import org.apache.hadoop.oncrpc.XDR;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -29,14 +30,23 @@ public class SYMLINK3Request extends RequestWithHandle {
|
||||||
private final SetAttr3 symAttr;
|
private final SetAttr3 symAttr;
|
||||||
private final String symData; // It contains the target
|
private final String symData; // It contains the target
|
||||||
|
|
||||||
public SYMLINK3Request(XDR xdr) throws IOException {
|
public static SYMLINK3Request deserialize(XDR xdr) throws IOException {
|
||||||
super(xdr);
|
FileHandle handle = readHandle(xdr);
|
||||||
name = xdr.readString();
|
String name = xdr.readString();
|
||||||
symAttr = new SetAttr3();
|
SetAttr3 symAttr = new SetAttr3();
|
||||||
symAttr.deserialize(xdr);
|
symAttr.deserialize(xdr);
|
||||||
symData = xdr.readString();
|
String symData = xdr.readString();
|
||||||
|
return new SYMLINK3Request(handle, name, symAttr, symData);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public SYMLINK3Request(FileHandle handle, String name, SetAttr3 symAttr,
|
||||||
|
String symData) {
|
||||||
|
super(handle);
|
||||||
|
this.name = name;
|
||||||
|
this.symAttr = symAttr;
|
||||||
|
this.symData = symData;
|
||||||
|
}
|
||||||
|
|
||||||
public String getName() {
|
public String getName() {
|
||||||
return name;
|
return name;
|
||||||
}
|
}
|
||||||
|
@ -48,4 +58,14 @@ public class SYMLINK3Request extends RequestWithHandle {
|
||||||
public String getSymData() {
|
public String getSymData() {
|
||||||
return symData;
|
return symData;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void serialize(XDR xdr) {
|
||||||
|
handle.serialize(xdr);
|
||||||
|
xdr.writeInt(name.getBytes().length);
|
||||||
|
xdr.writeFixedOpaque(name.getBytes());
|
||||||
|
symAttr.serialize(xdr);
|
||||||
|
xdr.writeInt(symData.getBytes().length);
|
||||||
|
xdr.writeFixedOpaque(symData.getBytes());
|
||||||
|
}
|
||||||
}
|
}
|
|
@ -52,6 +52,15 @@ public class SetAttr3 {
|
||||||
size = 0;
|
size = 0;
|
||||||
updateFields = EnumSet.noneOf(SetAttrField.class);
|
updateFields = EnumSet.noneOf(SetAttrField.class);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public SetAttr3(int mode, int uid, int gid, long size, NfsTime atime,
|
||||||
|
NfsTime mtime, EnumSet<SetAttrField> updateFields) {
|
||||||
|
this.mode = mode;
|
||||||
|
this.uid = uid;
|
||||||
|
this.gid = gid;
|
||||||
|
this.size = size;
|
||||||
|
this.updateFields = updateFields;
|
||||||
|
}
|
||||||
|
|
||||||
public int getMode() {
|
public int getMode() {
|
||||||
return mode;
|
return mode;
|
||||||
|
|
|
@@ -33,12 +33,13 @@ public class WRITE3Request extends RequestWithHandle {
   private final WriteStableHow stableHow;
   private final ByteBuffer data;

-  public WRITE3Request(XDR xdr) throws IOException {
-    super(xdr);
-    offset = xdr.readHyper();
-    count = xdr.readInt();
-    stableHow = WriteStableHow.fromValue(xdr.readInt());
-    data = ByteBuffer.wrap(xdr.readFixedOpaque(xdr.readInt()));
+  public static WRITE3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long offset = xdr.readHyper();
+    int count = xdr.readInt();
+    WriteStableHow stableHow = WriteStableHow.fromValue(xdr.readInt());
+    ByteBuffer data = ByteBuffer.wrap(xdr.readFixedOpaque(xdr.readInt()));
+    return new WRITE3Request(handle, offset, count, stableHow, data);
   }

   public WRITE3Request(FileHandle handle, final long offset, final int count,
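For WRITE3Request only the first line of the existing constructor is visible in this hunk; the sketch below assumes its full signature is (handle, offset, count, stableHow, data) and that a getCount() accessor exists. A hedged construction example with an arbitrary four-byte payload:

import java.nio.ByteBuffer;

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;

public class Write3RequestSketch {
  public static void main(String[] args) {
    byte[] payload = {1, 2, 3, 4};
    // Assumed constructor order: handle, offset, count, stableHow, data.
    WRITE3Request write = new WRITE3Request(new FileHandle(3L), 0L,
        payload.length, WriteStableHow.UNSTABLE, ByteBuffer.wrap(payload));

    System.out.println("count=" + write.getCount());
  }
}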
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.nfs.nfs3.response;
+
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
+
+public class LINK3Response extends NFS3Response {
+  private final WccData fromDirWcc;
+  private final WccData linkDirWcc;
+
+  public LINK3Response(int status) {
+    this(status, new WccData(null, null), new WccData(null, null));
+  }
+
+  public LINK3Response(int status, WccData fromDirWcc,
+      WccData linkDirWcc) {
+    super(status);
+    this.fromDirWcc = fromDirWcc;
+    this.linkDirWcc = linkDirWcc;
+  }
+
+  public WccData getFromDirWcc() {
+    return fromDirWcc;
+  }
+
+  public WccData getLinkDirWcc() {
+    return linkDirWcc;
+  }
+
+  @Override
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
+    fromDirWcc.serialize(out);
+    linkDirWcc.serialize(out);
+
+    return out;
+  }
+}
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.nfs.nfs3.response;
+
+import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
+import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
+
+public class MKNOD3Response extends NFS3Response {
+  private final FileHandle objFileHandle;
+  private final Nfs3FileAttributes objPostOpAttr;
+  private final WccData dirWcc;
+
+  public MKNOD3Response(int status) {
+    this(status, null, null, new WccData(null, null));
+  }
+
+  public MKNOD3Response(int status, FileHandle handle,
+      Nfs3FileAttributes attrs, WccData dirWcc) {
+    super(status);
+    this.objFileHandle = handle;
+    this.objPostOpAttr = attrs;
+    this.dirWcc = dirWcc;
+  }
+
+  public FileHandle getObjFileHandle() {
+    return objFileHandle;
+  }
+
+  public Nfs3FileAttributes getObjPostOpAttr() {
+    return objPostOpAttr;
+  }
+
+  public WccData getDirWcc() {
+    return dirWcc;
+  }
+
+  @Override
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
+    if (this.getStatus() == Nfs3Status.NFS3_OK) {
+      out.writeBoolean(true);
+      objFileHandle.serialize(out);
+      out.writeBoolean(true);
+      objPostOpAttr.serialize(out);
+    }
+    dirWcc.serialize(out);
+
+    return out;
+  }
+}
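For reference, a response object like the MKNOD3Response added above is normally built by the RPC handler and flushed to the wire through writeHeaderAndResponse. The fragment below is a minimal illustrative sketch only, not code from this commit: it assumes the gateway answers MKNOD with NFS3ERR_NOTSUPP (as it does for other unsupported operations) and that a VerifierNone verifier is acceptable; the xid value is a made-up placeholder.

    // Illustrative sketch, not part of the commit: emitting an MKNOD3Response.
    int xid = 42;                          // placeholder RPC transaction id
    XDR out = new XDR();
    MKNOD3Response response = new MKNOD3Response(Nfs3Status.NFS3ERR_NOTSUPP);
    // Writes the RPC reply header, then the status/handle/attr/wcc body shown above.
    response.writeHeaderAndResponse(out, xid, new VerifierNone());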
@@ -114,6 +114,9 @@
 run rm -rf hadoop-${project.version}
 run mkdir hadoop-${project.version}
 run cd hadoop-${project.version}
+run cp $ROOT/LICENSE.txt .
+run cp $ROOT/NOTICE.txt .
+run cp $ROOT/README.txt .
 run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* .
 run cp -r $ROOT/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${project.version}/* .
 run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .
@@ -34,7 +34,6 @@
   <description>Apache Hadoop HttpFS</description>
 
   <properties>
-    <tomcat.version>6.0.36</tomcat.version>
     <httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
     <httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
     <httpfs.source.revision>REVISION NOT AVAIL</httpfs.source.revision>
@@ -421,7 +421,7 @@ class OpenFileCtx {
     if (existantWriteCtx != null) {
       if (!existantWriteCtx.getReplied()) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Repeated write request which hasn't be served: xid="
+          LOG.debug("Repeated write request which hasn't been served: xid="
               + xid + ", drop it.");
         }
       } else {
@@ -579,7 +579,7 @@ class OpenFileCtx {
    * writing, and there is no other threads writing (i.e., asyncStatus is
    * false), start the writing and set asyncStatus to true.
    *
-   * @return True if the new write is sequencial and we can start writing
+   * @return True if the new write is sequential and we can start writing
    *         (including the case that there is already a thread writing).
    */
   private synchronized boolean checkAndStartWrite(
@@ -898,7 +898,7 @@ class OpenFileCtx {
       long offset = nextOffset.get();
       if (range.getMin() > offset) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("The next sequencial write has not arrived yet");
+          LOG.debug("The next sequential write has not arrived yet");
         }
         processCommits(nextOffset.get()); // handle race
         this.asyncStatus = false;
@@ -268,7 +268,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     GETATTR3Request request = null;
     try {
-      request = new GETATTR3Request(xdr);
+      request = GETATTR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid GETATTR request");
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
@@ -360,7 +360,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     SETATTR3Request request = null;
     try {
-      request = new SETATTR3Request(xdr);
+      request = SETATTR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid SETATTR request");
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
@@ -445,7 +445,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     LOOKUP3Request request = null;
    try {
-      request = new LOOKUP3Request(xdr);
+      request = LOOKUP3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid LOOKUP request");
       return new LOOKUP3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -513,7 +513,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     ACCESS3Request request = null;
     try {
-      request = new ACCESS3Request(xdr);
+      request = ACCESS3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid ACCESS request");
       return new ACCESS3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -581,7 +581,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     READLINK3Request request = null;
 
     try {
-      request = new READLINK3Request(xdr);
+      request = READLINK3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid READLINK request");
       return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -655,7 +655,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     READ3Request request = null;
 
     try {
-      request = new READ3Request(xdr);
+      request = READ3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid READ request");
       return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -788,7 +788,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     WRITE3Request request = null;
 
     try {
-      request = new WRITE3Request(xdr);
+      request = WRITE3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid WRITE request");
       return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -870,7 +870,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     CREATE3Request request = null;
 
     try {
-      request = new CREATE3Request(xdr);
+      request = CREATE3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid CREATE request");
       return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1003,7 +1003,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     MKDIR3Request request = null;
 
     try {
-      request = new MKDIR3Request(xdr);
+      request = MKDIR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid MKDIR request");
       return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1099,7 +1099,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     REMOVE3Request request = null;
     try {
-      request = new REMOVE3Request(xdr);
+      request = REMOVE3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid REMOVE request");
       return new REMOVE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1179,7 +1179,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     RMDIR3Request request = null;
     try {
-      request = new RMDIR3Request(xdr);
+      request = RMDIR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid RMDIR request");
       return new RMDIR3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1264,7 +1264,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     RENAME3Request request = null;
     try {
-      request = new RENAME3Request(xdr);
+      request = RENAME3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid RENAME request");
       return new RENAME3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1360,7 +1360,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     SYMLINK3Request request = null;
     try {
-      request = new SYMLINK3Request(xdr);
+      request = SYMLINK3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid SYMLINK request");
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
@@ -1423,7 +1423,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           throw io;
         }
         // This happens when startAfter was just deleted
-        LOG.info("Cookie cound't be found: " + new String(startAfter)
+        LOG.info("Cookie couldn't be found: " + new String(startAfter)
             + ", do listing from beginning");
         dlisting = dfsClient
             .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
@@ -1453,7 +1453,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     READDIR3Request request = null;
     try {
-      request = new READDIR3Request(xdr);
+      request = READDIR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid READDIR request");
       return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1611,7 +1611,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     READDIRPLUS3Request request = null;
     try {
-      request = new READDIRPLUS3Request(xdr);
+      request = READDIRPLUS3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid READDIRPLUS request");
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1788,7 +1788,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     FSSTAT3Request request = null;
     try {
-      request = new FSSTAT3Request(xdr);
+      request = FSSTAT3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid FSSTAT request");
       return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1862,7 +1862,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     FSINFO3Request request = null;
     try {
-      request = new FSINFO3Request(xdr);
+      request = FSINFO3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid FSINFO request");
       return new FSINFO3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1926,7 +1926,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     PATHCONF3Request request = null;
     try {
-      request = new PATHCONF3Request(xdr);
+      request = PATHCONF3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid PATHCONF request");
       return new PATHCONF3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1977,7 +1977,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     COMMIT3Request request = null;
     try {
-      request = new COMMIT3Request(xdr);
+      request = COMMIT3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid COMMIT request");
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
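The hunks above all apply the same HDFS-6892 change: instead of parsing the XDR stream inside a request constructor, each handler now calls a static deserialize factory on the request class, which pairs with a serialize packaging method used by clients and by tests such as TestRpcProgramNfs3 below. The sketch that follows is illustrative only and not code from this commit; the class name EXAMPLE3Request, its handle-only layout, and the assumption that FileHandle offers a deserialize(XDR) counterpart to serialize(XDR) are all assumptions.

    // Illustrative sketch (not from this commit) of the deserialize/serialize
    // pairing the handlers above switch to, for a hypothetical handle-only request.
    import java.io.IOException;
    import org.apache.hadoop.nfs.nfs3.FileHandle;
    import org.apache.hadoop.oncrpc.XDR;

    public class EXAMPLE3Request {
      private final FileHandle handle;

      public EXAMPLE3Request(FileHandle handle) {
        this.handle = handle;
      }

      // Factory used by the RPC program: parse the request body from XDR.
      public static EXAMPLE3Request deserialize(XDR xdr) throws IOException {
        FileHandle handle = new FileHandle();
        handle.deserialize(xdr);  // assumed FileHandle XDR read method
        return new EXAMPLE3Request(handle);
      }

      // Packaging method used by tests and clients: write the body back to XDR.
      public void serialize(XDR xdr) {
        handle.serialize(xdr);
      }
    }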
@@ -17,12 +17,71 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
+import java.util.EnumSet;
+
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
+import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.nfs.nfs3.request.ACCESS3Request;
+import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request;
+import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
+import org.apache.hadoop.nfs.nfs3.request.FSINFO3Request;
+import org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request;
+import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request;
+import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request;
+import org.apache.hadoop.nfs.nfs3.request.MKDIR3Request;
+import org.apache.hadoop.nfs.nfs3.request.PATHCONF3Request;
+import org.apache.hadoop.nfs.nfs3.request.READ3Request;
+import org.apache.hadoop.nfs.nfs3.request.READDIR3Request;
+import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request;
+import org.apache.hadoop.nfs.nfs3.request.READLINK3Request;
+import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request;
+import org.apache.hadoop.nfs.nfs3.request.RENAME3Request;
+import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request;
+import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request;
+import org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request;
+import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
+import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField;
+import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
+import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response;
+import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
+import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
+import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response;
+import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response;
+import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response;
+import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response;
+import org.apache.hadoop.nfs.nfs3.response.MKDIR3Response;
+import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response;
+import org.apache.hadoop.nfs.nfs3.response.READ3Response;
+import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
+import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
+import org.apache.hadoop.nfs.nfs3.response.READLINK3Response;
+import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
+import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
+import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
+import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response;
+import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
+import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.jboss.netty.channel.Channel;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -31,46 +90,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.Mockito;
-
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
-import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
-import org.apache.hadoop.nfs.nfs3.Nfs3Status;
-import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request;
-import org.apache.hadoop.nfs.nfs3.request.READ3Request;
-import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
-import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response;
-import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
-import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
-import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response;
-import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response;
-import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response;
-import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response;
-import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response;
-import org.apache.hadoop.nfs.nfs3.response.READ3Response;
-import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
-import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
-import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
-import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
-import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
-import org.apache.hadoop.nfs.nfs3.response.READLINK3Response;
-import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response;
-import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
-import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
-import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
-import org.apache.hadoop.oncrpc.XDR;
-import org.apache.hadoop.oncrpc.security.SecurityHandler;
-import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
-import org.apache.hadoop.security.authorize.ProxyUsers;
 
 
 /**
  * Tests for {@link RpcProgramNfs3}
@@ -143,8 +162,9 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
+    GETATTR3Request req = new GETATTR3Request(handle);
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -165,13 +185,12 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("bar");
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
-    xdr_req.writeBoolean(false);
+    SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null,
+        EnumSet.of(SetAttrField.UID));
+    SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
+    req.serialize(xdr_req);
 
-    // Attempt by an unpriviledged user should fail.
+    // Attempt by an unprivileged user should fail.
     SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
@@ -214,7 +233,8 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
+    ACCESS3Request req = new ACCESS3Request(handle);
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(),
@@ -237,12 +257,10 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
-    xdr_req.writeString("bar");
+    SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
+        "bar");
+    req.serialize(xdr_req);
 
     SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -251,7 +269,8 @@ public class TestRpcProgramNfs3 {
     // Now perform readlink operations.
     FileHandle handle2 = response.getObjFileHandle();
     XDR xdr_req2 = new XDR();
-    handle2.serialize(xdr_req2);
+    READLINK3Request req2 = new READLINK3Request(handle2);
+    req2.serialize(xdr_req2);
 
     // Attempt by an unpriviledged user should fail.
     READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
@@ -327,12 +346,10 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
-    xdr_req.writeInt(Nfs3Constant.CREATE_UNCHECKED);
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
+    CREATE3Request req = new CREATE3Request(handle, "fubar",
+        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -348,26 +365,27 @@ public class TestRpcProgramNfs3 {
   }
 
   @Test(timeout = 60000)
-  public void testMkdir() throws Exception {
+  public void testMkdir() throws Exception {//FixME
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
-    xdr_req.writeString("bar");
+    MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3());
+    req.serialize(xdr_req);
 
-    // Attempt to remove by an unpriviledged user should fail.
-    SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+    // Attempt to mkdir by an unprivileged user should fail.
+    MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt to remove by a priviledged user should pass.
-    SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+    XDR xdr_req2 = new XDR();
+    MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3());
+    req2.serialize(xdr_req2);
+
+    // Attempt to mkdir by a privileged user should pass.
+    MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
         response2.getStatus());
@@ -379,20 +397,18 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
-    xdr_req.writeString("bar");
+    SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
+        "bar");
+    req.serialize(xdr_req);
 
-    // Attempt by an unpriviledged user should fail.
+    // Attempt by an unprivileged user should fail.
     SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt by a priviledged user should pass.
+    // Attempt by a privileged user should pass.
     SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -405,8 +421,8 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("bar");
+    REMOVE3Request req = new REMOVE3Request(handle, "bar");
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     REMOVE3Response response1 = nfsd.remove(xdr_req.asReadOnlyWrap(),
@@ -428,17 +444,17 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("foo");
+    RMDIR3Request req = new RMDIR3Request(handle, "foo");
+    req.serialize(xdr_req);
 
-    // Attempt by an unpriviledged user should fail.
+    // Attempt by an unprivileged user should fail.
     RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt by a priviledged user should pass.
+    // Attempt by a privileged user should pass.
     RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -451,19 +467,17 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("bar");
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
+    RENAME3Request req = new RENAME3Request(handle, "bar", handle, "fubar");
+    req.serialize(xdr_req);
 
-    // Attempt by an unpriviledged user should fail.
+    // Attempt by an unprivileged user should fail.
     RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt by a priviledged user should pass.
+    // Attempt by a privileged user should pass.
     RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -476,10 +490,8 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeInt(100);
+    READDIR3Request req = new READDIR3Request(handle, 0, 0, 100);
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
@@ -501,20 +513,17 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeInt(3);
-    xdr_req.writeInt(2);
+    READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
+    req.serialize(xdr_req);
 
-    // Attempt by an unpriviledged user should fail.
+    // Attempt by an unprivileged user should fail.
     READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt by a priviledged user should pass.
+    // Attempt by a privileged user should pass.
     READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -527,8 +536,9 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
+    FSSTAT3Request req = new FSSTAT3Request(handle);
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -549,8 +559,9 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
+    FSINFO3Request req = new FSINFO3Request(handle);
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -571,8 +582,9 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
+    PATHCONF3Request req = new PATHCONF3Request(handle);
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -593,9 +605,8 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeInt(5);
+    COMMIT3Request req = new COMMIT3Request(handle, 0, 5);
+    req.serialize(xdr_req);
 
     Channel ch = Mockito.mock(Channel.class);
 
@@ -278,99 +278,6 @@ Trunk (Unreleased)
     HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
     (Vinayakumar B via wheat9)
-
-  BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
-
-    HDFS-6387. HDFS CLI admin tool for creating & deleting an
-    encryption zone. (clamb)
-
-    HDFS-6386. HDFS Encryption Zones (clamb)
-
-    HDFS-6388. HDFS integration with KeyProvider. (clamb)
-
-    HDFS-6473. Protocol and API for Encryption Zones (clamb)
-
-    HDFS-6392. Wire crypto streams for encrypted files in
-    DFSClient. (clamb and yliu)
-
-    HDFS-6476. Print out the KeyProvider after finding KP successfully on
-    startup. (Juan Yu via wang)
-
-    HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
-    DFSClient. (Charles Lamb and wang)
-
-    HDFS-6389. Rename restrictions for encryption zones. (clamb)
-
-    HDFS-6605. Client server negotiation of cipher suite. (wang)
-
-    HDFS-6625. Remove the Delete Encryption Zone function (clamb)
-
-    HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
-
-    HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
-
-    HDFS-6635. Refactor encryption zone functionality into new
-    EncryptionZoneManager class. (wang)
-
-    HDFS-6474. Namenode needs to get the actual keys and iv from the
-    KeyProvider. (wang)
-
-    HDFS-6619. Clean up encryption-related tests. (wang)
-
-    HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
-
-    HDFS-6490. Fix the keyid format for generated keys in
-    FSNamesystem.createEncryptionZone (clamb)
-
-    HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
-    (wang)
-
-    HDFS-6718. Remove EncryptionZoneManager lock. (wang)
-
-    HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
-
-    HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
-    EZManager#createEncryptionZone. (clamb)
-
-    HDFS-6724. Decrypt EDEK before creating
-    CryptoInputStream/CryptoOutputStream. (wang)
-
-    HDFS-6509. Create a special /.reserved/raw directory for raw access to
-    encrypted data. (clamb via wang)
-
-    HDFS-6771. Require specification of an encryption key when creating
-    an encryption zone. (wang)
-
-    HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
-
-    HDFS-6692. Add more HDFS encryption tests. (wang)
-
-    HDFS-6780. Batch the encryption zones listing API. (wang)
-
-    HDFS-6394. HDFS encryption documentation. (wang)
-
-    HDFS-6834. Improve the configuration guidance in DFSClient when there
-    are no Codec classes found in configs. (umamahesh)
-
-    HDFS-6546. Add non-superuser capability to get the encryption zone
-    for a specific path. (clamb)
-
-    HDFS-6733. Creating encryption zone results in NPE when
-    KeyProvider is null. (clamb)
-
-    HDFS-6785. Should not be able to create encryption zone using path
-    to a non-directory file. (clamb)
-
-    HDFS-6807. Fix TestReservedRawPaths. (clamb)
-
-    HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
-    as boolean. (umamahesh)
-
-    HDFS-6817. Fix findbugs and other warnings. (yliu)
-
-    HDFS-6839. Fix TestCLI to expect new output. (clamb)
-
-    HDFS-6905. fs-encryption merge triggered release audit failures. (clamb via tucu)
 
     HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail
     intermittently with various symptoms - debugging patch. (Yongjun Zhang via
     Arpit Agarwal)
@@ -537,10 +444,30 @@ Release 2.6.0 - UNRELEASED
     HDFS-6899. Allow changing MiniDFSCluster volumes per DN and capacity
     per volume. (Arpit Agarwal)
 
+    HDFS-4486. Add log category for long-running DFSClient notices (Zhe Zhang
+    via Colin Patrick McCabe)
+
+    HDFS-6879. Adding tracing to Hadoop RPC (Masatake Iwasaki via Colin Patrick
+    McCabe)
+
+    HDFS-6774. Make FsDataset and DataStore support removing volumes. (Lei Xu
+    via atm)
+
+    HDFS-6634. inotify in HDFS. (James Thomas via wang)
+
+    HDFS-4257. The ReplaceDatanodeOnFailure policies could have a forgiving
+    option (szetszwo via cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
 
+    HDFS-6773. MiniDFSCluster should skip edit log fsync by default (Stephen
+    Chu via Colin Patrick McCabe)
+
+    HDFS-6865. Byte array native checksumming on client side
+    (James Thomas via todd)
+
   BUG FIXES
 
     HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for
@@ -668,6 +595,117 @@ Release 2.6.0 - UNRELEASED
     HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion.
     (Juan Yu and jing9 via jing9)
 
+    HDFS-6892. Add XDR packaging method for each NFS request (brandonli)
+
+    HDFS-6938. Cleanup javac warnings in FSNamesystem (Charles Lamb via wheat9)
+
+    HDFS-6902. FileWriter should be closed in finally block in
+    BlockReceiver#receiveBlock() (Tsuyoshi OZAWA via Colin Patrick McCabe)
+
+    HDFS-6800. Support Datanode layout changes with rolling upgrade.
+    (James Thomas via Arpit Agarwal)
+
+    HDFS-6972. TestRefreshUserMappings.testRefreshSuperUserGroupsConfiguration
+    doesn't decode url correctly. (Yongjun Zhang via wang)
+
+    HDFS-6942. Fix typos in log messages. (Ray Chiang via wheat9)
+
+  BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+
+    HDFS-6387. HDFS CLI admin tool for creating & deleting an
+    encryption zone. (clamb)
+
+    HDFS-6386. HDFS Encryption Zones (clamb)
+
+    HDFS-6388. HDFS integration with KeyProvider. (clamb)
+
+    HDFS-6473. Protocol and API for Encryption Zones (clamb)
+
+    HDFS-6392. Wire crypto streams for encrypted files in
+    DFSClient. (clamb and yliu)
+
+    HDFS-6476. Print out the KeyProvider after finding KP successfully on
+    startup. (Juan Yu via wang)
+
+    HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
+    DFSClient. (Charles Lamb and wang)
+
+    HDFS-6389. Rename restrictions for encryption zones. (clamb)
+
+    HDFS-6605. Client server negotiation of cipher suite. (wang)
+
+    HDFS-6625. Remove the Delete Encryption Zone function (clamb)
+
+    HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
+
+    HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
+
+    HDFS-6635. Refactor encryption zone functionality into new
+    EncryptionZoneManager class. (wang)
+
+    HDFS-6474. Namenode needs to get the actual keys and iv from the
+    KeyProvider. (wang)
+
+    HDFS-6619. Clean up encryption-related tests. (wang)
+
+    HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
+
+    HDFS-6490. Fix the keyid format for generated keys in
+    FSNamesystem.createEncryptionZone (clamb)
+
+    HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
+    (wang)
+
+    HDFS-6718. Remove EncryptionZoneManager lock. (wang)
+
+    HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
+
+    HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
+    EZManager#createEncryptionZone. (clamb)
+
+    HDFS-6724. Decrypt EDEK before creating
+    CryptoInputStream/CryptoOutputStream. (wang)
+
+    HDFS-6509. Create a special /.reserved/raw directory for raw access to
+    encrypted data. (clamb via wang)
+
+    HDFS-6771. Require specification of an encryption key when creating
+    an encryption zone. (wang)
+
+    HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
+
+    HDFS-6692. Add more HDFS encryption tests. (wang)
+
+    HDFS-6780. Batch the encryption zones listing API. (wang)
+
+    HDFS-6394. HDFS encryption documentation. (wang)
+
+    HDFS-6834. Improve the configuration guidance in DFSClient when there
+    are no Codec classes found in configs. (umamahesh)
+
+    HDFS-6546. Add non-superuser capability to get the encryption zone
+    for a specific path. (clamb)
+
+    HDFS-6733. Creating encryption zone results in NPE when
+    KeyProvider is null. (clamb)
+
+    HDFS-6785. Should not be able to create encryption zone using path
+    to a non-directory file. (clamb)
+
+    HDFS-6807. Fix TestReservedRawPaths. (clamb)
+
+    HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
+    as boolean. (umamahesh)
+
+    HDFS-6817. Fix findbugs and other warnings. (yliu)
+
+    HDFS-6839. Fix TestCLI to expect new output. (clamb)
+
+    HDFS-6954. With crypto, no native lib systems are too verbose. (clamb via wang)
+
+    HDFS-2975. Rename with overwrite flag true can make NameNode to stuck in safemode
+    on NN (crash + restart). (Yi Liu via umamahesh)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -680,6 +718,9 @@ Release 2.5.1 - UNRELEASED
 
   BUG FIXES
 
+    HADOOP-10957. The globber will sometimes erroneously return a permission
+    denied exception when there is a non-terminal wildcard (cmccabe)
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES
@@ -1,271 +0,0 @@
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

APACHE HADOOP SUBCOMPONENTS:

The Apache Hadoop project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.

For the org.apache.hadoop.util.bloom.* classes:

/**
 *
 * Copyright (c) 2005, European Commission project OneLab under contract
 * 034819 (http://www.one-lab.org)
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 * - Neither the name of the University Catholique de Louvain - UCL
 *   nor the names of its contributors may be used to endorse or
 *   promote products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

For src/main/native/util/tree.h:

/*-
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

@ -1,2 +0,0 @@
This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).