HDFS-2178. Contributing Hoop to HDFS, replacement for HDFS proxy with read/write capabilities. (tucu)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1212060 13f79535-47bb-0310-9956-ffa450edef68
Alejandro Abdelnur 2011-12-08 19:25:28 +00:00
parent 8cd80b3cbc
commit 3334306512
124 changed files with 14685 additions and 1 deletion

.gitignore

@@ -8,3 +8,4 @@
.settings
target
hadoop-hdfs-project/hadoop-hdfs/downloads
hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads


@@ -0,0 +1,60 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<assembly>
<id>hadoop-httpfs-dist</id>
<formats>
<format>dir</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<fileSets>
<!-- Configuration files -->
<fileSet>
<directory>${basedir}/src/main/conf</directory>
<outputDirectory>/etc/hadoop</outputDirectory>
<includes>
<include>*</include>
</includes>
</fileSet>
<!-- Readme, licenses, etc. -->
<fileSet>
<directory>${basedir}</directory>
<outputDirectory>/</outputDirectory>
<includes>
<include>*.txt</include>
</includes>
</fileSet>
<fileSet>
<directory>${basedir}/src/main/sbin</directory>
<outputDirectory>/sbin</outputDirectory>
<includes>
<include>*</include>
</includes>
<fileMode>0755</fileMode>
</fileSet>
<fileSet>
<directory>${basedir}/src/main/libexec</directory>
<outputDirectory>/libexec</outputDirectory>
<includes>
<include>*</include>
</includes>
<fileMode>0755</fileMode>
</fileSet>
<!-- Documentation -->
<fileSet>
<directory>${project.build.directory}/site</directory>
<outputDirectory>/share/doc/hadoop/httpfs</outputDirectory>
</fileSet>
</fileSets>
</assembly>


@@ -264,6 +264,11 @@
<artifactId>hadoop-auth</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.googlecode.json-simple</groupId>
<artifactId>json-simple</artifactId>
<scope>compile</scope>
</dependency>
</dependencies>
<build>


@@ -112,6 +112,7 @@
run cd hadoop-${project.version}
run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* .
run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .
run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* .
run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* .
COMMON_LIB=share/hadoop/common/lib
MODULES=../../../../modules


@@ -0,0 +1,17 @@
-----------------------------------------------------------------------------
HttpFS - Hadoop HDFS over HTTP
HttpFS is a server that provides a REST HTTP gateway to HDFS with full
filesystem read & write capabilities.
HttpFS can be used to transfer data between clusters running different
versions of Hadoop (overcoming RPC versioning issues), for example using
Hadoop DistCP.
HttpFS can be used to access data in HDFS on a cluster behind a firewall
(the HttpFS server acts as a gateway and is the only system that is allowed
to cross the firewall into the cluster).
HttpFS can be used to access data in HDFS using HTTP utilities (such as curl
and wget) and HTTP libraries from languages other than Java (such as Perl).
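For example, assuming the default HttpFS port (14000) and pseudo
authentication (host name hypothetical), a user's home directory can be
queried with:

  curl "http://httpfs-host:14000/webhdfs/v1/?user.name=foo&op=GETHOMEDIR"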
-----------------------------------------------------------------------------


@@ -0,0 +1,530 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.24.0-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-httpfs</artifactId>
<version>0.24.0-SNAPSHOT</version>
<packaging>war</packaging>
<name>Apache Hadoop HttpFS</name>
<description>Apache Hadoop HttpFS</description>
<properties>
<tomcat.version>6.0.32</tomcat.version>
<httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
<httpfs.source.revision>REVISION NOT AVAIL</httpfs.source.revision>
<maven.build.timestamp.format>yyyy-MM-dd'T'HH:mm:ssZ</maven.build.timestamp.format>
<httpfs.build.timestamp>${maven.build.timestamp}</httpfs.build.timestamp>
<httpfs.tomcat.dist.dir>
${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/httpfs/tomcat
</httpfs.tomcat.dist.dir>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.jdom</groupId>
<artifactId>jdom</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.googlecode.json-simple</groupId>
<artifactId>json-simple</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>javax.xml.stream</groupId>
<artifactId>stax-api</artifactId>
</exclusion>
<exclusion>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</exclusion>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api-2.5</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jdt</groupId>
<artifactId>core</artifactId>
</exclusion>
<exclusion>
<groupId>commons-el</groupId>
<artifactId>commons-el</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</exclusion>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api-2.5</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jdt</groupId>
<artifactId>core</artifactId>
</exclusion>
<exclusion>
<groupId>commons-el</groupId>
<artifactId>commons-el</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>compile</scope>
</dependency>
</dependencies>
<build>
<resources>
<resource>
<directory>src/main/resources</directory>
<filtering>true</filtering>
<includes>
<include>httpfs.properties</include>
</includes>
</resource>
<resource>
<directory>src/main/resources</directory>
<filtering>false</filtering>
<excludes>
<exclude>httpfs.properties</exclude>
</excludes>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<threadCount>1</threadCount>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>javadoc</goal>
</goals>
<phase>site</phase>
<configuration>
<linksource>true</linksource>
<quiet>true</quiet>
<verbose>false</verbose>
<source>${maven.compile.source}</source>
<charset>${maven.compile.encoding}</charset>
<groups>
<group>
<title>HttpFs API</title>
<packages>*</packages>
</group>
</groups>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-project-info-reports-plugin</artifactId>
<executions>
<execution>
<configuration>
<dependencyLocationsEnabled>false</dependencyLocationsEnabled>
</configuration>
<goals>
<goal>dependencies</goal>
</goals>
<phase>site</phase>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<excludes>
</excludes>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<id>create-web-xmls</id>
<phase>generate-test-resources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<mkdir dir="${project.build.directory}/test-classes/webapp"/>
<copy todir="${project.build.directory}/test-classes/webapp">
<fileset dir="${basedir}/src/main/webapp"/>
</copy>
</target>
</configuration>
</execution>
<execution>
<id>site</id>
<phase>site</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<xslt in="${basedir}/src/main/resources/httpfs-default.xml"
out="${project.build.directory}/site/httpfs-default.html"
style="${basedir}/src/site/configuration.xsl"/>
</target>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-war-plugin</artifactId>
<executions>
<execution>
<id>default-war</id>
<phase>package</phase>
<goals>
<goal>war</goal>
</goals>
<configuration>
<warName>webhdfs</warName>
<webappDirectory>${project.build.directory}/webhdfs</webappDirectory>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<profiles>
<profile>
<id>docs</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-site-plugin</artifactId>
<executions>
<execution>
<id>docs</id>
<phase>prepare-package</phase>
<goals>
<goal>site</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>dist</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-assemblies</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>dist</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<finalName>${project.artifactId}-${project.version}</finalName>
<appendAssemblyId>false</appendAssemblyId>
<attach>false</attach>
<descriptorRefs>
<descriptorRef>hadoop-httpfs-dist</descriptorRef>
</descriptorRefs>
</configuration>
</execution>
</executions>
</plugin>
<!-- Downloading Tomcat TAR.GZ, using downloads/ dir to avoid downloading over and over -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<id>dist</id>
<goals>
<goal>run</goal>
</goals>
<phase>package</phase>
<configuration>
<target>
<mkdir dir="downloads"/>
<get
src="http://archive.apache.org/dist/tomcat/tomcat-6/v${tomcat.version}/bin/apache-tomcat-${tomcat.version}.tar.gz"
dest="downloads/tomcat.tar.gz" verbose="true" skipexisting="true"/>
<delete dir="${project.build.directory}/tomcat.exp"/>
<mkdir dir="${project.build.directory}/tomcat.exp"/>
<!-- Using Unix script to preserve file permissions -->
<echo file="${project.build.directory}/tomcat-untar.sh">
which cygpath 2> /dev/null
if [ $? = 1 ]; then
BUILD_DIR="${project.build.directory}"
else
BUILD_DIR=`cygpath --unix '${project.build.directory}'`
fi
cd $BUILD_DIR/tomcat.exp
tar xzf ${basedir}/downloads/tomcat.tar.gz
</echo>
<exec executable="sh" dir="${project.build.directory}" failonerror="true">
<arg line="./tomcat-untar.sh"/>
</exec>
<move file="${project.build.directory}/tomcat.exp/apache-tomcat-${tomcat.version}"
tofile="${httpfs.tomcat.dist.dir}"/>
<delete dir="${project.build.directory}/tomcat.exp"/>
<delete dir="${httpfs.tomcat.dist.dir}/webapps"/>
<mkdir dir="${httpfs.tomcat.dist.dir}/webapps"/>
<delete file="${httpfs.tomcat.dist.dir}/conf/server.xml"/>
<copy file="${basedir}/src/main/tomcat/server.xml"
toDir="${httpfs.tomcat.dist.dir}/conf"/>
<copy file="${basedir}/src/main/tomcat/logging.properties"
toDir="${httpfs.tomcat.dist.dir}/conf"/>
<copy toDir="${httpfs.tomcat.dist.dir}/webapps/ROOT">
<fileset dir="${basedir}/src/main/tomcat/ROOT"/>
</copy>
<copy toDir="${httpfs.tomcat.dist.dir}/webapps/webhdfs">
<fileset dir="${project.build.directory}/webhdfs"/>
</copy>
</target>
</configuration>
</execution>
<execution>
<id>tar</id>
<phase>package</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target if="tar">
<!-- Using Unix script to preserve symlinks -->
<echo file="${project.build.directory}/dist-maketar.sh">
which cygpath 2> /dev/null
if [ $? = 1 ]; then
BUILD_DIR="${project.build.directory}"
else
BUILD_DIR=`cygpath --unix '${project.build.directory}'`
fi
cd $BUILD_DIR
tar czf ${project.artifactId}-${project.version}.tar.gz ${project.artifactId}-${project.version}
</echo>
<exec executable="sh" dir="${project.build.directory}" failonerror="true">
<arg line="./dist-maketar.sh"/>
</exec>
</target>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>


@@ -0,0 +1,41 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
#
# Set httpfs specific environment variables here.
# Settings for the Embedded Tomcat that runs HttpFS
# Java System properties for HttpFS should be specified in this variable
#
# export CATALINA_OPTS=
# HttpFS logs directory
#
# export HTTPFS_LOG=${HTTPFS_HOME}/logs
# HttpFS temporary directory
#
# export HTTPFS_TEMP=${HTTPFS_HOME}/temp
# The HTTP port used by HttpFS
#
# export HTTPFS_HTTP_PORT=14000
# The Admin port used by HttpFS
#
# export HTTPFS_ADMIN_PORT=`expr ${HTTPFS_HTTP_PORT} + 1`
# The hostname HttpFS server runs on
#
# export HTTPFS_HTTP_HOSTNAME=`hostname -f`


@@ -0,0 +1,35 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
#
# If the Java system property 'httpfs.log.dir' is not defined at HttpFSServer
# startup time, setup sets its value to '${httpfs.home}/logs'.
log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender
log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd
log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log
log4j.appender.httpfs.Append=true
log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout
log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender
log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd
log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log
log4j.appender.httpfsaudit.Append=true
log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout
log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
log4j.logger.httpfsaudit=INFO, httpfsaudit
log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs
log4j.logger.org.apache.hadoop.lib=INFO, httpfs


@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
</configuration>


@@ -0,0 +1,863 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.FileNotFoundException;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.lang.reflect.Constructor;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLEncoder;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.Map;
/**
* HttpFSServer implementation of the FileSystemAccess FileSystem.
* <p/>
* This implementation allows a user to access HDFS over HTTP via an HttpFSServer.
*/
public class HttpFSFileSystem extends FileSystem {
public static final String SERVICE_NAME = "/webhdfs";
public static final String SERVICE_VERSION = "/v1";
public static final String SERVICE_PREFIX = SERVICE_NAME + SERVICE_VERSION;
public static final String OP_PARAM = "op";
public static final String DO_AS_PARAM = "doas";
public static final String OVERWRITE_PARAM = "overwrite";
public static final String REPLICATION_PARAM = "replication";
public static final String BLOCKSIZE_PARAM = "blocksize";
public static final String PERMISSION_PARAM = "permission";
public static final String DESTINATION_PARAM = "destination";
public static final String RECURSIVE_PARAM = "recursive";
public static final String OWNER_PARAM = "owner";
public static final String GROUP_PARAM = "group";
public static final String MODIFICATION_TIME_PARAM = "modificationtime";
public static final String ACCESS_TIME_PARAM = "accesstime";
public static final String RENEWER_PARAM = "renewer";
public static final String DEFAULT_PERMISSION = "default";
public static final String RENAME_JSON = "boolean";
public static final String DELETE_JSON = "boolean";
public static final String MKDIRS_JSON = "boolean";
public static final String HOME_DIR_JSON = "Path";
public static final String SET_REPLICATION_JSON = "boolean";
public static enum FILE_TYPE {
FILE, DIRECTORY, SYMLINK;
public static FILE_TYPE getType(FileStatus fileStatus) {
if (fileStatus.isFile()) {
return FILE;
}
if (fileStatus.isDirectory()) {
return DIRECTORY;
}
if (fileStatus.isSymlink()) {
return SYMLINK;
}
throw new IllegalArgumentException("Could not determine filetype for: " +
fileStatus.getPath());
}
}
public static final String FILE_STATUSES_JSON = "FileStatuses";
public static final String FILE_STATUS_JSON = "FileStatus";
public static final String PATH_SUFFIX_JSON = "pathSuffix";
public static final String TYPE_JSON = "type";
public static final String LENGTH_JSON = "length";
public static final String OWNER_JSON = "owner";
public static final String GROUP_JSON = "group";
public static final String PERMISSION_JSON = "permission";
public static final String ACCESS_TIME_JSON = "accessTime";
public static final String MODIFICATION_TIME_JSON = "modificationTime";
public static final String BLOCK_SIZE_JSON = "blockSize";
public static final String REPLICATION_JSON = "replication";
public static final String FILE_CHECKSUM_JSON = "FileChecksum";
public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
public static final String CHECKSUM_BYTES_JSON = "bytes";
public static final String CHECKSUM_LENGTH_JSON = "length";
public static final String CONTENT_SUMMARY_JSON = "ContentSummary";
public static final String CONTENT_SUMMARY_DIRECTORY_COUNT_JSON = "directoryCount";
public static final String CONTENT_SUMMARY_FILE_COUNT_JSON = "fileCount";
public static final String CONTENT_SUMMARY_LENGTH_JSON = "length";
public static final String CONTENT_SUMMARY_QUOTA_JSON = "quota";
public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed";
public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota";
public static final String DELEGATION_TOKEN_JSON = "Token";
public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
public static final String ERROR_JSON = "RemoteException";
public static final String ERROR_EXCEPTION_JSON = "exception";
public static final String ERROR_CLASSNAME_JSON = "javaClassName";
public static final String ERROR_MESSAGE_JSON = "message";
public static final int HTTP_TEMPORARY_REDIRECT = 307;
/**
* Get operations.
*/
public enum GetOpValues {
OPEN, GETFILESTATUS, LISTSTATUS, GETHOMEDIR, GETCONTENTSUMMARY, GETFILECHECKSUM,
GETDELEGATIONTOKEN, GETFILEBLOCKLOCATIONS, INSTRUMENTATION
}
/**
* Post operations.
*/
public static enum PostOpValues {
APPEND
}
/**
* Put operations.
*/
public static enum PutOpValues {
CREATE, MKDIRS, RENAME, SETOWNER, SETPERMISSION, SETREPLICATION, SETTIMES,
RENEWDELEGATIONTOKEN, CANCELDELEGATIONTOKEN
}
/**
* Delete operations.
*/
public static enum DeleteOpValues {
DELETE
}
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
private static final String HTTP_POST = "POST";
private static final String HTTP_DELETE = "DELETE";
private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
private URI uri;
private Path workingDir;
private String doAs;
/**
* Convenience method that creates a <code>HttpURLConnection</code> for the
* HttpFSServer file system operations.
* <p/>
* This method obtains and injects any needed authentication credentials
* via the {@link #getConnection(URL, String)} method.
*
* @param method the HTTP method.
* @param params the query string parameters.
* @param path the file path
* @param makeQualified if the path should be 'makeQualified'
*
* @return a <code>HttpURLConnection</code> for the HttpFSServer server,
* authenticated and ready to use for the specified path and file system operation.
*
* @throws IOException thrown if an IO error occurs.
*/
private HttpURLConnection getConnection(String method, Map<String, String> params,
Path path, boolean makeQualified) throws IOException {
params.put(DO_AS_PARAM, doAs);
if (makeQualified) {
path = makeQualified(path);
}
URI uri = path.toUri();
StringBuilder sb = new StringBuilder();
sb.append(uri.getScheme()).append("://").append(uri.getAuthority()).
append(SERVICE_PREFIX).append(uri.getPath());
String separator = "?";
for (Map.Entry<String, String> entry : params.entrySet()) {
sb.append(separator).append(entry.getKey()).append("=").
append(URLEncoder.encode(entry.getValue(), "UTF8"));
separator = "&";
}
URL url = new URL(sb.toString());
return getConnection(url, method);
}
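// For example (hypothetical host and path), an OPEN request for /user/foo/bar
// is sent to a URL of the form
// http://httpfs-host:14000/webhdfs/v1/user/foo/bar?doas=<user>&op=OPEN
// (query-parameter order follows the params map iteration order).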
/**
* Convenience method that creates a <code>HttpURLConnection</code> for the specified URL.
* <p/>
* This method obtains and injects any needed authentication credentials.
*
* @param url url to connect to.
* @param method the HTTP method.
*
* @return a <code>HttpURLConnection</code> for the HttpFSServer server, authenticated and ready to use for
* the specified path and file system operation.
*
* @throws IOException thrown if an IO error occurs.
*/
private HttpURLConnection getConnection(URL url, String method) throws IOException {
Class<? extends Authenticator> klass =
getConf().getClass("httpfs.authenticator.class", HttpKerberosAuthenticator.class, Authenticator.class);
Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
try {
HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken);
conn.setRequestMethod(method);
if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
conn.setDoOutput(true);
}
return conn;
} catch (Exception ex) {
throw new IOException(ex);
}
}
/**
* Convenience method that JSON Parses the <code>InputStream</code> of a <code>HttpURLConnection</code>.
*
* @param conn the <code>HttpURLConnection</code>.
*
* @return the parsed JSON object.
*
* @throws IOException thrown if the <code>InputStream</code> could not be JSON parsed.
*/
private static Object jsonParse(HttpURLConnection conn) throws IOException {
try {
JSONParser parser = new JSONParser();
return parser.parse(new InputStreamReader(conn.getInputStream()));
} catch (ParseException ex) {
throw new IOException("JSON parser error, " + ex.getMessage(), ex);
}
}
/**
* Validates the status of an <code>HttpURLConnection</code> against an expected HTTP
* status code. If the current status code is not the expected one it throws an exception
* with a detail message using Server side error messages if available.
*
* @param conn the <code>HttpURLConnection</code>.
* @param expected the expected HTTP status code.
*
* @throws IOException thrown if the current status code does not match the expected one.
*/
private static void validateResponse(HttpURLConnection conn, int expected) throws IOException {
int status = conn.getResponseCode();
if (status != expected) {
try {
JSONObject json = (JSONObject) jsonParse(conn);
json = (JSONObject) json.get(ERROR_JSON);
String message = (String) json.get(ERROR_MESSAGE_JSON);
String exception = (String) json.get(ERROR_EXCEPTION_JSON);
String className = (String) json.get(ERROR_CLASSNAME_JSON);
try {
ClassLoader cl = HttpFSFileSystem.class.getClassLoader();
Class klass = cl.loadClass(className);
Constructor constr = klass.getConstructor(String.class);
throw (IOException) constr.newInstance(message);
} catch (IOException ex) {
throw ex;
} catch (Exception ex) {
throw new IOException(MessageFormat.format("{0} - {1}", exception, message));
}
} catch (IOException ex) {
if (ex.getCause() instanceof IOException) {
throw (IOException) ex.getCause();
}
throw new IOException(MessageFormat.format("HTTP status [{0}], {1}", status, conn.getResponseMessage()));
}
}
}
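// The error payload parsed above is a WebHDFS-style RemoteException object,
// for example (values illustrative):
// {"RemoteException":{"message":"...","exception":"...","javaClassName":"..."}}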
/**
* Called after a new FileSystem instance is constructed.
*
* @param name a uri whose authority section names the host, port, etc. for this FileSystem
* @param conf the configuration
*/
@Override
public void initialize(URI name, Configuration conf) throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
doAs = ugi.getUserName();
super.initialize(name, conf);
try {
uri = new URI(name.getScheme() + "://" + name.getHost() + ":" + name.getPort());
} catch (URISyntaxException ex) {
throw new IOException(ex);
}
}
/**
* Returns a URI whose scheme and authority identify this FileSystem.
*
* @return the URI whose scheme and authority identify this FileSystem.
*/
@Override
public URI getUri() {
return uri;
}
/**
* HttpFSServer subclass of the <code>FSDataInputStream</code>.
* <p/>
* This implementation does not support the
* <code>PositionedReadable</code> and <code>Seekable</code> methods.
*/
private static class HttpFSDataInputStream extends FilterInputStream implements Seekable, PositionedReadable {
protected HttpFSDataInputStream(InputStream in, int bufferSize) {
super(new BufferedInputStream(in, bufferSize));
}
@Override
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void readFully(long position, byte[] buffer) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void seek(long pos) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long getPos() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
throw new UnsupportedOperationException();
}
}
/**
* Opens an FSDataInputStream at the indicated Path.
* <p/>
* IMPORTANT: the returned <code>FSDataInputStream</code> does not support the
* <code>PositionedReadable</code> and <code>Seekable</code> methods.
*
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, GetOpValues.OPEN.toString());
HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
return new FSDataInputStream(new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
}
/**
* HttpFSServer subclass of the <code>FSDataOutputStream</code>.
* <p/>
* This implementation closes the underlying HTTP connection, validating the
* HTTP status code at close time.
*/
private static class HttpFSDataOutputStream extends FSDataOutputStream {
private HttpURLConnection conn;
private int closeStatus;
public HttpFSDataOutputStream(HttpURLConnection conn, OutputStream out, int closeStatus, Statistics stats)
throws IOException {
super(out, stats);
this.conn = conn;
this.closeStatus = closeStatus;
}
@Override
public void close() throws IOException {
try {
super.close();
} finally {
validateResponse(conn, closeStatus);
}
}
}
/**
* Converts a <code>FsPermission</code> to a Unix octal representation.
*
* @param p the permission.
*
* @return the Unix octal string representation.
*/
public static String permissionToString(FsPermission p) {
return (p == null) ? DEFAULT_PERMISSION : Integer.toString(p.toShort(), 8);
}
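// For example, an FsPermission of "rwxr-xr-x" is rendered as "755",
// and a null permission as "default".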
/*
* Common handling for uploading data for create and append operations.
*/
private FSDataOutputStream uploadData(String method, Path f, Map<String, String> params,
int bufferSize, int expectedStatus) throws IOException {
HttpURLConnection conn = getConnection(method, params, f, true);
conn.setInstanceFollowRedirects(false);
boolean exceptionAlreadyHandled = false;
try {
if (conn.getResponseCode() == HTTP_TEMPORARY_REDIRECT) {
exceptionAlreadyHandled = true;
String location = conn.getHeaderField("Location");
if (location != null) {
conn = getConnection(new URL(location), method);
conn.setRequestProperty("Content-Type", "application/octet-stream");
try {
OutputStream os = new BufferedOutputStream(conn.getOutputStream(), bufferSize);
return new HttpFSDataOutputStream(conn, os, expectedStatus, statistics);
} catch (IOException ex) {
validateResponse(conn, expectedStatus);
throw ex;
}
} else {
validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
throw new IOException("Missing HTTP 'Location' header for [" + conn.getURL() + "]");
}
} else {
throw new IOException(
MessageFormat.format("Expected HTTP status was [307], received [{0}]",
conn.getResponseCode()));
}
} catch (IOException ex) {
if (exceptionAlreadyHandled) {
throw ex;
} else {
validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
throw ex;
}
}
}
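// Note: uploadData implements a WebHDFS-style two-step write. The initial
// PUT/POST is answered with HTTP 307 and a 'Location' header, and the file
// data is then streamed over a second connection to the redirected URL.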
/**
* Opens an FSDataOutputStream at the indicated Path with write-progress
* reporting.
* <p/>
* IMPORTANT: The <code>Progressable</code> parameter is not used.
*
* @param f the file name to open.
* @param permission file permission.
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
* @param blockSize block size.
* @param progress progressable.
*
* @throws IOException
* @see #setPermission(Path, FsPermission)
*/
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, PutOpValues.CREATE.toString());
params.put(OVERWRITE_PARAM, Boolean.toString(overwrite));
params.put(REPLICATION_PARAM, Short.toString(replication));
params.put(BLOCKSIZE_PARAM, Long.toString(blockSize));
params.put(PERMISSION_PARAM, permissionToString(permission));
return uploadData(HTTP_PUT, f, params, bufferSize, HttpURLConnection.HTTP_CREATED);
}
/**
* Append to an existing file (optional operation).
* <p/>
* IMPORTANT: The <code>Progressable</code> parameter is not used.
*
* @param f the existing file to be appended.
* @param bufferSize the size of the buffer to be used.
* @param progress for reporting progress if it is not null.
*
* @throws IOException
*/
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, PostOpValues.APPEND.toString());
return uploadData(HTTP_POST, f, params, bufferSize, HttpURLConnection.HTTP_OK);
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*/
@Override
public boolean rename(Path src, Path dst) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, PutOpValues.RENAME.toString());
params.put(DESTINATION_PARAM, dst.toString());
HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return (Boolean) json.get(RENAME_JSON);
}
/**
* Delete a file.
*
* @deprecated Use delete(Path, boolean) instead
*/
@SuppressWarnings({"deprecation"})
@Deprecated
@Override
public boolean delete(Path f) throws IOException {
return delete(f, false);
}
/**
* Delete a file.
*
* @param f the path to delete.
* @param recursive if path is a directory and set to
* true, the directory is deleted else throws an exception. In
* case of a file the recursive can be set to either true or false.
*
* @return true if delete is successful else false.
*
* @throws IOException
*/
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, DeleteOpValues.DELETE.toString());
params.put(RECURSIVE_PARAM, Boolean.toString(recursive));
HttpURLConnection conn = getConnection(HTTP_DELETE, params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return (Boolean) json.get(DELETE_JSON);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param f given path
*
* @return the statuses of the files/directories in the given path
*
* @throws IOException
*/
@Override
public FileStatus[] listStatus(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, GetOpValues.LISTSTATUS.toString());
HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
json = (JSONObject) json.get(FILE_STATUSES_JSON);
JSONArray jsonArray = (JSONArray) json.get(FILE_STATUS_JSON);
FileStatus[] array = new FileStatus[jsonArray.size()];
f = makeQualified(f);
for (int i = 0; i < jsonArray.size(); i++) {
array[i] = createFileStatus(f, (JSONObject) jsonArray.get(i));
}
return array;
}
/**
* Set the current working directory for the given file system. All relative
* paths will be resolved relative to it.
*
* @param newDir new directory.
*/
@Override
public void setWorkingDirectory(Path newDir) {
workingDir = newDir;
}
/**
* Get the current working directory for the given file system
*
* @return the directory pathname
*/
@Override
public Path getWorkingDirectory() {
if (workingDir == null) {
workingDir = getHomeDirectory();
}
return workingDir;
}
/**
* Make the given file and all non-existent parents into
* directories. Has the semantics of Unix 'mkdir -p'.
* Existence of the directory hierarchy is not an error.
*/
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, PutOpValues.MKDIRS.toString());
params.put(PERMISSION_PARAM, permissionToString(permission));
HttpURLConnection conn = getConnection(HTTP_PUT, params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return (Boolean) json.get(MKDIRS_JSON);
}
/**
* Return a file status object that represents the path.
*
* @param f The path we want information from
*
* @return a FileStatus object
*
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
@Override
public FileStatus getFileStatus(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, GetOpValues.GETFILESTATUS.toString());
HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
json = (JSONObject) json.get(FILE_STATUS_JSON);
f = makeQualified(f);
return createFileStatus(f, json);
}
/**
* Return the current user's home directory in this filesystem.
* The default implementation returns "/user/$USER/".
*/
@Override
public Path getHomeDirectory() {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, GetOpValues.GETHOMEDIR.toString());
try {
HttpURLConnection conn = getConnection(HTTP_GET, params, new Path(getUri().toString(), "/"), false);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return new Path((String) json.get(HOME_DIR_JSON));
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
/**
* Set owner of a path (i.e. a file or a directory).
* The parameters username and groupname cannot both be null.
*
* @param p The path
* @param username If it is null, the original username remains unchanged.
* @param groupname If it is null, the original groupname remains unchanged.
*/
@Override
public void setOwner(Path p, String username, String groupname) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, PutOpValues.SETOWNER.toString());
params.put(OWNER_PARAM, username);
params.put(GROUP_PARAM, groupname);
HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Set permission of a path.
*
* @param p path.
* @param permission permission.
*/
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, PutOpValues.SETPERMISSION.toString());
params.put(PERMISSION_PARAM, permissionToString(permission));
HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Set access time of a file
*
* @param p The path
* @param mtime Set the modification time of this file.
* The number of milliseconds since Jan 1, 1970.
* A value of -1 means that this call should not set modification time.
* @param atime Set the access time of this file.
* The number of milliseconds since Jan 1, 1970.
* A value of -1 means that this call should not set access time.
*/
@Override
public void setTimes(Path p, long mtime, long atime) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, PutOpValues.SETTIMES.toString());
params.put(MODIFICATION_TIME_PARAM, Long.toString(mtime));
params.put(ACCESS_TIME_PARAM, Long.toString(atime));
HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
}
/**
* Set replication for an existing file.
*
* @param src file name
* @param replication new replication
*
* @return true if successful;
* false if file does not exist or is a directory
*
* @throws IOException
*/
@Override
public boolean setReplication(Path src, short replication) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, PutOpValues.SETREPLICATION.toString());
params.put(REPLICATION_PARAM, Short.toString(replication));
HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return (Boolean) json.get(SET_REPLICATION_JSON);
}
/**
* Creates a <code>FileStatus</code> object using a JSON file-status payload
* received from a HttpFSServer server.
*
* @param json a JSON file-status payload received from a HttpFSServer server
*
* @return the corresponding <code>FileStatus</code>
*/
private FileStatus createFileStatus(Path parent, JSONObject json) {
String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
long len = (Long) json.get(LENGTH_JSON);
String owner = (String) json.get(OWNER_JSON);
String group = (String) json.get(GROUP_JSON);
FsPermission permission =
new FsPermission(Short.parseShort((String) json.get(PERMISSION_JSON), 8));
long aTime = (Long) json.get(ACCESS_TIME_JSON);
long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
FileStatus fileStatus = null;
switch (type) {
case FILE:
case DIRECTORY:
fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY),
replication, blockSize, mTime, aTime,
permission, owner, group, path);
break;
case SYMLINK:
Path symLink = null;
fileStatus = new FileStatus(len, false,
replication, blockSize, mTime, aTime,
permission, owner, group, symLink,
path);
}
return fileStatus;
}
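// An illustrative file-status payload consumed by this method:
// {"pathSuffix":"a.txt","type":"FILE","length":1024,"owner":"foo","group":"bar",
// "permission":"644","accessTime":1323380000000,"modificationTime":1323380000000,
// "blockSize":67108864,"replication":3}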
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, GetOpValues.GETCONTENTSUMMARY.toString());
HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),
(Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON),
(Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON),
(Long) json.get(CONTENT_SUMMARY_QUOTA_JSON),
(Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON),
(Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)
);
}
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, GetOpValues.GETFILECHECKSUM.toString());
HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
final JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON);
return new FileChecksum() {
@Override
public String getAlgorithmName() {
return (String) json.get(CHECKSUM_ALGORITHM_JSON);
}
@Override
public int getLength() {
return ((Long) json.get(CHECKSUM_LENGTH_JSON)).intValue();
}
@Override
public byte[] getBytes() {
return StringUtils.hexStringToByte((String) json.get(CHECKSUM_BYTES_JSON));
}
@Override
public void write(DataOutput out) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void readFields(DataInput in) throws IOException {
throw new UnsupportedOperationException();
}
};
}
}
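A minimal usage sketch of the client class above (host, port and path are
hypothetical, and handling of the checked IOException is omitted):

  Configuration conf = new Configuration();
  HttpFSFileSystem fs = new HttpFSFileSystem();
  fs.initialize(new URI("http://httpfs-host:14000"), conf);
  for (FileStatus status : fs.listStatus(new Path("/user/foo"))) {
    System.out.println(status.getPath() + " " + status.getLen());
  }
  fs.close();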


@@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
/**
* A <code>KerberosAuthenticator</code> subclass that falls back to
* {@link HttpPseudoAuthenticator}.
*/
public class HttpKerberosAuthenticator extends KerberosAuthenticator {
/**
* Returns the fallback authenticator if the server does not use
* Kerberos SPNEGO HTTP authentication.
*
* @return a {@link HttpPseudoAuthenticator} instance.
*/
@Override
protected Authenticator getFallBackAuthenticator() {
return new HttpPseudoAuthenticator();
}
}


@@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import java.io.IOException;
/**
* A <code>PseudoAuthenticator</code> subclass that uses FileSystemAccess's
* <code>UserGroupInformation</code> to obtain the client user name (the UGI's login user).
*/
public class HttpPseudoAuthenticator extends PseudoAuthenticator {
/**
* Return the client user name.
*
* @return the client user name.
*/
@Override
protected String getUserName() {
try {
return UserGroupInformation.getLoginUser().getUserName();
} catch (IOException ex) {
throw new SecurityException("Could not obtain current user, " + ex.getMessage(), ex);
}
}
}


@@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import javax.servlet.FilterConfig;
import java.util.Map;
import java.util.Properties;
/**
* Subclass of Alfredo's <code>AuthenticationFilter</code> that obtains its configuration
* from HttpFSServer's server configuration.
*/
public class AuthFilter extends AuthenticationFilter {
private static final String CONF_PREFIX = "httpfs.authentication.";
/**
* Returns the Alfredo configuration from HttpFSServer's configuration.
* <p/>
* It returns all HttpFSServer's configuration properties prefixed with
* <code>httpfs.authentication</code>. The <code>httpfs.authentication</code>
* prefix is removed from the returned property names.
*
* @param configPrefix parameter not used.
* @param filterConfig parameter not used.
*
* @return Alfredo configuration read from HttpFSServer's configuration.
*/
@Override
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) {
Properties props = new Properties();
Configuration conf = HttpFSServerWebApp.get().getConfig();
props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(CONF_PREFIX)) {
String value = conf.get(name);
name = name.substring(CONF_PREFIX.length());
props.setProperty(name, value);
}
}
return props;
}
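// For example (hypothetical property), "httpfs.authentication.type" in the
// server configuration reaches the filter as "type".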
}


@@ -0,0 +1,717 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.GlobFilter;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* FileSystem operation executors used by {@link HttpFSServer}.
*/
public class FSOperations {
/**
* Converts a Unix permission octal & symbolic representation
* (e.g. 655 or -rwxr--r--) into a FileSystemAccess permission.
*
* @param str Unix permission symbolic representation.
*
* @return the FileSystemAccess permission. If the given string was
* 'default', it returns <code>FsPermission.getDefault()</code>.
*/
private static FsPermission getPermission(String str) {
FsPermission permission;
if (str.equals(HttpFSFileSystem.DEFAULT_PERMISSION)) {
permission = FsPermission.getDefault();
} else if (str.length() == 3) {
permission = new FsPermission(Short.parseShort(str, 8));
} else {
permission = FsPermission.valueOf(str);
}
return permission;
}
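// For example, "655" parses as octal rw-r-xr-x, "-rwxr--r--" goes through
// FsPermission.valueOf, and "default" yields FsPermission.getDefault().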
@SuppressWarnings({"unchecked", "deprecation"})
private static Map fileStatusToJSONRaw(FileStatus status, boolean emptyPathSuffix) {
Map json = new LinkedHashMap();
json.put(HttpFSFileSystem.PATH_SUFFIX_JSON, (emptyPathSuffix) ? "" : status.getPath().getName());
json.put(HttpFSFileSystem.TYPE_JSON, HttpFSFileSystem.FILE_TYPE.getType(status).toString());
json.put(HttpFSFileSystem.LENGTH_JSON, status.getLen());
json.put(HttpFSFileSystem.OWNER_JSON, status.getOwner());
json.put(HttpFSFileSystem.GROUP_JSON, status.getGroup());
json.put(HttpFSFileSystem.PERMISSION_JSON, HttpFSFileSystem.permissionToString(status.getPermission()));
json.put(HttpFSFileSystem.ACCESS_TIME_JSON, status.getAccessTime());
json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON, status.getModificationTime());
json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, status.getBlockSize());
json.put(HttpFSFileSystem.REPLICATION_JSON, status.getReplication());
return json;
}
/**
* Converts a FileSystemAccess <code>FileStatus</code> object into a JSON
* object.
*
* @param status FileSystemAccess file status.
*
* @return The JSON representation of the file status.
*/
@SuppressWarnings({"unchecked", "deprecation"})
private static Map fileStatusToJSON(FileStatus status) {
Map json = new LinkedHashMap();
json.put(HttpFSFileSystem.FILE_STATUS_JSON, fileStatusToJSONRaw(status, true));
return json;
}
/**
* Converts a <code>FileChecksum</code> object into a JSON object.
*
* @param checksum file checksum.
*
* @return The JSON representation of the file checksum.
*/
@SuppressWarnings({"unchecked"})
private static Map fileChecksumToJSON(FileChecksum checksum) {
Map json = new LinkedHashMap();
json.put(HttpFSFileSystem.CHECKSUM_ALGORITHM_JSON, checksum.getAlgorithmName());
json.put(HttpFSFileSystem.CHECKSUM_BYTES_JSON,
org.apache.hadoop.util.StringUtils.byteToHexString(checksum.getBytes()));
json.put(HttpFSFileSystem.CHECKSUM_LENGTH_JSON, checksum.getLength());
Map response = new LinkedHashMap();
response.put(HttpFSFileSystem.FILE_CHECKSUM_JSON, json);
return response;
}
/**
* Converts a <code>ContentSummary</code> object into a JSON object.
*
* @param contentSummary the content summary
*
* @return The JSON representation of the content summary.
*/
@SuppressWarnings({"unchecked"})
private static Map contentSummaryToJSON(ContentSummary contentSummary) {
Map json = new LinkedHashMap();
json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON, contentSummary.getDirectoryCount());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON, contentSummary.getFileCount());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON, contentSummary.getLength());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_QUOTA_JSON, contentSummary.getQuota());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_CONSUMED_JSON, contentSummary.getSpaceConsumed());
json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_QUOTA_JSON, contentSummary.getSpaceQuota());
Map response = new LinkedHashMap();
response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
return response;
}
/**
* Converts a FileSystemAccess <code>FileStatus</code> array into a JSON object
* containing a JSON array with the file statuses.
*
* @param status FileSystemAccess file status array.
*
* @return The JSON representation of the file status array.
*/
@SuppressWarnings("unchecked")
private static Map fileStatusToJSON(FileStatus[] status) {
JSONArray json = new JSONArray();
if (status != null) {
for (FileStatus s : status) {
json.add(fileStatusToJSONRaw(s, false));
}
}
Map response = new LinkedHashMap();
Map temp = new LinkedHashMap();
temp.put(HttpFSFileSystem.FILE_STATUS_JSON, json);
response.put(HttpFSFileSystem.FILE_STATUSES_JSON, temp);
return response;
}
/**
* Converts an object into a JSON Map with one key-value entry.
* <p/>
* It assumes the given value is either a JSON primitive type or a
* <code>JsonAware</code> instance.
*
* @param name name for the key of the entry.
* @param value for the value of the entry.
*
* @return the JSON representation of the key-value pair.
*/
@SuppressWarnings("unchecked")
private static JSONObject toJSON(String name, Object value) {
JSONObject json = new JSONObject();
json.put(name, value);
return json;
}
/**
* Executor that performs an append FileSystemAccess file system operation.
*/
public static class FSAppend implements FileSystemAccess.FileSystemExecutor<Void> {
private InputStream is;
private Path path;
/**
* Creates an Append executor.
*
* @param is input stream to append.
* @param path path of the file to append.
*/
public FSAppend(InputStream is, String path) {
this.is = is;
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
OutputStream os = fs.append(path, bufferSize);
IOUtils.copyBytes(is, os, bufferSize, true);
os.close();
return null;
}
}
/**
* Executor that performs a content-summary FileSystemAccess file system operation.
*/
public static class FSContentSummary implements FileSystemAccess.FileSystemExecutor<Map> {
private Path path;
/**
* Creates a content-summary executor.
*
* @param path the path to retrieve the content-summary.
*/
public FSContentSummary(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return a Map object (JSON friendly) with the content-summary.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
ContentSummary contentSummary = fs.getContentSummary(path);
return contentSummaryToJSON(contentSummary);
}
}
/**
* Executor that performs a create FileSystemAccess file system operation.
*/
public static class FSCreate implements FileSystemAccess.FileSystemExecutor<Void> {
private InputStream is;
private Path path;
private String permission;
private boolean override;
private short replication;
private long blockSize;
/**
* Creates a Create executor.
*
* @param is input stream for the file to create.
* @param path path of the file to create.
* @param perm permission for the file.
* @param override if the file should be overwritten if it already exists.
* @param repl the replication factor for the file.
* @param blockSize the block size for the file.
*/
public FSCreate(InputStream is, String path, String perm, boolean override, short repl, long blockSize) {
this.is = is;
this.path = new Path(path);
this.permission = perm;
this.override = override;
this.replication = repl;
this.blockSize = blockSize;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
if (replication == -1) {
replication = (short) fs.getConf().getInt("dfs.replication", 3);
}
if (blockSize == -1) {
blockSize = fs.getConf().getInt("dfs.block.size", 67108864);
}
FsPermission fsPermission = getPermission(permission);
int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
IOUtils.copyBytes(is, os, bufferSize, true);
os.close();
return null;
}
}
/**
* Executor that performs a delete FileSystemAccess file system operation.
*/
public static class FSDelete implements FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private boolean recursive;
/**
* Creates a Delete executor.
*
* @param path path to delete.
* @param recursive if the delete should be recursive or not.
*/
public FSDelete(String path, boolean recursive) {
this.path = new Path(path);
this.recursive = recursive;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return <code>true</code> if the delete operation was successful,
* <code>false</code> otherwise.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
boolean deleted = fs.delete(path, recursive);
return toJSON(HttpFSFileSystem.DELETE_JSON.toLowerCase(), deleted);
}
}
/**
* Executor that performs a file-checksum FileSystemAccess file system operation.
*/
public static class FSFileChecksum implements FileSystemAccess.FileSystemExecutor<Map> {
private Path path;
/**
* Creates a file-checksum executor.
*
* @param path the path to retrieve the checksum.
*/
public FSFileChecksum(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return a Map object (JSON friendly) with the file checksum.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
FileChecksum checksum = fs.getFileChecksum(path);
return fileChecksumToJSON(checksum);
}
}
/**
* Executor that performs a file-status FileSystemAccess file system operation.
*/
public static class FSFileStatus implements FileSystemAccess.FileSystemExecutor<Map> {
private Path path;
/**
* Creates a file-status executor.
*
* @param path the path to retrieve the status.
*/
public FSFileStatus(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return a Map object (JSON friendly) with the file status.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
FileStatus status = fs.getFileStatus(path);
return fileStatusToJSON(status);
}
}
/**
* Executor that performs a home-dir FileSystemAccess file system operation.
*/
public static class FSHomeDir implements FileSystemAccess.FileSystemExecutor<JSONObject> {
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return a JSON object with the user home directory.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
@SuppressWarnings("unchecked")
public JSONObject execute(FileSystem fs) throws IOException {
Path homeDir = fs.getHomeDirectory();
JSONObject json = new JSONObject();
json.put(HttpFSFileSystem.HOME_DIR_JSON, homeDir.toUri().getPath());
return json;
}
}
/**
* Executor that performs a list-status FileSystemAccess file system operation.
*/
public static class FSListStatus implements FileSystemAccess.FileSystemExecutor<Map>, PathFilter {
private Path path;
private PathFilter filter;
/**
* Creates a list-status executor.
*
* @param path the directory to retrieve the status of its contents.
* @param filter glob filter to use.
*
* @throws IOException thrown if the filter expression is incorrect.
*/
public FSListStatus(String path, String filter) throws IOException {
this.path = new Path(path);
this.filter = (filter == null) ? this : new GlobFilter(filter);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return a Map with the file status of the directory
* contents.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Map execute(FileSystem fs) throws IOException {
FileStatus[] status = fs.listStatus(path, filter);
return fileStatusToJSON(status);
}
@Override
public boolean accept(Path path) {
return true;
}
}
/**
* Executor that performs a mkdirs FileSystemAccess file system operation.
*/
public static class FSMkdirs implements FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private String permission;
/**
* Creates a mkdirs executor.
*
* @param path directory path to create.
* @param permission permission to use.
*/
public FSMkdirs(String path, String permission) {
this.path = new Path(path);
this.permission = permission;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return <code>true</code> if the mkdirs operation was successful,
* <code>false</code> otherwise.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
FsPermission fsPermission = getPermission(permission);
boolean mkdirs = fs.mkdirs(path, fsPermission);
return toJSON(HttpFSFileSystem.MKDIRS_JSON, mkdirs);
}
}
/**
* Executor that performs an open FileSystemAccess file system operation.
*/
public static class FSOpen implements FileSystemAccess.FileSystemExecutor<InputStream> {
private Path path;
/**
* Creates an open executor.
*
* @param path file to open.
*/
public FSOpen(String path) {
this.path = new Path(path);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return The inputstream of the file.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public InputStream execute(FileSystem fs) throws IOException {
int bufferSize = HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096);
return fs.open(path, bufferSize);
}
}
/**
* Executor that performs a rename FileSystemAccess file system operation.
*/
public static class FSRename implements FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private Path toPath;
/**
* Creates a rename executor.
*
* @param path path to rename.
* @param toPath new name.
*/
public FSRename(String path, String toPath) {
this.path = new Path(path);
this.toPath = new Path(toPath);
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return <code>true</code> if the rename operation was successful,
* <code>false</code> otherwise.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public JSONObject execute(FileSystem fs) throws IOException {
boolean renamed = fs.rename(path, toPath);
return toJSON(HttpFSFileSystem.RENAME_JSON, renamed);
}
}
/**
* Executor that performs a set-owner FileSystemAccess file system operation.
*/
public static class FSSetOwner implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private String owner;
private String group;
/**
* Creates a set-owner executor.
*
* @param path the path to set the owner.
* @param owner owner to set.
* @param group group to set.
*/
public FSSetOwner(String path, String owner, String group) {
this.path = new Path(path);
this.owner = owner;
this.group = group;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.setOwner(path, owner, group);
return null;
}
}
/**
* Executor that performs a set-permission FileSystemAccess file system operation.
*/
public static class FSSetPermission implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private String permission;
/**
* Creates a set-permission executor.
*
* @param path path to set the permission.
* @param permission permission to set.
*/
public FSSetPermission(String path, String permission) {
this.path = new Path(path);
this.permission = permission;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
FsPermission fsPermission = getPermission(permission);
fs.setPermission(path, fsPermission);
return null;
}
}
/**
* Executor that performs a set-replication FileSystemAccess file system operation.
*/
public static class FSSetReplication implements FileSystemAccess.FileSystemExecutor<JSONObject> {
private Path path;
private short replication;
/**
* Creates a set-replication executor.
*
* @param path path to set the replication factor.
* @param replication replication factor to set.
*/
public FSSetReplication(String path, short replication) {
this.path = new Path(path);
this.replication = replication;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return <code>true</code> if the replication value was set,
* <code>false</code> otherwise.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
@SuppressWarnings("unchecked")
public JSONObject execute(FileSystem fs) throws IOException {
boolean ret = fs.setReplication(path, replication);
JSONObject json = new JSONObject();
json.put(HttpFSFileSystem.SET_REPLICATION_JSON, ret);
return json;
}
}
/**
* Executor that performs a set-times FileSystemAccess file system operation.
*/
public static class FSSetTimes implements FileSystemAccess.FileSystemExecutor<Void> {
private Path path;
private long mTime;
private long aTime;
/**
* Creates a set-times executor.
*
* @param path path to set the times.
* @param mTime modified time to set.
* @param aTime access time to set.
*/
public FSSetTimes(String path, long mTime, long aTime) {
this.path = new Path(path);
this.mTime = mTime;
this.aTime = aTime;
}
/**
* Executes the filesystem operation.
*
* @param fs filesystem instance to use.
*
* @return void.
*
* @throws IOException thrown if an IO error occurred.
*/
@Override
public Void execute(FileSystem fs) throws IOException {
fs.setTimes(path, mTime, aTime);
return null;
}
}
}
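
A minimal usage sketch of the executor pattern above (not part of the patch). In the server these executors run through the FileSystemAccess service; a local FileSystem is assumed here purely for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.server.FSOperations;
import org.json.simple.JSONObject;

public class ExecutorDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Each executor packages one filesystem operation plus its JSON mapping.
    FSOperations.FSMkdirs mkdirs = new FSOperations.FSMkdirs("/tmp/httpfs-demo", "755");
    JSONObject json = mkdirs.execute(fs);
    System.out.println(json.toJSONString());  // e.g. {"mkdirs":true}
    fs.close();
  }
}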

View File

@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.wsrs.ExceptionProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.Provider;
import java.io.FileNotFoundException;
import java.io.IOException;
/**
* JAX-RS <code>ExceptionMapper</code> implementation that maps HttpFSServer's
* exceptions to HTTP status codes.
*/
@Provider
public class HttpFSExceptionProvider extends ExceptionProvider {
private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
private static Logger LOG = LoggerFactory.getLogger(HttpFSExceptionProvider.class);
/**
* Maps different exceptions thrown by HttpFSServer to HTTP status codes.
* <p/>
* <ul>
* <li>SecurityException : HTTP UNAUTHORIZED</li>
* <li>FileNotFoundException : HTTP NOT_FOUND</li>
* <li>IOException : HTTP INTERNAL_SERVER_ERROR</li>
* <li>UnsupportedOperationException : HTTP BAD_REQUEST</li>
* <li>all other exceptions : HTTP INTERNAL_SERVER_ERROR </li>
* </ul>
*
* @param throwable exception thrown.
*
* @return mapped HTTP status code
*/
@Override
public Response toResponse(Throwable throwable) {
Response.Status status;
if (throwable instanceof FileSystemAccessException) {
throwable = throwable.getCause();
}
if (throwable instanceof SecurityException) {
status = Response.Status.UNAUTHORIZED;
} else if (throwable instanceof FileNotFoundException) {
status = Response.Status.NOT_FOUND;
} else if (throwable instanceof IOException) {
status = Response.Status.INTERNAL_SERVER_ERROR;
} else if (throwable instanceof UnsupportedOperationException) {
status = Response.Status.BAD_REQUEST;
} else {
status = Response.Status.INTERNAL_SERVER_ERROR;
}
return createResponse(status, throwable);
}
/**
* Logs the HTTP status code and exception in HttpFSServer's log.
*
* @param status HTTP status code.
* @param throwable exception thrown.
*/
@Override
protected void log(Response.Status status, Throwable throwable) {
String method = MDC.get("method");
String path = MDC.get("path");
String message = getOneLineMessage(throwable);
AUDIT_LOG.warn("FAILED [{}:{}] response [{}] {}", new Object[]{method, path, status, message});
LOG.warn("[{}:{}] response [{}] {}", new Object[]{method, path, status, message, throwable});
}
}
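
A short sketch of the mapping above (not part of the patch; it assumes ExceptionProvider#createResponse builds a plain JAX-RS response carrying the mapped status, and that a JAX-RS runtime such as Jersey is on the classpath):

import java.io.FileNotFoundException;
import javax.ws.rs.core.Response;
import org.apache.hadoop.fs.http.server.HttpFSExceptionProvider;

public class ExceptionMappingDemo {
  public static void main(String[] args) {
    HttpFSExceptionProvider provider = new HttpFSExceptionProvider();
    // FileNotFoundException maps to NOT_FOUND per toResponse() above.
    Response response = provider.toResponse(new FileNotFoundException("/no/such/file"));
    System.out.println(response.getStatus());  // 404
  }
}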

View File

@ -0,0 +1,536 @@
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.lib.wsrs.BooleanParam;
import org.apache.hadoop.lib.wsrs.EnumParam;
import org.apache.hadoop.lib.wsrs.LongParam;
import org.apache.hadoop.lib.wsrs.ShortParam;
import org.apache.hadoop.lib.wsrs.StringParam;
import org.apache.hadoop.lib.wsrs.UserProvider;
import org.slf4j.MDC;
import java.util.regex.Pattern;
/**
* HttpFS HTTP Parameters used by {@link HttpFSServer}.
*/
public class HttpFSParams {
/**
* To avoid instantiation.
*/
private HttpFSParams() {
}
/**
* Class for access-time parameter.
*/
public static class AccessTimeParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "-1";
/**
* Constructor.
*
* @param str parameter value.
*/
public AccessTimeParam(String str) {
super(NAME, str);
}
}
/**
* Class for block-size parameter.
*/
public static class BlockSizeParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "-1";
/**
* Constructor.
*
* @param str parameter value.
*/
public BlockSizeParam(String str) {
super(NAME, str);
}
}
/**
* Class for data parameter.
*/
public static class DataParam extends BooleanParam {
/**
* Parameter name.
*/
public static final String NAME = "data";
/**
* Default parameter value.
*/
public static final String DEFAULT = "false";
/**
* Constructor.
*
* @param str parameter value.
*/
public DataParam(String str) {
super(NAME, str);
}
}
/**
* Class for DELETE operation parameter.
*/
public static class DeleteOpParam extends EnumParam<HttpFSFileSystem.DeleteOpValues> {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OP_PARAM;
/**
* Constructor.
*
* @param str parameter value.
*/
public DeleteOpParam(String str) {
super(NAME, str, HttpFSFileSystem.DeleteOpValues.class);
}
}
/**
* Class for delete's recursive parameter.
*/
public static class DeleteRecursiveParam extends BooleanParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "false";
/**
* Constructor.
*
* @param str parameter value.
*/
public DeleteRecursiveParam(String str) {
super(NAME, str);
}
}
/**
* Class for do-as parameter.
*/
public static class DoAsParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "";
/**
* Constructor.
*
* @param str parameter value.
*/
public DoAsParam(String str) {
super(NAME, str, UserProvider.USER_PATTERN);
}
/**
* Delegates to parent and then adds do-as user to
* MDC context for logging purposes.
*
* @param name parameter name.
* @param str parameter value.
*
* @return parsed parameter
*/
@Override
public String parseParam(String name, String str) {
String doAs = super.parseParam(name, str);
MDC.put(NAME, (doAs != null) ? doAs : "-");
return doAs;
}
}
/**
* Class for filter parameter.
*/
public static class FilterParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = "filter";
/**
* Default parameter value.
*/
public static final String DEFAULT = "";
/**
* Constructor.
*
* @param expr parameter value.
*/
public FilterParam(String expr) {
super(NAME, expr);
}
}
/**
* Class for path parameter.
*/
public static class FsPathParam extends StringParam {
/**
* Constructor.
*
* @param path parameter value.
*/
public FsPathParam(String path) {
super("path", path);
}
/**
* Makes the path absolute by prepending '/' to it.
* <p/>
* This is required because JAX-RS resolution of paths does not add
* the root '/'.
*/
public void makeAbsolute() {
String path = value();
path = "/" + ((path != null) ? path : "");
setValue(path);
}
}
/**
* Class for GET operation parameter.
*/
public static class GetOpParam extends EnumParam<HttpFSFileSystem.GetOpValues> {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OP_PARAM;
/**
* Constructor.
*
* @param str parameter value.
*/
public GetOpParam(String str) {
super(NAME, str, HttpFSFileSystem.GetOpValues.class);
}
}
/**
* Class for group parameter.
*/
public static class GroupParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.GROUP_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "";
/**
* Constructor.
*
* @param str parameter value.
*/
public GroupParam(String str) {
super(NAME, str, UserProvider.USER_PATTERN);
}
}
/**
* Class for len parameter.
*/
public static class LenParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = "len";
/**
* Default parameter value.
*/
public static final String DEFAULT = "-1";
/**
* Constructor.
*
* @param str parameter value.
*/
public LenParam(String str) {
super(NAME, str);
}
}
/**
* Class for modified-time parameter.
*/
public static class ModifiedTimeParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "-1";
/**
* Constructor.
*
* @param str parameter value.
*/
public ModifiedTimeParam(String str) {
super(NAME, str);
}
}
/**
* Class for offset parameter.
*/
public static class OffsetParam extends LongParam {
/**
* Parameter name.
*/
public static final String NAME = "offset";
/**
* Default parameter value.
*/
public static final String DEFAULT = "0";
/**
* Constructor.
*
* @param str parameter value.
*/
public OffsetParam(String str) {
super(NAME, str);
}
}
/**
* Class for overwrite parameter.
*/
public static class OverwriteParam extends BooleanParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "true";
/**
* Constructor.
*
* @param str parameter value.
*/
public OverwriteParam(String str) {
super(NAME, str);
}
}
/**
* Class for owner parameter.
*/
public static class OwnerParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OWNER_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "";
/**
* Constructor.
*
* @param str parameter value.
*/
public OwnerParam(String str) {
super(NAME, str, UserProvider.USER_PATTERN);
}
}
/**
* Class for permission parameter.
*/
public static class PermissionParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = HttpFSFileSystem.DEFAULT_PERMISSION;
/**
* Octal and symbolic Unix permissions regular expression pattern.
*/
private static final Pattern PERMISSION_PATTERN =
Pattern.compile(DEFAULT + "|(-[-r][-w][-x][-r][-w][-x][-r][-w][-x])" + "|[0-7][0-7][0-7]");
/**
* Constructor.
*
* @param permission parameter value.
*/
public PermissionParam(String permission) {
super(NAME, permission.toLowerCase(), PERMISSION_PATTERN);
}
}
/**
* Class for POST operation parameter.
*/
public static class PostOpParam extends EnumParam<HttpFSFileSystem.PostOpValues> {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OP_PARAM;
/**
* Constructor.
*
* @param str parameter value.
*/
public PostOpParam(String str) {
super(NAME, str, HttpFSFileSystem.PostOpValues.class);
}
}
/**
* Class for PUT operation parameter.
*/
public static class PutOpParam extends EnumParam<HttpFSFileSystem.PutOpValues> {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.OP_PARAM;
/**
* Constructor.
*
* @param str parameter value.
*/
public PutOpParam(String str) {
super(NAME, str, HttpFSFileSystem.PutOpValues.class);
}
}
/**
* Class for replication parameter.
*/
public static class ReplicationParam extends ShortParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "-1";
/**
* Constructor.
*
* @param str parameter value.
*/
public ReplicationParam(String str) {
super(NAME, str);
}
}
/**
* Class for to-path parameter.
*/
public static class ToPathParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM;
/**
* Default parameter value.
*/
public static final String DEFAULT = "";
/**
* Constructor.
*
* @param path parameter value.
*/
public ToPathParam(String path) {
super(NAME, path);
}
}
}
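
A brief sketch of how these typed parameters validate input (not part of the patch; it assumes StringParam rejects values that fail its pattern check):

import org.apache.hadoop.fs.http.server.HttpFSParams;

public class ParamDemo {
  public static void main(String[] args) {
    // Octal and symbolic forms both satisfy PERMISSION_PATTERN.
    System.out.println(new HttpFSParams.PermissionParam("755").value());
    System.out.println(new HttpFSParams.PermissionParam("-rwxr--r--").value());
    // Values are lower-cased before matching, so "DEFAULT" is accepted as well.
    System.out.println(new HttpFSParams.PermissionParam("DEFAULT").value());
    // A value such as "999" does not match the pattern and is rejected.
  }
}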

View File

@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
/**
* Filter that releases FileSystemAccess filesystem instances upon HTTP request
* completion.
*/
public class HttpFSReleaseFilter extends FileSystemReleaseFilter {
/**
* Returns the {@link FileSystemAccess} service to return the FileSystemAccess filesystem
* instance to.
*
* @return the FileSystemAccess service.
*/
@Override
protected FileSystemAccess getFileSystemAccess() {
return HttpFSServerWebApp.get().get(FileSystemAccess.class);
}
}

View File

@ -0,0 +1,604 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.server.HttpFSParams.AccessTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.BlockSizeParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.DataParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteOpParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteRecursiveParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.DoAsParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.FilterParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.FsPathParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.GetOpParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.GroupParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.LenParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.ModifiedTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.OffsetParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.OverwriteParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.OwnerParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.PermissionParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.PostOpParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.PutOpParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.ReplicationParam;
import org.apache.hadoop.fs.http.server.HttpFSParams.ToPathParam;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.service.ProxyUser;
import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
import org.apache.hadoop.lib.servlet.HostnameFilter;
import org.apache.hadoop.lib.wsrs.InputStreamEntity;
import org.json.simple.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.security.AccessControlException;
import java.security.Principal;
import java.text.MessageFormat;
import java.util.List;
import java.util.Map;
/**
* Main class of HttpFSServer server.
* <p/>
* The <code>HttpFSServer</code> class uses Jersey JAX-RS to bind HTTP requests to the
* different operations.
*/
@Path(HttpFSFileSystem.SERVICE_VERSION)
public class HttpFSServer {
private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
/**
* Special binding for '/' as it is not handled by the wildcard binding.
*
* @param user principal making the request.
* @param op GET operation, default value is {@link HttpFSFileSystem.GetOpValues#OPEN}.
* @param filter Glob filter, default value is none. Used only if the
* operation is {@link HttpFSFileSystem.GetOpValues#LISTSTATUS}
* @param doAs user being impersonated, default value is none. It can be used
* only if the current user is a HttpFSServer proxyuser.
*
* @return the request response
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
* @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
* exceptions are handled by {@link HttpFSExceptionProvider}.
*/
@GET
@Path("/")
@Produces(MediaType.APPLICATION_JSON)
public Response root(@Context Principal user,
@QueryParam(GetOpParam.NAME) GetOpParam op,
@QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
throws IOException, FileSystemAccessException {
return get(user, new FsPathParam(""), op, new OffsetParam(OffsetParam.DEFAULT),
new LenParam(LenParam.DEFAULT), filter, doAs,
new OverwriteParam(OverwriteParam.DEFAULT),
new BlockSizeParam(BlockSizeParam.DEFAULT),
new PermissionParam(PermissionParam.DEFAULT),
new ReplicationParam(ReplicationParam.DEFAULT));
}
/**
* Resolves the effective user that will be used to request a FileSystemAccess filesystem.
* <p/>
* If the doAs-user is NULL or the same as the user, it returns the user.
* <p/>
* Otherwise it uses proxyuser rules (see {@link ProxyUser}) to determine if the
* current user can impersonate the doAs-user.
* <p/>
* If the current user cannot impersonate the doAs-user an
* <code>AccessControlException</code> will be thrown.
*
* @param user principal for whom the filesystem instance is.
* @param doAs do-as user, if any.
*
* @return the effective user.
*
* @throws IOException thrown if an IO error occurs.
* @throws AccessControlException thrown if the current user cannot impersonate
* the doAs-user.
*/
private String getEffectiveUser(Principal user, String doAs) throws IOException {
String effectiveUser = user.getName();
if (doAs != null && !doAs.equals(user.getName())) {
ProxyUser proxyUser = HttpFSServerWebApp.get().get(ProxyUser.class);
proxyUser.validate(user.getName(), HostnameFilter.get(), doAs);
effectiveUser = doAs;
AUDIT_LOG.info("Proxy user [{}] DoAs user [{}]", user.getName(), doAs);
}
return effectiveUser;
}
/**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem for the effective
* user.
*
* @param user principal making the request.
* @param doAs do-as user, if any.
* @param executor FileSystemExecutor to execute.
*
* @return FileSystemExecutor response
*
* @throws IOException thrown if an IO error occurs.
* @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
* exceptions are handled by {@link HttpFSExceptionProvider}.
*/
private <T> T fsExecute(Principal user, String doAs, FileSystemAccess.FileSystemExecutor<T> executor)
throws IOException, FileSystemAccessException {
String hadoopUser = getEffectiveUser(user, doAs);
FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
Configuration conf = fsAccess.getDefaultConfiguration();
return fsAccess.execute(hadoopUser, conf, executor);
}
/**
* Returns a filesystem instance. The filesystem instance is wired for release at the completion of
* the current Servlet request via the {@link FileSystemReleaseFilter}.
* <p/>
* If a do-as user is specified, the current user must be a valid proxyuser, otherwise an
* <code>AccessControlException</code> will be thrown.
*
* @param user principal for whom the filesystem instance is.
* @param doAs do-as user, if any.
*
* @return a filesystem for the specified user or do-as user.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
* @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
* exceptions are handled by {@link HttpFSExceptionProvider}.
*/
private FileSystem createFileSystem(Principal user, String doAs) throws IOException, FileSystemAccessException {
String hadoopUser = getEffectiveUser(user, doAs);
FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
Configuration conf = fsAccess.getDefaultConfiguration();
FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf);
FileSystemReleaseFilter.setFileSystem(fs);
return fs;
}
/**
* Binding to handle all GET requests, supported operations are
* {@link HttpFSFileSystem.GetOpValues}.
* <p/>
* The {@link HttpFSFileSystem.GetOpValues#INSTRUMENTATION} operation is available only
* to users that are in HttpFSServer's admin group (see {@link HttpFSServer}). It returns
* HttpFSServer instrumentation data. The specified path must be '/'.
*
* @param user principal making the request.
* @param path path for the GET request.
* @param op GET operation, default value is {@link HttpFSFileSystem.GetOpValues#OPEN}.
* @param offset of the file being fetched, used only with
* {@link HttpFSFileSystem.GetOpValues#OPEN} operations.
* @param len number of bytes, used only with {@link HttpFSFileSystem.GetOpValues#OPEN}
* operations.
* @param filter Glob filter, default value is none. Used only if the
* operation is {@link HttpFSFileSystem.GetOpValues#LISTSTATUS}
* @param doAs user being impersonated, default value is none. It can be used
* only if the current user is a HttpFSServer proxyuser.
* @param override default is true. Used only for
* {@link HttpFSFileSystem.PutOpValues#CREATE} operations.
* @param blockSize block size to set, used only by
* {@link HttpFSFileSystem.PutOpValues#CREATE} operations.
* @param permission permission to set, used only by
* {@link HttpFSFileSystem.PutOpValues#SETPERMISSION}.
* @param replication replication factor to set, used only by
* {@link HttpFSFileSystem.PutOpValues#SETREPLICATION}.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
* @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
* exceptions are handled by {@link HttpFSExceptionProvider}.
*/
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response get(@Context Principal user,
@PathParam("path") @DefaultValue("") FsPathParam path,
@QueryParam(GetOpParam.NAME) GetOpParam op,
@QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT) OffsetParam offset,
@QueryParam(LenParam.NAME) @DefaultValue(LenParam.DEFAULT) LenParam len,
@QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs,
//these params are only for createHandle operation acceptance purposes
@QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
@QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
@QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
PermissionParam permission,
@QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
ReplicationParam replication
)
throws IOException, FileSystemAccessException {
Response response = null;
if (op == null) {
throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", GetOpParam.NAME));
} else {
path.makeAbsolute();
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
switch (op.value()) {
case OPEN: {
//Invoking the command directly using an unmanaged FileSystem that is released by the
//FileSystemReleaseFilter
FSOperations.FSOpen command = new FSOperations.FSOpen(path.value());
FileSystem fs = createFileSystem(user, doAs.value());
InputStream is = command.execute(fs);
AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[]{path, offset, len});
InputStreamEntity entity = new InputStreamEntity(is, offset.value(), len.value());
response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
break;
}
case GETFILESTATUS: {
FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path.value());
Map json = fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTSTATUS: {
FSOperations.FSListStatus command = new FSOperations.FSListStatus(path.value(), filter.value());
Map json = fsExecute(user, doAs.value(), command);
if (filter.value() == null) {
AUDIT_LOG.info("[{}]", path);
} else {
AUDIT_LOG.info("[{}] filter [{}]", path, filter.value());
}
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETHOMEDIR: {
FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
JSONObject json = fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("");
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case INSTRUMENTATION: {
if (!path.value().equals("/")) {
throw new UnsupportedOperationException(
MessageFormat.format("Invalid path for {0}={1}, must be '/'",
GetOpParam.NAME, HttpFSFileSystem.GetOpValues.INSTRUMENTATION));
}
Groups groups = HttpFSServerWebApp.get().get(Groups.class);
List<String> userGroups = groups.getGroups(user.getName());
if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
throw new AccessControlException("User not in HttpFSServer admin group");
}
Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class);
Map snapshot = instrumentation.getSnapshot();
response = Response.ok(snapshot).build();
break;
}
case GETCONTENTSUMMARY: {
FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path.value());
Map json = fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETFILECHECKSUM: {
FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path.value());
Map json = fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETDELEGATIONTOKEN: {
response = Response.status(Response.Status.BAD_REQUEST).build();
break;
}
case GETFILEBLOCKLOCATIONS: {
response = Response.status(Response.Status.BAD_REQUEST).build();
break;
}
}
return response;
}
}
/**
* Creates the URL for an upload operation (create or append).
*
* @param uriInfo uri info of the request.
* @param uploadOperation operation for the upload URL.
*
* @return the URI for uploading data.
*/
protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
uriBuilder = uriBuilder.replaceQueryParam(PutOpParam.NAME, uploadOperation).
queryParam(DataParam.NAME, Boolean.TRUE);
return uriBuilder.build(null);
}
/**
* Binding to handle all DELETE requests.
*
* @param user principal making the request.
* @param path path for the DELETE request.
* @param op DELETE operation, default value is {@link HttpFSFileSystem.DeleteOpValues#DELETE}.
* @param recursive indicates if the delete is recursive, default is <code>false</code>
* @param doAs user being impersonated, default value is none. It can be used
* only if the current user is a HttpFSServer proxyuser.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
* @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
* exceptions are handled by {@link HttpFSExceptionProvider}.
*/
@DELETE
@Path("{path:.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(@Context Principal user,
@PathParam("path") FsPathParam path,
@QueryParam(DeleteOpParam.NAME) DeleteOpParam op,
@QueryParam(DeleteRecursiveParam.NAME) @DefaultValue(DeleteRecursiveParam.DEFAULT)
DeleteRecursiveParam recursive,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
throws IOException, FileSystemAccessException {
Response response = null;
if (op == null) {
throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", DeleteOpParam.NAME));
}
switch (op.value()) {
case DELETE: {
path.makeAbsolute();
MDC.put(HttpFSFileSystem.OP_PARAM, "DELETE");
AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
FSOperations.FSDelete command = new FSOperations.FSDelete(path.value(), recursive.value());
JSONObject json = fsExecute(user, doAs.value(), command);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
}
return response;
}
/**
* Binding to handle all PUT requests, supported operations are
* {@link HttpFSFileSystem.PutOpValues}.
*
* @param is request input stream, used only for
* {@link HttpFSFileSystem.PutOpValues#CREATE} operations.
* @param user principal making the request.
* @param uriInfo the request uriInfo.
* @param path path for the PUT request.
* @param op PUT operation, no default value.
* @param toPath new path, used only for
* {@link HttpFSFileSystem.PutOpValues#RENAME} operations.
* @param owner owner to set, used only for
* {@link HttpFSFileSystem.PutOpValues#SETOWNER} operations.
* @param group group to set, used only for
* {@link HttpFSFileSystem.PutOpValues#SETOWNER} operations.
* @param override default is true. Used only for
* {@link HttpFSFileSystem.PutOpValues#CREATE} operations.
* @param blockSize block size to set, used only by
* {@link HttpFSFileSystem.PutOpValues#CREATE} operations.
* @param permission permission to set, used only by
* {@link HttpFSFileSystem.PutOpValues#SETPERMISSION}.
* @param replication replication factor to set, used only by
* {@link HttpFSFileSystem.PutOpValues#SETREPLICATION}.
* @param modifiedTime modified time, in milliseconds since the epoch, used only by
* {@link HttpFSFileSystem.PutOpValues#SETTIMES}.
* @param accessTime accessed time, in milliseconds since the epoch, used only by
* {@link HttpFSFileSystem.PutOpValues#SETTIMES}.
* @param hasData indicates if the create request is uploading data or not
* (just getting the handle).
* @param doAs user being impersonated, default value is none. It can be used
* only if the current user is a HttpFSServer proxyuser.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
* @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
* exceptions are handled by {@link HttpFSExceptionProvider}.
*/
@PUT
@Path("{path:.*}")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_JSON})
public Response put(InputStream is,
@Context Principal user,
@Context UriInfo uriInfo,
@PathParam("path") FsPathParam path,
@QueryParam(PutOpParam.NAME) PutOpParam op,
@QueryParam(ToPathParam.NAME) @DefaultValue(ToPathParam.DEFAULT) ToPathParam toPath,
@QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT) OwnerParam owner,
@QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT) GroupParam group,
@QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
@QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
@QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
PermissionParam permission,
@QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
ReplicationParam replication,
@QueryParam(ModifiedTimeParam.NAME) @DefaultValue(ModifiedTimeParam.DEFAULT)
ModifiedTimeParam modifiedTime,
@QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
AccessTimeParam accessTime,
@QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
throws IOException, FileSystemAccessException {
Response response = null;
if (op == null) {
throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PutOpParam.NAME));
}
path.makeAbsolute();
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
switch (op.value()) {
case CREATE: {
if (!hasData.value()) {
response = Response.temporaryRedirect(
createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PutOpValues.CREATE)).build();
} else {
FSOperations.FSCreate
command = new FSOperations.FSCreate(is, path.value(), permission.value(), override.value(),
replication.value(), blockSize.value());
fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
new Object[]{path, permission, override, replication, blockSize});
response = Response.status(Response.Status.CREATED).build();
}
break;
}
case MKDIRS: {
FSOperations.FSMkdirs command = new FSOperations.FSMkdirs(path.value(), permission.value());
JSONObject json = fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}] permission [{}]", path, permission.value());
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case RENAME: {
FSOperations.FSRename command = new FSOperations.FSRename(path.value(), toPath.value());
JSONObject json = fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}] to [{}]", path, toPath);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case SETOWNER: {
FSOperations.FSSetOwner command = new FSOperations.FSSetOwner(path.value(), owner.value(), group.value());
fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner.value() + ":" + group.value());
response = Response.ok().build();
break;
}
case SETPERMISSION: {
FSOperations.FSSetPermission command = new FSOperations.FSSetPermission(path.value(), permission.value());
fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}] to [{}]", path, permission.value());
response = Response.ok().build();
break;
}
case SETREPLICATION: {
FSOperations.FSSetReplication command = new FSOperations.FSSetReplication(path.value(), replication.value());
JSONObject json = fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}] to [{}]", path, replication.value());
response = Response.ok(json).build();
break;
}
case SETTIMES: {
FSOperations.FSSetTimes
command = new FSOperations.FSSetTimes(path.value(), modifiedTime.value(), accessTime.value());
fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}] to (M/A)[{}]", path, modifiedTime.value() + ":" + accessTime.value());
response = Response.ok().build();
break;
}
case RENEWDELEGATIONTOKEN: {
response = Response.status(Response.Status.BAD_REQUEST).build();
break;
}
case CANCELDELEGATIONTOKEN: {
response = Response.status(Response.Status.BAD_REQUEST).build();
break;
}
}
return response;
}
/**
* Binding to handle all POST requests, supported operations are
* {@link HttpFSFileSystem.PostOpValues}.
*
* @param is request input stream, used only for
* {@link HttpFSFileSystem.PostOpValues#APPEND} operations.
* @param user principal making the request.
* @param uriInfo the request uriInfo.
* @param path path for the POST request.
* @param op POST operation, default is {@link HttpFSFileSystem.PostOpValues#APPEND}.
* @param hasData indicates if the append request is uploading data or not (just getting the handle).
* @param doAs user being impersonated, default value is none. It can be used
* only if the current user is a HttpFSServer proxyuser.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
* @throws FileSystemAccessException thrown if a FileSystemAccess related error occurred. Thrown
* exceptions are handled by {@link HttpFSExceptionProvider}.
*/
@POST
@Path("{path:.*}")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_JSON})
public Response post(InputStream is,
@Context Principal user,
@Context UriInfo uriInfo,
@PathParam("path") FsPathParam path,
@QueryParam(PostOpParam.NAME) PostOpParam op,
@QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
throws IOException, FileSystemAccessException {
Response response = null;
if (op == null) {
throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PostOpParam.NAME));
}
path.makeAbsolute();
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
switch (op.value()) {
case APPEND: {
if (!hasData.value()) {
response = Response.temporaryRedirect(
createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PostOpValues.APPEND)).build();
} else {
FSOperations.FSAppend command = new FSOperations.FSAppend(is, path.value());
fsExecute(user, doAs.value(), command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok().type(MediaType.APPLICATION_JSON).build();
}
break;
}
}
return response;
}
}
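
For illustration, a client-side sketch against the GET binding above (not part of the patch). The base path /webhdfs/v1, port 14000 and the user.name pseudo-authentication parameter are deployment assumptions, not values taken from this file:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class HttpFSGetDemo {
  public static void main(String[] args) throws Exception {
    // op=GETFILESTATUS routes to HttpFSServer#get() and returns a JSON document.
    URL url = new URL("http://localhost:14000/webhdfs/v1/tmp"
        + "?op=GETFILESTATUS&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    BufferedReader reader =
        new BufferedReader(new InputStreamReader(conn.getInputStream()));
    String line;
    while ((line = reader.readLine()) != null) {
      System.out.println(line);  // the FileStatus JSON payload
    }
    reader.close();
    conn.disconnect();
  }
}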

View File

@ -0,0 +1,126 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.ServerException;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.servlet.ServerWebApp;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
/**
* Bootstrap class that manages the initialization and destruction of the
* HttpFSServer server; it is a <code>javax.servlet.ServletContextListener</code>
* implementation that is wired in HttpFSServer's WAR <code>WEB-INF/web.xml</code>.
* <p/>
* It provides access to the server context via the singleton {@link #get}.
* <p/>
* All the configuration is loaded from configuration properties prefixed
* with <code>httpfs.</code>.
*/
public class HttpFSServerWebApp extends ServerWebApp {
private static final Logger LOG = LoggerFactory.getLogger(HttpFSServerWebApp.class);
/**
* Server name and prefix for all configuration properties.
*/
public static final String NAME = "httpfs";
/**
* Configuration property that defines HttpFSServer admin group.
*/
public static final String CONF_ADMIN_GROUP = "admin.group";
private static HttpFSServerWebApp SERVER;
private String adminGroup;
/**
* Default constructor.
*
* @throws IOException thrown if the home/conf/log/temp directory paths
* could not be resolved.
*/
public HttpFSServerWebApp() throws IOException {
super(NAME);
}
/**
* Constructor used for testing purposes.
*/
protected HttpFSServerWebApp(String homeDir, String configDir, String logDir, String tempDir,
Configuration config) {
super(NAME, homeDir, configDir, logDir, tempDir, config);
}
/**
* Constructor used for testing purposes.
*/
public HttpFSServerWebApp(String homeDir, Configuration config) {
super(NAME, homeDir, config);
}
/**
* Initializes the HttpFSServer server, loads configuration and required services.
*
* @throws ServerException thrown if HttpFSServer server could not be initialized.
*/
@Override
public void init() throws ServerException {
super.init();
if (SERVER != null) {
throw new RuntimeException("HttpFSServer server already initialized");
}
SERVER = this;
adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin");
LOG.info("Connects to Namenode [{}]",
get().get(FileSystemAccess.class).getDefaultConfiguration().get("fs.default.name"));
}
/**
 * Shuts down all running services.
*/
@Override
public void destroy() {
SERVER = null;
super.destroy();
}
/**
* Returns HttpFSServer server singleton, configuration and services are accessible through it.
*
* @return the HttpFSServer server singleton.
*/
public static HttpFSServerWebApp get() {
return SERVER;
}
/**
* Returns HttpFSServer admin group.
*
* @return httpfs admin group.
*/
public String getAdminGroup() {
return adminGroup;
}
}
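A hedged sketch of how code running inside the WAR can use the singleton described above; the method name and the property printed are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.lib.service.FileSystemAccess;

public class WebAppLookupSketch {
  public void printDefaultNameNode() {
    // get() is non-null while the WAR is deployed; services are looked up
    // by interface class through the inherited Server#get(Class).
    HttpFSServerWebApp webApp = HttpFSServerWebApp.get();
    FileSystemAccess fsAccess = webApp.get(FileSystemAccess.class);
    Configuration defaults = fsAccess.getDefaultConfiguration();
    System.out.println("fs.default.name = " + defaults.get("fs.default.name"));
  }
}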

View File

@ -0,0 +1,96 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.lang;
import org.apache.hadoop.lib.util.Check;
import java.util.concurrent.Callable;
/**
* Adapter class that allows <code>Runnable</code>s and <code>Callable</code>s to
 * be treated as each other.
*/
public class RunnableCallable implements Callable<Void>, Runnable {
private Runnable runnable;
private Callable<?> callable;
/**
* Constructor that takes a runnable.
*
* @param runnable runnable.
*/
public RunnableCallable(Runnable runnable) {
this.runnable = Check.notNull(runnable, "runnable");
}
/**
* Constructor that takes a callable.
*
* @param callable callable.
*/
public RunnableCallable(Callable<?> callable) {
this.callable = Check.notNull(callable, "callable");
}
/**
* Invokes the wrapped callable/runnable as a callable.
*
 * @return NULL.
*
* @throws Exception thrown by the wrapped callable/runnable invocation.
*/
@Override
public Void call() throws Exception {
if (runnable != null) {
runnable.run();
} else {
callable.call();
}
return null;
}
/**
* Invokes the wrapped callable/runnable as a runnable.
 *
 * If the wrapped element is a callable and its invocation throws an
 * exception, the exception is rethrown wrapped in a <code>RuntimeException</code>.
*/
@Override
public void run() {
if (runnable != null) {
runnable.run();
} else {
try {
callable.call();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
}
/**
 * Returns the class name of the wrapped callable/runnable.
 *
 * @return the class name of the wrapped callable/runnable.
*/
public String toString() {
return (runnable != null) ? runnable.getClass().getSimpleName() : callable.getClass().getSimpleName();
}
}
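A hedged usage sketch of the adapter: the same wrapper can be handed to APIs that want a Callable and to APIs that want a Runnable. The executor below is illustrative; note the explicit cast, since ExecutorService.submit is overloaded for both interfaces:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.lib.lang.RunnableCallable;

public class RunnableCallableSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newSingleThreadExecutor();

    // A Runnable submitted where a Callable is required.
    RunnableCallable asCallable = new RunnableCallable(new Runnable() {
      public void run() {
        System.out.println("running");
      }
    });
    executor.submit((Callable<Void>) asCallable).get();

    // A Callable handed to an API that only accepts Runnables; checked
    // exceptions are rethrown by run() wrapped in a RuntimeException.
    RunnableCallable asRunnable = new RunnableCallable(new Callable<Void>() {
      public Void call() {
        System.out.println("calling");
        return null;
      }
    });
    new Thread(asRunnable).start();
    executor.shutdown();
  }
}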

View File

@ -0,0 +1,134 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.lang;
import org.apache.hadoop.lib.util.Check;
import java.text.MessageFormat;
/**
 * Generic exception that requires error codes and uses a message
* template from the error code.
*/
public class XException extends Exception {
/**
* Interface to define error codes.
*/
public static interface ERROR {
/**
* Returns the template for the error.
*
* @return the template for the error, the template must be in JDK
* <code>MessageFormat</code> syntax (using {#} positional parameters).
*/
public String getTemplate();
}
private ERROR error;
/**
* Private constructor used by the public constructors.
*
* @param error error code.
* @param message error message.
* @param cause exception cause if any.
*/
private XException(ERROR error, String message, Throwable cause) {
super(message, cause);
this.error = error;
}
/**
* Creates an XException using another XException as cause.
* <p/>
* The error code and error message are extracted from the cause.
*
* @param cause exception cause.
*/
public XException(XException cause) {
this(cause.getError(), cause.getMessage(), cause);
}
/**
* Creates an XException using the specified error code. The exception
* message is resolved using the error code template and the passed
* parameters.
*
* @param error error code for the XException.
* @param params parameters to use when creating the error message
* with the error code template.
*/
@SuppressWarnings({"ThrowableResultOfMethodCallIgnored"})
public XException(ERROR error, Object... params) {
this(Check.notNull(error, "error"), format(error, params), getCause(params));
}
/**
* Returns the error code of the exception.
*
* @return the error code of the exception.
*/
public ERROR getError() {
return error;
}
/**
* Creates a message using a error message template and arguments.
* <p/>
* The template must be in JDK <code>MessageFormat</code> syntax
* (using {#} positional parameters).
*
* @param error error code, to get the template from.
* @param args arguments to use for creating the message.
*
* @return the resolved error message.
*/
private static String format(ERROR error, Object... args) {
String template = error.getTemplate();
if (template == null) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < args.length; i++) {
sb.append(" {").append(i).append("}");
}
template = sb.deleteCharAt(0).toString();
}
return error + ": " + MessageFormat.format(template, args);
}
/**
 * Returns the last parameter if it is an instance of <code>Throwable</code>,
 * otherwise returns NULL.
 *
 * @param params parameters to look for a cause.
 *
 * @return the last parameter if it is an instance of <code>Throwable</code>,
 * NULL otherwise.
*/
private static Throwable getCause(Object... params) {
Throwable throwable = null;
if (params != null && params.length > 0 && params[params.length - 1] instanceof Throwable) {
throwable = (Throwable) params[params.length - 1];
}
return throwable;
}
}
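A hedged sketch of the intended usage: define an enum implementing XException.ERROR, then raise XException with positional parameters; a trailing Throwable becomes the cause. The error code and message are invented for illustration:

import java.io.IOException;
import org.apache.hadoop.lib.lang.XException;

public class XExceptionSketch {

  // Hypothetical error codes; templates use MessageFormat positional params.
  public enum DemoError implements XException.ERROR {
    D01("Could not open [{0}], {1}");

    private final String template;

    DemoError(String template) {
      this.template = template;
    }

    @Override
    public String getTemplate() {
      return template;
    }
  }

  public static void main(String[] args) {
    try {
      // The trailing Throwable is detected and installed as the cause.
      throw new XException(DemoError.D01, "/tmp/x", "permission denied",
                           new IOException("EACCES"));
    } catch (XException ex) {
      System.out.println(ex.getMessage()); // D01: Could not open [/tmp/x], permission denied
    }
  }
}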

View File

@ -0,0 +1,178 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import java.util.Map;
/**
* Convenience class implementing the {@link Service} interface.
*/
public abstract class BaseService implements Service {
private String prefix;
private Server server;
private Configuration serviceConfig;
/**
* Service constructor.
*
* @param prefix service prefix.
*/
public BaseService(String prefix) {
this.prefix = prefix;
}
/**
* Initializes the service.
* <p/>
* It collects all service properties (properties having the
* <code>#SERVER#.#SERVICE#.</code> prefix). The property names are then
* trimmed from the <code>#SERVER#.#SERVICE#.</code> prefix.
* <p/>
* After collecting the service properties it delegates to the
* {@link #init()} method.
*
* @param server the server initializing the service, give access to the
* server context.
*
* @throws ServiceException thrown if the service could not be initialized.
*/
@Override
public final void init(Server server) throws ServiceException {
this.server = server;
String servicePrefix = getPrefixedName("");
serviceConfig = new Configuration(false);
for (Map.Entry<String, String> entry : ConfigurationUtils.resolve(server.getConfig())) {
String key = entry.getKey();
if (key.startsWith(servicePrefix)) {
serviceConfig.set(key.substring(servicePrefix.length()), entry.getValue());
}
}
init();
}
/**
* Post initializes the service. This method is called by the
* {@link Server} after all services of the server have been initialized.
* <p/>
* This method does a NOP.
*
* @throws ServiceException thrown if the service could not be
* post-initialized.
*/
@Override
public void postInit() throws ServiceException {
}
/**
 * Destroys the service. This method is called once, when the
* {@link Server} owning the service is being destroyed.
* <p/>
* This method does a NOP.
*/
@Override
public void destroy() {
}
/**
* Returns the service dependencies of this service. The service will be
* instantiated only if all the service dependencies are already initialized.
* <p/>
* This method returns an empty array (size 0)
*
* @return an empty array (size 0).
*/
@Override
public Class[] getServiceDependencies() {
return new Class[0];
}
/**
* Notification callback when the server changes its status.
* <p/>
 * This method does a NOP.
*
* @param oldStatus old server status.
* @param newStatus new server status.
*
* @throws ServiceException thrown if the service could not process the status change.
*/
@Override
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
}
/**
* Returns the service prefix.
*
* @return the service prefix.
*/
protected String getPrefix() {
return prefix;
}
/**
* Returns the server owning the service.
*
* @return the server owning the service.
*/
protected Server getServer() {
return server;
}
/**
* Returns the full prefixed name of a service property.
*
* @param name of the property.
*
* @return prefixed name of the property.
*/
protected String getPrefixedName(String name) {
return server.getPrefixedName(prefix + "." + name);
}
/**
* Returns the service configuration properties. Property
* names are trimmed off from its prefix.
* <p/>
 * The service configuration properties are all properties
* with names starting with <code>#SERVER#.#SERVICE#.</code>
* in the server configuration.
*
* @return the service configuration properties with names
* trimmed off from their <code>#SERVER#.#SERVICE#.</code>
* prefix.
*/
protected Configuration getServiceConfig() {
return serviceConfig;
}
/**
 * Initializes the service.
 * <p/>
 * This method is called by {@link #init(Server)} after all service properties
 * (properties prefixed with <code>#SERVER#.#SERVICE#.</code>) have been loaded.
*
* @throws ServiceException thrown if the service could not be initialized.
*/
protected abstract void init() throws ServiceException;
}
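A hedged sketch of a minimal concrete service built on this class; the prefix, property, and Echo interface are invented for illustration:

import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;

public class EchoService extends BaseService implements EchoService.Echo {

  // Hypothetical interface the server will key this service by.
  public interface Echo {
    String echo(String message);
  }

  private String banner;

  public EchoService() {
    super("echo"); // service properties arrive as #SERVER#.echo.*
  }

  @Override
  protected void init() throws ServiceException {
    // getServiceConfig() has the #SERVER#.echo. prefix already trimmed.
    banner = getServiceConfig().get("banner", "> ");
  }

  @Override
  public Class getInterface() {
    return Echo.class;
  }

  @Override
  public String echo(String message) {
    return banner + message;
  }
}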

View File

@ -0,0 +1,766 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.util.Check;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* A Server class provides standard configuration, logging and {@link Service}
 * lifecycle management.
* <p/>
* A Server normally has a home directory, a configuration directory, a temp
 * directory and a log directory.
* <p/>
* The Server configuration is loaded from 2 overlapped files,
* <code>#SERVER#-default.xml</code> and <code>#SERVER#-site.xml</code>. The
 * default file is loaded from the classpath, the site file is loaded from the
* configuration directory.
* <p/>
* The Server collects all configuration properties prefixed with
* <code>#SERVER#</code>. The property names are then trimmed from the
* <code>#SERVER#</code> prefix.
* <p/>
* The Server log configuration is loaded from the
* <code>#SERVICE#-log4j.properties</code> file in the configuration directory.
* <p/>
 * The lifecycle of a server is defined by the {@link Server.Status} enum.
 * When a server is created its status is UNDEF; while being initialized it is
 * BOOTING; once initialization completes it transitions, by default, to NORMAL.
* The <code>#SERVER#.startup.status</code> configuration property can be used
* to specify a different startup status (NORMAL, ADMIN or HALTED).
* <p/>
* Services classes are defined in the <code>#SERVER#.services</code> and
* <code>#SERVER#.services.ext</code> properties. They are loaded in order
* (services first, then services.ext).
* <p/>
* Before initializing the services, they are traversed and duplicate service
 * interfaces are removed from the service list. The last service using a given
* interface wins (this enables a simple override mechanism).
* <p/>
 * After the services have been resolved by interface de-duplication they are
* initialized in order. Once all services are initialized they are
* post-initialized (this enables late/conditional service bindings).
* <p/>
*/
public class Server {
private Logger log;
/**
* Server property name that defines the service classes.
*/
public static final String CONF_SERVICES = "services";
/**
* Server property name that defines the service extension classes.
*/
public static final String CONF_SERVICES_EXT = "services.ext";
/**
* Server property name that defines server startup status.
*/
public static final String CONF_STARTUP_STATUS = "startup.status";
/**
* Enumeration that defines the server status.
*/
public enum Status {
UNDEF(false, false),
BOOTING(false, true),
HALTED(true, true),
ADMIN(true, true),
NORMAL(true, true),
SHUTTING_DOWN(false, true),
SHUTDOWN(false, false);
private boolean settable;
private boolean operational;
/**
* Status constructor.
*
* @param settable indicates if the status is settable.
* @param operational indicates if the server is operational
* when in this status.
*/
private Status(boolean settable, boolean operational) {
this.settable = settable;
this.operational = operational;
}
/**
 * Returns whether this server status is operational.
 *
 * @return whether this server status is operational.
*/
public boolean isOperational() {
return operational;
}
}
/**
* Name of the log4j configuration file the Server will load from the
* classpath if the <code>#SERVER#-log4j.properties</code> is not defined
* in the server configuration directory.
*/
public static final String DEFAULT_LOG4J_PROPERTIES = "default-log4j.properties";
private Status status;
private String name;
private String homeDir;
private String configDir;
private String logDir;
private String tempDir;
private Configuration config;
private Map<Class, Service> services = new LinkedHashMap<Class, Service>();
/**
* Creates a server instance.
* <p/>
* The config, log and temp directories are all under the specified home directory.
*
* @param name server name.
* @param homeDir server home directory.
*/
public Server(String name, String homeDir) {
this(name, homeDir, null);
}
/**
* Creates a server instance.
*
* @param name server name.
* @param homeDir server home directory.
* @param configDir config directory.
* @param logDir log directory.
* @param tempDir temp directory.
*/
public Server(String name, String homeDir, String configDir, String logDir, String tempDir) {
this(name, homeDir, configDir, logDir, tempDir, null);
}
/**
* Creates a server instance.
* <p/>
* The config, log and temp directories are all under the specified home directory.
* <p/>
 * It uses the provided configuration instead of loading it from the config dir.
*
* @param name server name.
* @param homeDir server home directory.
* @param config server configuration.
*/
public Server(String name, String homeDir, Configuration config) {
this(name, homeDir, homeDir + "/conf", homeDir + "/log", homeDir + "/temp", config);
}
/**
* Creates a server instance.
* <p/>
 * It uses the provided configuration instead of loading it from the config dir.
*
* @param name server name.
* @param homeDir server home directory.
* @param configDir config directory.
* @param logDir log directory.
* @param tempDir temp directory.
* @param config server configuration.
*/
public Server(String name, String homeDir, String configDir, String logDir, String tempDir, Configuration config) {
this.name = Check.notEmpty(name, "name").trim().toLowerCase();
this.homeDir = Check.notEmpty(homeDir, "homeDir");
this.configDir = Check.notEmpty(configDir, "configDir");
this.logDir = Check.notEmpty(logDir, "logDir");
this.tempDir = Check.notEmpty(tempDir, "tempDir");
checkAbsolutePath(homeDir, "homeDir");
checkAbsolutePath(configDir, "configDir");
checkAbsolutePath(logDir, "logDir");
checkAbsolutePath(tempDir, "tempDir");
if (config != null) {
this.config = new Configuration(false);
ConfigurationUtils.copy(config, this.config);
}
status = Status.UNDEF;
}
/**
* Validates that the specified value is an absolute path (starts with '/').
*
* @param value value to verify it is an absolute path.
* @param name name to use in the exception if the value is not an absolute
* path.
*
* @return the value.
*
* @throws IllegalArgumentException thrown if the value is not an absolute
* path.
*/
private String checkAbsolutePath(String value, String name) {
if (!value.startsWith("/")) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] must be an absolute path [{1}]", name, value));
}
return value;
}
/**
* Returns the current server status.
*
* @return the current server status.
*/
public Status getStatus() {
return status;
}
/**
* Sets a new server status.
* <p/>
* The status must be settable.
* <p/>
 * All services will be notified of the status change via the
* {@link Service#serverStatusChange(Status, Status)} method. If a service
* throws an exception during the notification, the server will be destroyed.
*
* @param status status to set.
*
 * @throws ServerException thrown if the server has been destroyed because of
* a failed notification to a service.
*/
public void setStatus(Status status) throws ServerException {
Check.notNull(status, "status");
if (status.settable) {
if (status != this.status) {
Status oldStatus = this.status;
this.status = status;
for (Service service : services.values()) {
try {
service.serverStatusChange(oldStatus, status);
} catch (Exception ex) {
log.error("Service [{}] exception during status change to [{}] -server shutting down-, {}",
new Object[]{service.getInterface().getSimpleName(), status, ex.getMessage(), ex});
destroy();
throw new ServerException(ServerException.ERROR.S11, service.getInterface().getSimpleName(),
status, ex.getMessage(), ex);
}
}
}
} else {
throw new IllegalArgumentException("Status [" + status + "] is not settable");
}
}
/**
* Verifies the server is operational.
*
* @throws IllegalStateException thrown if the server is not operational.
*/
protected void ensureOperational() {
if (!getStatus().isOperational()) {
throw new IllegalStateException("Server is not running");
}
}
/**
* Convenience method that returns a resource as inputstream from the
* classpath.
* <p/>
* It first attempts to use the Thread's context classloader and if not
* set it uses the <code>ClassUtils</code> classloader.
*
* @param name resource to retrieve.
*
* @return inputstream with the resource, NULL if the resource does not
* exist.
*/
static InputStream getResource(String name) {
Check.notEmpty(name, "name");
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
cl = Server.class.getClassLoader();
}
return cl.getResourceAsStream(name);
}
/**
* Initializes the Server.
* <p/>
* The initialization steps are:
* <ul>
* <li>It verifies the service home and temp directories exist</li>
* <li>Loads the Server <code>#SERVER#-default.xml</code>
* configuration file from the classpath</li>
* <li>Initializes log4j logging. If the
* <code>#SERVER#-log4j.properties</code> file does not exist in the config
 * directory it loads <code>default-log4j.properties</code> from the classpath
* </li>
* <li>Loads the <code>#SERVER#-site.xml</code> file from the server config
* directory and merges it with the default configuration.</li>
* <li>Loads the services</li>
* <li>Initializes the services</li>
* <li>Post-initializes the services</li>
 * <li>Sets the server startup status</li>
 * </ul>
*
* @throws ServerException thrown if the server could not be initialized.
*/
public void init() throws ServerException {
if (status != Status.UNDEF) {
throw new IllegalStateException("Server already initialized");
}
status = Status.BOOTING;
verifyDir(homeDir);
verifyDir(tempDir);
Properties serverInfo = new Properties();
try {
InputStream is = getResource(name + ".properties");
serverInfo.load(is);
is.close();
} catch (IOException ex) {
throw new RuntimeException("Could not load server information file: " + name + ".properties");
}
initLog();
log.info("++++++++++++++++++++++++++++++++++++++++++++++++++++++");
log.info("Server [{}] starting", name);
log.info(" Built information:");
log.info(" Version : {}", serverInfo.getProperty(name + ".version", "undef"));
log.info(" Source Repository : {}", serverInfo.getProperty(name + ".source.repository", "undef"));
log.info(" Source Revision : {}", serverInfo.getProperty(name + ".source.revision", "undef"));
log.info(" Built by : {}", serverInfo.getProperty(name + ".build.username", "undef"));
log.info(" Built timestamp : {}", serverInfo.getProperty(name + ".build.timestamp", "undef"));
log.info(" Runtime information:");
log.info(" Home dir: {}", homeDir);
log.info(" Config dir: {}", (config == null) ? configDir : "-");
log.info(" Log dir: {}", logDir);
log.info(" Temp dir: {}", tempDir);
initConfig();
log.debug("Loading services");
List<Service> list = loadServices();
try {
log.debug("Initializing services");
initServices(list);
log.info("Services initialized");
} catch (ServerException ex) {
log.error("Services initialization failure, destroying initialized services");
destroyServices();
throw ex;
}
Status status = Status.valueOf(getConfig().get(getPrefixedName(CONF_STARTUP_STATUS), Status.NORMAL.toString()));
setStatus(status);
log.info("Server [{}] started!, status [{}]", name, status);
}
/**
* Verifies the specified directory exists.
*
* @param dir directory to verify it exists.
*
 * @throws ServerException thrown if the directory does not exist or the
 * path is not a directory.
*/
private void verifyDir(String dir) throws ServerException {
File file = new File(dir);
if (!file.exists()) {
throw new ServerException(ServerException.ERROR.S01, dir);
}
if (!file.isDirectory()) {
throw new ServerException(ServerException.ERROR.S02, dir);
}
}
/**
* Initializes Log4j logging.
*
* @throws ServerException thrown if Log4j could not be initialized.
*/
protected void initLog() throws ServerException {
verifyDir(logDir);
LogManager.resetConfiguration();
File log4jFile = new File(configDir, name + "-log4j.properties");
if (log4jFile.exists()) {
PropertyConfigurator.configureAndWatch(log4jFile.toString(), 10 * 1000); //every 10 secs
log = LoggerFactory.getLogger(Server.class);
} else {
Properties props = new Properties();
try {
InputStream is = getResource(DEFAULT_LOG4J_PROPERTIES);
props.load(is);
} catch (IOException ex) {
throw new ServerException(ServerException.ERROR.S03, DEFAULT_LOG4J_PROPERTIES, ex.getMessage(), ex);
}
PropertyConfigurator.configure(props);
log = LoggerFactory.getLogger(Server.class);
log.warn("Log4j [{}] configuration file not found, using default configuration from classpath", log4jFile);
}
}
/**
 * Loads and initializes the server configuration.
*
* @throws ServerException thrown if the configuration could not be loaded/initialized.
*/
protected void initConfig() throws ServerException {
verifyDir(configDir);
File file = new File(configDir);
Configuration defaultConf;
String defaultConfig = name + "-default.xml";
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
InputStream inputStream = classLoader.getResourceAsStream(defaultConfig);
if (inputStream == null) {
log.warn("Default configuration file not available in classpath [{}]", defaultConfig);
defaultConf = new Configuration(false);
} else {
try {
defaultConf = new Configuration(false);
ConfigurationUtils.load(defaultConf, inputStream);
} catch (Exception ex) {
throw new ServerException(ServerException.ERROR.S03, defaultConfig, ex.getMessage(), ex);
}
}
if (config == null) {
Configuration siteConf;
File siteFile = new File(file, name + "-site.xml");
if (!siteFile.exists()) {
log.warn("Site configuration file [{}] not found in config directory", siteFile);
siteConf = new Configuration(false);
} else {
if (!siteFile.isFile()) {
throw new ServerException(ServerException.ERROR.S05, siteFile.getAbsolutePath());
}
try {
log.debug("Loading site configuration from [{}]", siteFile);
inputStream = new FileInputStream(siteFile);
siteConf = new Configuration(false);
ConfigurationUtils.load(siteConf, inputStream);
} catch (IOException ex) {
throw new ServerException(ServerException.ERROR.S06, siteFile, ex.getMessage(), ex);
}
}
config = new Configuration(false);
ConfigurationUtils.copy(siteConf, config);
}
ConfigurationUtils.injectDefaults(defaultConf, config);
for (String name : System.getProperties().stringPropertyNames()) {
String value = System.getProperty(name);
if (name.startsWith(getPrefix() + ".")) {
config.set(name, value);
if (name.endsWith(".password") || name.endsWith(".secret")) {
value = "*MASKED*";
}
log.info("System property sets {}: {}", name, value);
}
}
log.debug("Loaded Configuration:");
log.debug("------------------------------------------------------");
for (Map.Entry<String, String> entry : config) {
String name = entry.getKey();
String value = config.get(entry.getKey());
if (name.endsWith(".password") || name.endsWith(".secret")) {
value = "*MASKED*";
}
log.debug(" {}: {}", entry.getKey(), value);
}
log.debug("------------------------------------------------------");
}
/**
* Loads the specified services.
*
* @param classes services classes to load.
 * @param list list of loaded services in order of appearance in the
* configuration.
*
* @throws ServerException thrown if a service class could not be loaded.
*/
private void loadServices(Class[] classes, List<Service> list) throws ServerException {
for (Class klass : classes) {
try {
Service service = (Service) klass.newInstance();
log.debug("Loading service [{}] implementation [{}]", service.getInterface(),
service.getClass());
if (!service.getInterface().isInstance(service)) {
throw new ServerException(ServerException.ERROR.S04, klass, service.getInterface().getName());
}
list.add(service);
} catch (ServerException ex) {
throw ex;
} catch (Exception ex) {
throw new ServerException(ServerException.ERROR.S07, klass, ex.getMessage(), ex);
}
}
}
/**
* Loads services defined in <code>services</code> and
* <code>services.ext</code> and de-dups them.
*
* @return List of final services to initialize.
*
 * @throws ServerException thrown if the services could not be loaded.
*/
protected List<Service> loadServices() throws ServerException {
try {
Map<Class, Service> map = new LinkedHashMap<Class, Service>();
Class[] classes = getConfig().getClasses(getPrefixedName(CONF_SERVICES));
Class[] classesExt = getConfig().getClasses(getPrefixedName(CONF_SERVICES_EXT));
List<Service> list = new ArrayList<Service>();
loadServices(classes, list);
loadServices(classesExt, list);
//removing duplicate services, strategy: last one wins
for (Service service : list) {
if (map.containsKey(service.getInterface())) {
log.debug("Replacing service [{}] implementation [{}]", service.getInterface(),
service.getClass());
}
map.put(service.getInterface(), service);
}
list = new ArrayList<Service>();
for (Map.Entry<Class, Service> entry : map.entrySet()) {
list.add(entry.getValue());
}
return list;
} catch (RuntimeException ex) {
throw new ServerException(ServerException.ERROR.S08, ex.getMessage(), ex);
}
}
/**
* Initializes the list of services.
*
 * @param services services to initialize; it must be a de-duped list of
* services.
*
* @throws ServerException thrown if the services could not be initialized.
*/
protected void initServices(List<Service> services) throws ServerException {
for (Service service : services) {
log.debug("Initializing service [{}]", service.getInterface());
checkServiceDependencies(service);
service.init(this);
this.services.put(service.getInterface(), service);
}
for (Service service : services) {
service.postInit();
}
}
/**
* Checks if all service dependencies of a service are available.
*
* @param service service to check if all its dependencies are available.
*
* @throws ServerException thrown if a service dependency is missing.
*/
protected void checkServiceDependencies(Service service) throws ServerException {
if (service.getServiceDependencies() != null) {
for (Class dependency : service.getServiceDependencies()) {
if (services.get(dependency) == null) {
throw new ServerException(ServerException.ERROR.S10, service.getClass(), dependency);
}
}
}
}
/**
* Destroys the server services.
*/
protected void destroyServices() {
List<Service> list = new ArrayList<Service>(services.values());
Collections.reverse(list);
for (Service service : list) {
try {
log.debug("Destroying service [{}]", service.getInterface());
service.destroy();
} catch (Throwable ex) {
log.error("Could not destroy service [{}], {}",
new Object[]{service.getInterface(), ex.getMessage(), ex});
}
}
log.info("Services destroyed");
}
/**
* Destroys the server.
* <p/>
* All services are destroyed in reverse order of initialization, then the
* Log4j framework is shutdown.
*/
public void destroy() {
ensureOperational();
destroyServices();
log.info("Server [{}] shutdown!", name);
log.info("======================================================");
if (!Boolean.getBoolean("test.circus")) {
LogManager.shutdown();
}
status = Status.SHUTDOWN;
}
/**
* Returns the name of the server.
*
* @return the server name.
*/
public String getName() {
return name;
}
/**
* Returns the server prefix for server configuration properties.
* <p/>
* By default it is the server name.
*
* @return the prefix for server configuration properties.
*/
public String getPrefix() {
return getName();
}
/**
* Returns the prefixed name of a server property.
*
* @param name of the property.
*
* @return prefixed name of the property.
*/
public String getPrefixedName(String name) {
return getPrefix() + "." + Check.notEmpty(name, "name");
}
/**
* Returns the server home dir.
*
* @return the server home dir.
*/
public String getHomeDir() {
return homeDir;
}
/**
* Returns the server config dir.
*
* @return the server config dir.
*/
public String getConfigDir() {
return configDir;
}
/**
* Returns the server log dir.
*
* @return the server log dir.
*/
public String getLogDir() {
return logDir;
}
/**
* Returns the server temp dir.
*
* @return the server temp dir.
*/
public String getTempDir() {
return tempDir;
}
/**
* Returns the server configuration.
*
 * @return the server configuration.
*/
public Configuration getConfig() {
return config;
}
/**
* Returns the {@link Service} associated to the specified interface.
*
* @param serviceKlass service interface.
*
* @return the service implementation.
*/
@SuppressWarnings("unchecked")
public <T> T get(Class<T> serviceKlass) {
ensureOperational();
Check.notNull(serviceKlass, "serviceKlass");
return (T) services.get(serviceKlass);
}
/**
* Adds a service programmatically.
* <p/>
* If a service with the same interface exists, it will be destroyed and
* removed before the given one is initialized and added.
* <p/>
* If an exception is thrown the server is destroyed.
*
* @param klass service class to add.
*
 * @throws ServerException thrown if the service could not be initialized/added
* to the server.
*/
public void setService(Class<? extends Service> klass) throws ServerException {
ensureOperational();
Check.notNull(klass, "serviceKlass");
if (getStatus() == Status.SHUTTING_DOWN) {
throw new IllegalStateException("Server shutting down");
}
try {
Service newService = klass.newInstance();
Service oldService = services.get(newService.getInterface());
if (oldService != null) {
try {
oldService.destroy();
} catch (Throwable ex) {
log.error("Could not destroy service [{}], {}",
new Object[]{oldService.getInterface(), ex.getMessage(), ex});
}
}
newService.init(this);
services.put(newService.getInterface(), newService);
} catch (Exception ex) {
log.error("Could not set service [{}] programmatically -server shutting down-, {}", klass, ex);
destroy();
throw new ServerException(ServerException.ERROR.S09, klass, ex.getMessage(), ex);
}
}
}
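A hedged sketch of standing up a Server programmatically, following the lifecycle described above. The server name, home directory, and service list are illustrative, and EchoService is the hypothetical service sketched earlier; a demo.properties and default-log4j.properties resource on the classpath, plus existing home/log/temp directories, are assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServerException;

public class ServerBootstrapSketch {
  public static void main(String[] args) throws ServerException {
    // Passing a Configuration skips loading demo-site.xml from disk;
    // property names must carry the server-name prefix ("demo.").
    Configuration conf = new Configuration(false);
    conf.set("demo.services", EchoService.class.getName());

    Server server = new Server("demo", "/var/demo", conf);
    server.init(); // BOOTING -> NORMAL (or the configured startup.status)
    try {
      EchoService.Echo echo = server.get(EchoService.Echo.class);
      System.out.println(echo.echo("hello"));
    } finally {
      server.destroy();
    }
  }
}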

View File

@ -0,0 +1,90 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.lib.lang.XException;
/**
* Exception thrown by the {@link Server} class.
*/
public class ServerException extends XException {
/**
 * Error codes used by the {@link Server} class.
*/
public static enum ERROR implements XException.ERROR {
S01("Dir [{0}] does not exist"),
S02("[{0}] is not a directory"),
S03("Could not load file from classpath [{0}], {1}"),
S04("Service [{0}] does not implement declared interface [{1}]"),
S05("[{0}] is not a file"),
S06("Could not load file [{0}], {1}"),
S07("Could not instanciate service class [{0}], {1}"),
S08("Could not load service classes, {0}"),
S09("Could not set service [{0}] programmatically -server shutting down-, {1}"),
S10("Service [{0}] requires service [{1}]"),
S11("Service [{0}] exception during status change to [{1}] -server shutting down-, {2}");
private String msg;
/**
* Constructor for the error code enum.
*
* @param msg message template.
*/
private ERROR(String msg) {
this.msg = msg;
}
/**
* Returns the message template for the error code.
*
* @return the message template for the error code.
*/
@Override
public String getTemplate() {
return msg;
}
}
/**
* Constructor for sub-classes.
*
* @param error error code for the XException.
* @param params parameters to use when creating the error message
* with the error code template.
*/
protected ServerException(XException.ERROR error, Object... params) {
super(error, params);
}
/**
 * Creates a server exception using the specified error code.
* The exception message is resolved using the error code template
* and the passed parameters.
*
* @param error error code for the XException.
* @param params parameters to use when creating the error message
* with the error code template.
*/
public ServerException(ERROR error, Object... params) {
super(error, params);
}
}

View File

@ -0,0 +1,79 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
/**
* Service interface for components to be managed by the {@link Server} class.
*/
public interface Service {
/**
* Initializes the service. This method is called once, when the
* {@link Server} owning the service is being initialized.
*
* @param server the server initializing the service, give access to the
* server context.
*
* @throws ServiceException thrown if the service could not be initialized.
*/
public void init(Server server) throws ServiceException;
/**
* Post initializes the service. This method is called by the
* {@link Server} after all services of the server have been initialized.
*
* @throws ServiceException thrown if the service could not be
* post-initialized.
*/
public void postInit() throws ServiceException;
/**
 * Destroys the service. This method is called once, when the
* {@link Server} owning the service is being destroyed.
*/
public void destroy();
/**
* Returns the service dependencies of this service. The service will be
* instantiated only if all the service dependencies are already initialized.
*
* @return the service dependencies.
*/
public Class[] getServiceDependencies();
/**
* Returns the interface implemented by this service. This interface is used
 * by the {@link Server} when the {@link Server#get(Class)} method is used to
* retrieve a service.
*
* @return the interface that identifies the service.
*/
public Class getInterface();
/**
* Notification callback when the server changes its status.
*
* @param oldStatus old server status.
* @param newStatus new server status.
*
* @throws ServiceException thrown if the service could not process the status change.
*/
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException;
}

View File

@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.lib.lang.XException;
/**
* Exception thrown by {@link Service} implementations.
*/
public class ServiceException extends ServerException {
/**
 * Creates a service exception using the specified error code.
* The exception message is resolved using the error code template
* and the passed parameters.
*
* @param error error code for the XException.
* @param params parameters to use when creating the error message
* with the error code template.
*/
public ServiceException(XException.ERROR error, Object... params) {
super(error, params);
}
}

View File

@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import java.io.IOException;
public interface FileSystemAccess {
public interface FileSystemExecutor<T> {
public T execute(FileSystem fs) throws IOException;
}
public <T> T execute(String user, Configuration conf, FileSystemExecutor<T> executor) throws
FileSystemAccessException;
public FileSystem createFileSystem(String user, Configuration conf) throws IOException, FileSystemAccessException;
public void releaseFileSystem(FileSystem fs) throws IOException;
public Configuration getDefaultConfiguration();
}
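A hedged sketch of the executor pattern this interface defines: the service opens (and proxies) the FileSystem around the callback, the caller only supplies the work. The user name and path are illustrative:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;

public class FileSystemAccessSketch {
  public long fileLength(FileSystemAccess fsAccess) throws FileSystemAccessException {
    // getDefaultConfiguration() must carry fs.default.name for the target NN.
    Configuration conf = fsAccess.getDefaultConfiguration();
    return fsAccess.execute("joe", conf,
        new FileSystemAccess.FileSystemExecutor<Long>() {
          @Override
          public Long execute(FileSystem fs) throws IOException {
            FileStatus status = fs.getFileStatus(new Path("/user/joe/data.txt"));
            return status.getLen();
          }
        });
  }
}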

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.lib.lang.XException;
public class FileSystemAccessException extends XException {
public enum ERROR implements XException.ERROR {
H01("Service property [{0}] not defined"),
H02("Kerberos initialization failed, {0}"),
H03("FileSystemExecutor error, {0}"),
H04("JobClientExecutor error, {0}"),
H05("[{0}] validation failed, {1}"),
H06("Property [{0}] not defined in configuration object"),
H07("[{0}] not healthy, {1}"),
H08(""),
H09("Invalid FileSystemAccess security mode [{0}]");
private String template;
ERROR(String template) {
this.template = template;
}
@Override
public String getTemplate() {
return template;
}
}
public FileSystemAccessException(ERROR error, Object... params) {
super(error, params);
}
}

View File

@ -0,0 +1,28 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import java.io.IOException;
import java.util.List;
public interface Groups {
public List<String> getGroups(String user) throws IOException;
}

View File

@ -0,0 +1,50 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import java.util.Map;
public interface Instrumentation {
public interface Cron {
public Cron start();
public Cron stop();
}
public interface Variable<T> {
T getValue();
}
public Cron createCron();
public void incr(String group, String name, long count);
public void addCron(String group, String name, Cron cron);
public void addVariable(String group, String name, Variable<?> variable);
//sampling happens once a second
public void addSampler(String group, String name, int samplingSize, Variable<Long> variable);
public Map<String, Map<String, ?>> getSnapshot();
}
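A hedged sketch of recording metrics through this interface; the group and metric names are invented:

import org.apache.hadoop.lib.service.Instrumentation;

public class InstrumentationSketch {
  public void timedOperation(Instrumentation instr) {
    instr.incr("demo", "ops", 1); // bump a counter

    Instrumentation.Cron cron = instr.createCron().start();
    try {
      doWork();
    } finally {
      // Cron accumulates the elapsed time between start() and stop().
      instr.addCron("demo", "opTime", cron.stop());
    }
  }

  private void doWork() {
    // placeholder for the measured operation
  }
}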

View File

@ -0,0 +1,28 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import java.io.IOException;
import java.security.AccessControlException;
public interface ProxyUser {
public void validate(String proxyUser, String proxyHost, String doAsUser) throws IOException, AccessControlException;
}

View File

@ -0,0 +1,30 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
public interface Scheduler {
public abstract void schedule(Callable<?> callable, long delay, long interval, TimeUnit unit);
public abstract void schedule(Runnable runnable, long delay, long interval, TimeUnit unit);
}
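A hedged sketch of scheduling a periodic task through this interface; the task, delay, and interval are illustrative:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.lib.service.Scheduler;

public class SchedulerSketch {
  public void start(Scheduler scheduler) {
    // Run a purge task every 60 seconds after a 10-second initial delay.
    scheduler.schedule(new Runnable() {
      public void run() {
        System.out.println("purging expired entries");
      }
    }, 10, 60, TimeUnit.SECONDS);
  }
}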

View File

@ -0,0 +1,278 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.hadoop;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.util.Check;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
public class FileSystemAccessService extends BaseService implements FileSystemAccess {
private static final Logger LOG = LoggerFactory.getLogger(FileSystemAccessService.class);
public static final String PREFIX = "hadoop";
private static final String INSTRUMENTATION_GROUP = "hadoop";
public static final String AUTHENTICATION_TYPE = "authentication.type";
public static final String KERBEROS_KEYTAB = "authentication.kerberos.keytab";
public static final String KERBEROS_PRINCIPAL = "authentication.kerberos.principal";
public static final String NAME_NODE_WHITELIST = "name.node.whitelist";
private static final String HADOOP_CONF_PREFIX = "conf:";
private static final String NAME_NODE_PROPERTY = "fs.default.name";
public FileSystemAccessService() {
super(PREFIX);
}
private Collection<String> nameNodeWhitelist;
Configuration serviceHadoopConf;
private AtomicInteger unmanagedFileSystems = new AtomicInteger();
@Override
protected void init() throws ServiceException {
LOG.info("Using FileSystemAccess JARs version [{}]", VersionInfo.getVersion());
String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim();
if (security.equals("kerberos")) {
String defaultName = getServer().getName();
String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab";
keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
if (keytab.length() == 0) {
throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_KEYTAB);
}
String principal = defaultName + "/localhost@LOCALHOST";
principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
if (principal.length() == 0) {
throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
}
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
try {
UserGroupInformation.loginUserFromKeytab(principal, keytab);
} catch (IOException ex) {
throw new ServiceException(FileSystemAccessException.ERROR.H02, ex.getMessage(), ex);
}
LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
} else if (security.equals("simple")) {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "simple");
UserGroupInformation.setConfiguration(conf);
LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
} else {
throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
}
serviceHadoopConf = new Configuration(false);
for (Map.Entry entry : getServiceConfig()) {
String name = (String) entry.getKey();
if (name.startsWith(HADOOP_CONF_PREFIX)) {
name = name.substring(HADOOP_CONF_PREFIX.length());
String value = (String) entry.getValue();
serviceHadoopConf.set(name, value);
}
}
setRequiredServiceHadoopConf(serviceHadoopConf);
LOG.debug("FileSystemAccess default configuration:");
for (Map.Entry entry : serviceHadoopConf) {
LOG.debug(" {} = {}", entry.getKey(), entry.getValue());
}
nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
}
@Override
public void postInit() throws ServiceException {
super.postInit();
Instrumentation instrumentation = getServer().get(Instrumentation.class);
instrumentation.addVariable(INSTRUMENTATION_GROUP, "unmanaged.fs", new Instrumentation.Variable<Integer>() {
@Override
public Integer getValue() {
return unmanagedFileSystems.get();
}
});
instrumentation.addSampler(INSTRUMENTATION_GROUP, "unmanaged.fs", 60, new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return (long) unmanagedFileSystems.get();
}
});
}
private Set<String> toLowerCase(Collection<String> collection) {
Set<String> set = new HashSet<String>();
for (String value : collection) {
set.add(value.toLowerCase());
}
return set;
}
@Override
public Class getInterface() {
return FileSystemAccess.class;
}
@Override
public Class[] getServiceDependencies() {
return new Class[]{Instrumentation.class};
}
protected UserGroupInformation getUGI(String user) throws IOException {
return UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
}
protected void setRequiredServiceHadoopConf(Configuration conf) {
conf.set("fs.hdfs.impl.disable.cache", "true");
}
protected Configuration createHadoopConf(Configuration conf) {
Configuration hadoopConf = new Configuration();
ConfigurationUtils.copy(serviceHadoopConf, hadoopConf);
ConfigurationUtils.copy(conf, hadoopConf);
return hadoopConf;
}
protected Configuration createNameNodeConf(Configuration conf) {
return createHadoopConf(conf);
}
protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException {
return FileSystem.get(namenodeConf);
}
protected void closeFileSystem(FileSystem fs) throws IOException {
fs.close();
}
protected void validateNamenode(String namenode) throws FileSystemAccessException {
if (nameNodeWhitelist.size() > 0 && !nameNodeWhitelist.contains("*")) {
if (!nameNodeWhitelist.contains(namenode.toLowerCase())) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H05, namenode, "not in whitelist");
}
}
}
protected void checkNameNodeHealth(FileSystem fileSystem) throws FileSystemAccessException {
}
@Override
public <T> T execute(String user, final Configuration conf, final FileSystemExecutor<T> executor)
throws FileSystemAccessException {
Check.notEmpty(user, "user");
Check.notNull(conf, "conf");
Check.notNull(executor, "executor");
if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06, NAME_NODE_PROPERTY);
}
try {
validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
UserGroupInformation ugi = getUGI(user);
return ugi.doAs(new PrivilegedExceptionAction<T>() {
public T run() throws Exception {
Configuration namenodeConf = createNameNodeConf(conf);
FileSystem fs = createFileSystem(namenodeConf);
Instrumentation instrumentation = getServer().get(Instrumentation.class);
Instrumentation.Cron cron = instrumentation.createCron();
try {
checkNameNodeHealth(fs);
cron.start();
return executor.execute(fs);
} finally {
cron.stop();
instrumentation.addCron(INSTRUMENTATION_GROUP, executor.getClass().getSimpleName(), cron);
closeFileSystem(fs);
}
}
});
} catch (FileSystemAccessException ex) {
throw ex;
} catch (Exception ex) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H03, ex);
}
}
public FileSystem createFileSystemInternal(String user, final Configuration conf)
throws IOException, FileSystemAccessException {
Check.notEmpty(user, "user");
Check.notNull(conf, "conf");
try {
validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
UserGroupInformation ugi = getUGI(user);
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws Exception {
Configuration namenodeConf = createNameNodeConf(conf);
return createFileSystem(namenodeConf);
}
});
} catch (IOException ex) {
throw ex;
} catch (FileSystemAccessException ex) {
throw ex;
} catch (Exception ex) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H08, ex.getMessage(), ex);
}
}
@Override
public FileSystem createFileSystem(String user, final Configuration conf) throws IOException,
FileSystemAccessException {
unmanagedFileSystems.incrementAndGet();
return createFileSystemInternal(user, conf);
}
@Override
public void releaseFileSystem(FileSystem fs) throws IOException {
unmanagedFileSystems.decrementAndGet();
closeFileSystem(fs);
}
@Override
public Configuration getDefaultConfiguration() {
Configuration conf = new Configuration(false);
ConfigurationUtils.copy(serviceHadoopConf, conf);
return conf;
}
}
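
A minimal caller sketch for the service above (the FileSystemAccess.FileSystemExecutor nesting and the name-node property key are assumptions; the service runs the executor as the proxied user and closes the filesystem when it returns):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.service.FileSystemAccess;
import java.io.IOException;

public class FileSystemAccessUsage {
  // Sketch: list a home directory as a proxied user. 'access' would come from
  // server.get(FileSystemAccess.class); the property key below stands in for
  // the NAME_NODE_PROPERTY constant defined earlier in the service.
  public static FileStatus[] listHome(FileSystemAccess access, final String user) throws Exception {
    Configuration conf = access.getDefaultConfiguration();
    conf.set("namenode.hostname", "hdfs://namenode:8020"); // illustrative key and URI
    return access.execute(user, conf, new FileSystemAccess.FileSystemExecutor<FileStatus[]>() {
      @Override
      public FileStatus[] execute(FileSystem fs) throws IOException {
        return fs.listStatus(new Path("/user/" + user));
      }
    });
  }
}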


@ -0,0 +1,403 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.instrumentation;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.service.Scheduler;
import org.json.simple.JSONAware;
import org.json.simple.JSONObject;
import org.json.simple.JSONStreamAware;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
public class InstrumentationService extends BaseService implements Instrumentation {
public static final String PREFIX = "instrumentation";
public static final String CONF_TIMERS_SIZE = "timers.size";
private int timersSize;
private Lock counterLock;
private Lock timerLock;
private Lock variableLock;
private Lock samplerLock;
private Map<String, Map<String, AtomicLong>> counters;
private Map<String, Map<String, Timer>> timers;
private Map<String, Map<String, VariableHolder>> variables;
private Map<String, Map<String, Sampler>> samplers;
private List<Sampler> samplersList;
private Map<String, Map<String, ?>> all;
public InstrumentationService() {
super(PREFIX);
}
@Override
@SuppressWarnings("unchecked")
public void init() throws ServiceException {
timersSize = getServiceConfig().getInt(CONF_TIMERS_SIZE, 10);
counterLock = new ReentrantLock();
timerLock = new ReentrantLock();
variableLock = new ReentrantLock();
samplerLock = new ReentrantLock();
Map<String, VariableHolder> jvmVariables = new ConcurrentHashMap<String, VariableHolder>();
counters = new ConcurrentHashMap<String, Map<String, AtomicLong>>();
timers = new ConcurrentHashMap<String, Map<String, Timer>>();
variables = new ConcurrentHashMap<String, Map<String, VariableHolder>>();
samplers = new ConcurrentHashMap<String, Map<String, Sampler>>();
samplersList = new ArrayList<Sampler>();
all = new LinkedHashMap<String, Map<String, ?>>();
all.put("os-env", System.getenv());
all.put("sys-props", (Map<String, ?>) (Map) System.getProperties());
all.put("jvm", jvmVariables);
all.put("counters", (Map) counters);
all.put("timers", (Map) timers);
all.put("variables", (Map) variables);
all.put("samplers", (Map) samplers);
jvmVariables.put("free.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
public Long getValue() {
return Runtime.getRuntime().freeMemory();
}
}));
jvmVariables.put("max.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
public Long getValue() {
return Runtime.getRuntime().maxMemory();
}
}));
jvmVariables.put("total.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
public Long getValue() {
return Runtime.getRuntime().totalMemory();
}
}));
}
@Override
public void postInit() throws ServiceException {
Scheduler scheduler = getServer().get(Scheduler.class);
if (scheduler != null) {
scheduler.schedule(new SamplersRunnable(), 0, 1, TimeUnit.SECONDS);
}
}
@Override
public Class getInterface() {
return Instrumentation.class;
}
@SuppressWarnings("unchecked")
private <T> T getToAdd(String group, String name, Class<T> klass, Lock lock, Map<String, Map<String, T>> map) {
boolean locked = false;
try {
Map<String, T> groupMap = map.get(group);
if (groupMap == null) {
lock.lock();
locked = true;
groupMap = map.get(group);
if (groupMap == null) {
groupMap = new ConcurrentHashMap<String, T>();
map.put(group, groupMap);
}
}
T element = groupMap.get(name);
if (element == null) {
if (!locked) {
lock.lock();
locked = true;
}
element = groupMap.get(name);
if (element == null) {
try {
if (klass == Timer.class) {
element = (T) new Timer(timersSize);
} else {
element = klass.newInstance();
}
} catch (Exception ex) {
throw new RuntimeException(ex);
}
groupMap.put(name, element);
}
}
return element;
} finally {
if (locked) {
lock.unlock();
}
}
}
static class Cron implements Instrumentation.Cron {
long start;
long lapStart;
long own;
long total;
public Cron start() {
if (total != 0) {
throw new IllegalStateException("Cron already used");
}
if (start == 0) {
start = System.currentTimeMillis();
lapStart = start;
} else if (lapStart == 0) {
lapStart = System.currentTimeMillis();
}
return this;
}
public Cron stop() {
if (total != 0) {
throw new IllegalStateException("Cron already used");
}
if (lapStart > 0) {
own += System.currentTimeMillis() - lapStart;
lapStart = 0;
}
return this;
}
void end() {
stop();
total = System.currentTimeMillis() - start;
}
}
static class Timer implements JSONAware, JSONStreamAware {
static final int LAST_TOTAL = 0;
static final int LAST_OWN = 1;
static final int AVG_TOTAL = 2;
static final int AVG_OWN = 3;
Lock lock = new ReentrantLock();
private long[] own;
private long[] total;
private int last;
private boolean full;
private int size;
public Timer(int size) {
this.size = size;
own = new long[size];
total = new long[size];
for (int i = 0; i < size; i++) {
own[i] = -1;
total[i] = -1;
}
last = -1;
}
long[] getValues() {
lock.lock();
try {
long[] values = new long[4];
values[LAST_TOTAL] = total[last];
values[LAST_OWN] = own[last];
int limit = (full) ? size : (last + 1);
for (int i = 0; i < limit; i++) {
values[AVG_TOTAL] += total[i];
values[AVG_OWN] += own[i];
}
values[AVG_TOTAL] = values[AVG_TOTAL] / limit;
values[AVG_OWN] = values[AVG_OWN] / limit;
return values;
} finally {
lock.unlock();
}
}
void addCron(Cron cron) {
cron.end();
lock.lock();
try {
last = (last + 1) % size;
full = full || last == (size - 1);
total[last] = cron.total;
own[last] = cron.own;
} finally {
lock.unlock();
}
}
@SuppressWarnings("unchecked")
private JSONObject getJSON() {
long[] values = getValues();
JSONObject json = new JSONObject();
json.put("lastTotal", values[0]);
json.put("lastOwn", values[1]);
json.put("avgTotal", values[2]);
json.put("avgOwn", values[3]);
return json;
}
@Override
public String toJSONString() {
return getJSON().toJSONString();
}
@Override
public void writeJSONString(Writer out) throws IOException {
getJSON().writeJSONString(out);
}
}
@Override
public Cron createCron() {
return new Cron();
}
@Override
public void incr(String group, String name, long count) {
AtomicLong counter = getToAdd(group, name, AtomicLong.class, counterLock, counters);
counter.addAndGet(count);
}
@Override
public void addCron(String group, String name, Instrumentation.Cron cron) {
Timer timer = getToAdd(group, name, Timer.class, timerLock, timers);
timer.addCron((Cron) cron);
}
static class VariableHolder<E> implements JSONAware, JSONStreamAware {
Variable<E> var;
public VariableHolder() {
}
public VariableHolder(Variable<E> var) {
this.var = var;
}
@SuppressWarnings("unchecked")
private JSONObject getJSON() {
JSONObject json = new JSONObject();
json.put("value", var.getValue());
return json;
}
@Override
public String toJSONString() {
return getJSON().toJSONString();
}
@Override
public void writeJSONString(Writer out) throws IOException {
out.write(toJSONString());
}
}
@Override
public void addVariable(String group, String name, Variable<?> variable) {
VariableHolder holder = getToAdd(group, name, VariableHolder.class, variableLock, variables);
holder.var = variable;
}
static class Sampler implements JSONAware, JSONStreamAware {
Variable<Long> variable;
long[] values;
private AtomicLong sum;
private int last;
private boolean full;
void init(int size, Variable<Long> variable) {
this.variable = variable;
values = new long[size];
sum = new AtomicLong();
last = 0;
}
void sample() {
int index = last;
long valueGoingOut = values[last];
full = full || last == (values.length - 1);
last = (last + 1) % values.length;
values[index] = variable.getValue();
sum.addAndGet(-valueGoingOut + values[index]);
}
double getRate() {
return ((double) sum.get()) / ((full) ? values.length : ((last == 0) ? 1 : last));
}
@SuppressWarnings("unchecked")
private JSONObject getJSON() {
JSONObject json = new JSONObject();
json.put("sampler", getRate());
json.put("size", (full) ? values.length : last);
return json;
}
@Override
public String toJSONString() {
return getJSON().toJSONString();
}
@Override
public void writeJSONString(Writer out) throws IOException {
out.write(toJSONString());
}
}
@Override
public void addSampler(String group, String name, int samplingSize, Variable<Long> variable) {
Sampler sampler = getToAdd(group, name, Sampler.class, samplerLock, samplers);
samplerLock.lock();
try {
sampler.init(samplingSize, variable);
samplersList.add(sampler);
} finally {
samplerLock.unlock();
}
}
class SamplersRunnable implements Runnable {
@Override
public void run() {
samplerLock.lock();
try {
for (Sampler sampler : samplersList) {
sampler.sample();
}
} finally {
samplerLock.unlock();
}
}
}
@Override
public Map<String, Map<String, ?>> getSnapshot() {
return all;
}
}
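
A hypothetical snippet showing how the primitives above combine (group and names are made up): a counter, a timer fed by a Cron around one operation, and a variable:

import org.apache.hadoop.lib.service.Instrumentation;

public class InstrumentationUsage {
  // Sketch: 'instr' would come from server.get(Instrumentation.class).
  public static void timedOperation(Instrumentation instr) {
    instr.incr("myapp", "requests", 1);                      // counter
    Instrumentation.Cron cron = instr.createCron().start();  // timer sample
    try {
      doWork();
    } finally {
      instr.addCron("myapp", "requests", cron.stop());       // feeds the rolling Timer
    }
    instr.addVariable("myapp", "active.threads", new Instrumentation.Variable<Integer>() {
      @Override
      public Integer getValue() {
        return Thread.activeCount();
      }
    });
  }

  private static void doWork() {
    // the operation being measured
  }
}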


@ -0,0 +1,129 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.scheduler;
import org.apache.hadoop.lib.lang.RunnableCallable;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.service.Scheduler;
import org.apache.hadoop.lib.util.Check;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.text.MessageFormat;
import java.util.concurrent.Callable;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
public class SchedulerService extends BaseService implements Scheduler {
private static final Logger LOG = LoggerFactory.getLogger(SchedulerService.class);
private static final String INST_GROUP = "scheduler";
public static final String PREFIX = "scheduler";
public static final String CONF_THREADS = "threads";
private ScheduledExecutorService scheduler;
public SchedulerService() {
super(PREFIX);
}
@Override
public void init() throws ServiceException {
int threads = getServiceConfig().getInt(CONF_THREADS, 5);
scheduler = new ScheduledThreadPoolExecutor(threads);
LOG.debug("Scheduler started");
}
@Override
public void destroy() {
try {
long limit = System.currentTimeMillis() + 30 * 1000;
scheduler.shutdownNow();
while (!scheduler.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
LOG.debug("Waiting for scheduler to shutdown");
if (System.currentTimeMillis() > limit) {
LOG.warn("Gave up waiting for scheduler to shutdown");
break;
}
}
if (scheduler.isTerminated()) {
LOG.debug("Scheduler shutdown");
}
} catch (InterruptedException ex) {
LOG.warn(ex.getMessage(), ex);
}
}
@Override
public Class[] getServiceDependencies() {
return new Class[]{Instrumentation.class};
}
@Override
public Class getInterface() {
return Scheduler.class;
}
@Override
public void schedule(final Callable<?> callable, long delay, long interval, TimeUnit unit) {
Check.notNull(callable, "callable");
if (!scheduler.isShutdown()) {
LOG.debug("Scheduling callable [{}], interval [{}] seconds, delay [{}] in [{}]",
new Object[]{callable, delay, interval, unit});
Runnable r = new Runnable() {
public void run() {
String instrName = callable.getClass().getSimpleName();
Instrumentation instr = getServer().get(Instrumentation.class);
if (getServer().getStatus() == Server.Status.HALTED) {
LOG.debug("Skipping [{}], server status [{}]", callable, getServer().getStatus());
instr.incr(INST_GROUP, instrName + ".skips", 1);
} else {
LOG.debug("Executing [{}]", callable);
instr.incr(INST_GROUP, instrName + ".execs", 1);
Instrumentation.Cron cron = instr.createCron().start();
try {
callable.call();
} catch (Exception ex) {
instr.incr(INST_GROUP, instrName + ".fails", 1);
LOG.error("Error executing [{}], {}", new Object[]{callable, ex.getMessage(), ex});
} finally {
instr.addCron(INST_GROUP, instrName, cron.stop());
}
}
}
};
scheduler.scheduleWithFixedDelay(r, delay, interval, unit);
} else {
throw new IllegalStateException(
MessageFormat.format("Scheduler shutting down, ignoring scheduling of [{}]", callable));
}
}
@Override
public void schedule(Runnable runnable, long delay, long interval, TimeUnit unit) {
schedule((Callable<?>) new RunnableCallable(runnable), delay, interval, unit);
}
}
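
A sketch of scheduling a periodic task against the service above (task and periods are illustrative):

import org.apache.hadoop.lib.service.Scheduler;
import java.util.concurrent.TimeUnit;

public class SchedulerUsage {
  // Sketch: run a cleanup task every 5 minutes after an initial 1 minute delay;
  // 'scheduler' would come from server.get(Scheduler.class).
  public static void scheduleCleanup(Scheduler scheduler) {
    scheduler.schedule(new Runnable() {
      @Override
      public void run() {
        // purge expired entries, roll files, etc.
      }
    }, 1, 5, TimeUnit.MINUTES);
  }
}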


@ -0,0 +1,56 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import java.io.IOException;
import java.util.List;
public class GroupsService extends BaseService implements Groups {
private static final String PREFIX = "groups";
private org.apache.hadoop.security.Groups hGroups;
public GroupsService() {
super(PREFIX);
}
@Override
protected void init() throws ServiceException {
Configuration hConf = new Configuration(false);
ConfigurationUtils.copy(getServiceConfig(), hConf);
hGroups = new org.apache.hadoop.security.Groups(hConf);
}
@Override
public Class getInterface() {
return Groups.class;
}
@Override
public List<String> getGroups(String user) throws IOException {
return hGroups.getGroups(user);
}
}
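
A small sketch of the Groups lookup (the group name is illustrative):

import org.apache.hadoop.lib.service.Groups;
import java.io.IOException;
import java.util.List;

public class GroupsUsage {
  // Sketch: 'groups' would come from server.get(Groups.class).
  public static boolean isAdmin(Groups groups, String user) throws IOException {
    List<String> userGroups = groups.getGroups(user);
    return userGroups.contains("admin"); // illustrative group name
  }
}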


@ -0,0 +1,176 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.lib.lang.XException;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.service.ProxyUser;
import org.apache.hadoop.lib.util.Check;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetAddress;
import java.security.AccessControlException;
import java.text.MessageFormat;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class ProxyUserService extends BaseService implements ProxyUser {
private static Logger LOG = LoggerFactory.getLogger(ProxyUserService.class);
public enum ERROR implements XException.ERROR {
PRXU01("Could not normalize host name [{0}], {1}"),
PRXU02("Missing [{0}] property");
private String template;
ERROR(String template) {
this.template = template;
}
@Override
public String getTemplate() {
return template;
}
}
private static final String PREFIX = "proxyuser";
private static final String GROUPS = ".groups";
private static final String HOSTS = ".hosts";
private Map<String, Set<String>> proxyUserHosts = new HashMap<String, Set<String>>();
private Map<String, Set<String>> proxyUserGroups = new HashMap<String, Set<String>>();
public ProxyUserService() {
super(PREFIX);
}
@Override
public Class getInterface() {
return ProxyUser.class;
}
@Override
public Class[] getServiceDependencies() {
return new Class[]{Groups.class};
}
@Override
protected void init() throws ServiceException {
for (Map.Entry<String, String> entry : getServiceConfig()) {
String key = entry.getKey();
if (key.endsWith(GROUPS)) {
String proxyUser = key.substring(0, key.lastIndexOf(GROUPS));
if (getServiceConfig().get(proxyUser + HOSTS) == null) {
throw new ServiceException(ERROR.PRXU02, getPrefixedName(proxyUser + HOSTS));
}
String value = entry.getValue().trim();
LOG.info("Loading proxyuser settings [{}]=[{}]", key, value);
Set<String> values = null;
if (!value.equals("*")) {
values = new HashSet<String>(Arrays.asList(value.split(",")));
}
proxyUserGroups.put(proxyUser, values);
}
if (key.endsWith(HOSTS)) {
String proxyUser = key.substring(0, key.lastIndexOf(HOSTS));
if (getServiceConfig().get(proxyUser + GROUPS) == null) {
throw new ServiceException(ERROR.PRXU02, getPrefixedName(proxyUser + GROUPS));
}
String value = entry.getValue().trim();
LOG.info("Loading proxyuser settings [{}]=[{}]", key, value);
Set<String> values = null;
if (!value.equals("*")) {
String[] hosts = value.split(",");
for (int i = 0; i < hosts.length; i++) {
String originalName = hosts[i];
try {
hosts[i] = normalizeHostname(originalName);
} catch (Exception ex) {
throw new ServiceException(ERROR.PRXU01, originalName, ex.getMessage(), ex);
}
LOG.info(" Hostname, original [{}], normalized [{}]", originalName, hosts[i]);
}
values = new HashSet<String>(Arrays.asList(hosts));
}
proxyUserHosts.put(proxyUser, values);
}
}
}
@Override
public void validate(String proxyUser, String proxyHost, String doAsUser) throws IOException,
AccessControlException {
Check.notEmpty(proxyUser, "proxyUser");
Check.notEmpty(proxyHost, "proxyHost");
Check.notEmpty(doAsUser, "doAsUser");
LOG.debug("Authorization check proxyuser [{}] host [{}] doAs [{}]",
new Object[]{proxyUser, proxyHost, doAsUser});
if (proxyUserHosts.containsKey(proxyUser)) {
proxyHost = normalizeHostname(proxyHost);
validateRequestorHost(proxyUser, proxyHost, proxyUserHosts.get(proxyUser));
validateGroup(proxyUser, doAsUser, proxyUserGroups.get(proxyUser));
} else {
throw new AccessControlException(MessageFormat.format("User [{0}] not defined as proxyuser", proxyUser));
}
}
private void validateRequestorHost(String proxyUser, String hostname, Set<String> validHosts)
throws IOException, AccessControlException {
if (validHosts != null) {
if (!validHosts.contains(hostname) && !validHosts.contains(normalizeHostname(hostname))) {
throw new AccessControlException(MessageFormat.format("Unauthorized host [{0}] for proxyuser [{1}]",
hostname, proxyUser));
}
}
}
private void validateGroup(String proxyUser, String user, Set<String> validGroups) throws IOException,
AccessControlException {
if (validGroups != null) {
List<String> userGroups = getServer().get(Groups.class).getGroups(user);
for (String g : validGroups) {
if (userGroups.contains(g)) {
return;
}
}
throw new AccessControlException(
MessageFormat.format("Unauthorized proxyuser [{0}] for user [{1}], not in proxyuser groups",
proxyUser, user));
}
}
private String normalizeHostname(String name) {
try {
InetAddress address = InetAddress.getByName(name);
return address.getCanonicalHostName();
} catch (IOException ex) {
throw new AccessControlException(MessageFormat.format("Could not resolve host [{0}], {1}", name,
ex.getMessage()));
}
}
}
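
The service is driven by paired #USER#.groups and #USER#.hosts properties in the proxyuser config section (both must be present for a user; a value of * disables that check). A hypothetical authorization call:

import org.apache.hadoop.lib.service.ProxyUser;

public class ProxyUserUsage {
  // Sketch: authorize proxy user 'foo' on host 'gateway.example.com' to act as 'bar';
  // validate() throws AccessControlException when the rules reject the request.
  public static void authorize(ProxyUser proxyUser) throws Exception {
    proxyUser.validate("foo", "gateway.example.com", "bar");
  }
}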


@ -0,0 +1,110 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.lib.service.FileSystemAccess;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.io.IOException;
/**
* The <code>FileSystemReleaseFilter</code> releases a <code>FileSystem</code>
* instance back to the {@link FileSystemAccess} service.
* <p/>
* This filter is useful in situations where a servlet request
* is streaming out HDFS data and the corresponding filesystem
* instance has to be released after the streaming completes.
*/
public abstract class FileSystemReleaseFilter implements Filter {
private static final ThreadLocal<FileSystem> FILE_SYSTEM_TL = new ThreadLocal<FileSystem>();
/**
* Initializes the filter.
* <p/>
* This implementation is a NOP.
*
* @param filterConfig filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
/**
* It delegates the incoming request to the <code>FilterChain</code>, and
* at its completion (in a finally block) releases the filesystem instance
* back to the {@link FileSystemAccess} service.
*
* @param servletRequest servlet request.
* @param servletResponse servlet response.
* @param filterChain filter chain.
*
* @throws IOException thrown if an IO error occurs.
* @throws ServletException thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
throws IOException, ServletException {
try {
filterChain.doFilter(servletRequest, servletResponse);
} finally {
FileSystem fs = FILE_SYSTEM_TL.get();
if (fs != null) {
FILE_SYSTEM_TL.remove();
getFileSystemAccess().releaseFileSystem(fs);
}
}
}
/**
* Destroys the filter.
* <p/>
* This implementation is a NOP.
*/
@Override
public void destroy() {
}
/**
* Static method that sets the <code>FileSystem</code> to release back to
* the {@link FileSystemAccess} service on servlet request completion.
*
* @param fs filesystem instance.
*/
public static void setFileSystem(FileSystem fs) {
FILE_SYSTEM_TL.set(fs);
}
/**
* Abstract method to be implemented by concrete implementations of the
* filter, returning the {@link FileSystemAccess} service to which the
* filesystem will be returned.
*
* @return the FileSystemAccess service.
*/
protected abstract FileSystemAccess getFileSystemAccess();
}
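
A hypothetical concrete subclass: the servlet that opens the stream calls FileSystemReleaseFilter.setFileSystem(fs), and this filter releases it when the response completes (the static Server reference is an assumption about how the web app would publish its services):

import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;

public class MyFileSystemReleaseFilter extends FileSystemReleaseFilter {
  // Hypothetical: set by the web app at startup.
  static Server server;

  @Override
  protected FileSystemAccess getFileSystemAccess() {
    return server.get(FileSystemAccess.class);
  }
}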


@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.io.IOException;
import java.net.InetAddress;
/**
* Filter that resolves the requester hostname.
*/
public class HostnameFilter implements Filter {
static final ThreadLocal<String> HOSTNAME_TL = new ThreadLocal<String>();
/**
* Initializes the filter.
* <p/>
* This implementation is a NOP.
*
* @param config filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig config) throws ServletException {
}
/**
* Resolves the requester hostname and delegates the request to the chain.
* <p/>
* The requester hostname is available via the {@link #get} method.
*
* @param request servlet request.
* @param response servlet response.
* @param chain filter chain.
*
* @throws IOException thrown if an IO error occurs.
* @throws ServletException thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
try {
String hostname = InetAddress.getByName(request.getRemoteAddr()).getCanonicalHostName();
HOSTNAME_TL.set(hostname);
chain.doFilter(request, response);
} finally {
HOSTNAME_TL.remove();
}
}
/**
* Returns the requester hostname.
*
* @return the requester hostname.
*/
public static String get() {
return HOSTNAME_TL.get();
}
/**
* Destroys the filter.
* <p/>
* This implementation is a NOP.
*/
@Override
public void destroy() {
}
}


@ -0,0 +1,101 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import org.slf4j.MDC;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.security.Principal;
/**
* Filter that sets request contextual information for the slf4j MDC.
* <p/>
* It sets the following values:
* <ul>
* <li>hostname: if the {@link HostnameFilter} is present and configured
* before this filter</li>
* <li>user: the <code>HttpServletRequest.getUserPrincipal().getName()</code></li>
* <li>method: the HTTP method of the request (GET, POST, ...)</li>
* <li>path: the path of the request URL</li>
* </ul>
*/
public class MDCFilter implements Filter {
/**
* Initializes the filter.
* <p/>
* This implementation is a NOP.
*
* @param config filter configuration.
*
* @throws ServletException thrown if the filter could not be initialized.
*/
@Override
public void init(FilterConfig config) throws ServletException {
}
/**
* Sets the slf4j <code>MDC</code> and delegates the request to the chain.
*
* @param request servlet request.
* @param response servlet response.
* @param chain filter chain.
*
* @throws IOException thrown if an IO error occurs.
* @throws ServletException thrown if a servlet error occurs.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
try {
MDC.clear();
String hostname = HostnameFilter.get();
if (hostname != null) {
MDC.put("hostname", HostnameFilter.get());
}
Principal principal = ((HttpServletRequest) request).getUserPrincipal();
String user = (principal != null) ? principal.getName() : null;
if (user != null) {
MDC.put("user", user);
}
MDC.put("method", ((HttpServletRequest) request).getMethod());
MDC.put("path", ((HttpServletRequest) request).getPathInfo());
chain.doFilter(request, response);
} finally {
MDC.clear();
}
}
/**
* Destroys the filter.
* <p/>
* This implementation is a NOP.
*/
@Override
public void destroy() {
}
}
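
Since the hostname only reaches the MDC when HostnameFilter runs first, the web.xml mappings must list it before MDCFilter; a hypothetical fragment:

<filter>
  <filter-name>hostnameFilter</filter-name>
  <filter-class>org.apache.hadoop.lib.servlet.HostnameFilter</filter-class>
</filter>
<filter>
  <filter-name>mdcFilter</filter-name>
  <filter-class>org.apache.hadoop.lib.servlet.MDCFilter</filter-class>
</filter>
<filter-mapping>
  <filter-name>hostnameFilter</filter-name>
  <url-pattern>/*</url-pattern>
</filter-mapping>
<filter-mapping>
  <filter-name>mdcFilter</filter-name>
  <url-pattern>/*</url-pattern>
</filter-mapping>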


@ -0,0 +1,159 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServerException;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import java.text.MessageFormat;
/**
* {@link Server} subclass that implements <code>ServletContextListener</code>
* and uses its lifecycle to start and stop the server.
*/
public abstract class ServerWebApp extends Server implements ServletContextListener {
private static final String HOME_DIR = ".home.dir";
private static final String CONFIG_DIR = ".config.dir";
private static final String LOG_DIR = ".log.dir";
private static final String TEMP_DIR = ".temp.dir";
private static ThreadLocal<String> HOME_DIR_TL = new ThreadLocal<String>();
/**
* Method for testing purposes.
*/
public static void setHomeDirForCurrentThread(String homeDir) {
HOME_DIR_TL.set(homeDir);
}
/**
* Constructor for testing purposes.
*/
protected ServerWebApp(String name, String homeDir, String configDir, String logDir, String tempDir,
Configuration config) {
super(name, homeDir, configDir, logDir, tempDir, config);
}
/**
* Constructor for testing purposes.
*/
protected ServerWebApp(String name, String homeDir, Configuration config) {
super(name, homeDir, config);
}
/**
* Constructor. Subclasses must have a default constructor specifying
* the server name.
* <p/>
* The server name is used to resolve the Java System properties that define
* the server home, config, log and temp directories.
* <p/>
* The home directory is looked up in the Java System property
* <code>#SERVER_NAME#.home.dir</code>.
* <p/>
* The config directory is looked up in the Java System property
* <code>#SERVER_NAME#.config.dir</code>; if not defined it resolves to
* the <code>#SERVER_HOME_DIR#/conf</code> directory.
* <p/>
* The log directory is looked up in the Java System property
* <code>#SERVER_NAME#.log.dir</code>; if not defined it resolves to
* the <code>#SERVER_HOME_DIR#/log</code> directory.
* <p/>
* The temp directory is looked up in the Java System property
* <code>#SERVER_NAME#.temp.dir</code>; if not defined it resolves to
* the <code>#SERVER_HOME_DIR#/temp</code> directory.
*
* @param name server name.
*/
public ServerWebApp(String name) {
super(name, getHomeDir(name),
getDir(name, CONFIG_DIR, getHomeDir(name) + "/conf"),
getDir(name, LOG_DIR, getHomeDir(name) + "/log"),
getDir(name, TEMP_DIR, getHomeDir(name) + "/temp"), null);
}
/**
* Returns the server home directory.
* <p/>
* It is looked up in the Java System property
* <code>#SERVER_NAME#.home.dir</code>.
*
* @param name the server name.
*
* @return the server home directory.
*/
static String getHomeDir(String name) {
String homeDir = HOME_DIR_TL.get();
if (homeDir == null) {
String sysProp = name + HOME_DIR;
homeDir = System.getProperty(sysProp);
if (homeDir == null) {
throw new IllegalArgumentException(MessageFormat.format("System property [{0}] not defined", sysProp));
}
}
return homeDir;
}
/**
* Convenience method that looks for a Java System property defining a
* directory and, if not present, defaults to the specified directory.
*
* @param name server name, used as prefix of the Java System property.
* @param dirType dir type, used as suffix of the Java System property.
* @param defaultDir the default directory to return if the Java System
* property <code>name + dirType</code> is not defined.
*
* @return the directory defined in the Java System property or the
* default directory if the Java System property is not defined.
*/
static String getDir(String name, String dirType, String defaultDir) {
String sysProp = name + dirType;
return System.getProperty(sysProp, defaultDir);
}
/**
* Initializes the <code>ServletContextListener</code> which initializes
* the Server.
*
* @param event servlet context event.
*/
public void contextInitialized(ServletContextEvent event) {
try {
init();
} catch (ServerException ex) {
event.getServletContext().log("ERROR: " + ex.getMessage());
throw new RuntimeException(ex);
}
}
/**
* Destroys the <code>ServletContextListener</code> which destroys
* the Server.
*
* @param event servlet context event.
*/
public void contextDestroyed(ServletContextEvent event) {
destroy();
}
}
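
A hypothetical concrete web app: registered as a <listener> in web.xml so contextInitialized/contextDestroyed drive the server lifecycle, starting the container with -Dmyserver.home.dir=/var/myserver is enough (config, log and temp default under the home):

import org.apache.hadoop.lib.servlet.ServerWebApp;

public class MyServerWebApp extends ServerWebApp {
  // Sketch: resolves myserver.home.dir, myserver.config.dir, myserver.log.dir
  // and myserver.temp.dir from Java System properties.
  public MyServerWebApp() {
    super("myserver");
  }
}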


@ -0,0 +1,199 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.util;
import java.text.MessageFormat;
import java.util.List;
import java.util.regex.Pattern;
/**
* Utility methods to check preconditions.
* <p/>
* Commonly used for method argument preconditions.
*/
public class Check {
/**
* Verifies a variable is not NULL.
*
* @param obj the variable to check.
* @param name the name to use in the exception message.
*
* @return the variable.
*
* @throws IllegalArgumentException if the variable is NULL.
*/
public static <T> T notNull(T obj, String name) {
if (obj == null) {
throw new IllegalArgumentException(name + " cannot be null");
}
return obj;
}
/**
* Verifies a list does not have any NULL elements.
*
* @param list the list to check.
* @param name the name to use in the exception message.
*
* @return the list.
*
* @throws IllegalArgumentException if the list has NULL elements.
*/
public static <T> List<T> notNullElements(List<T> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notNull(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
}
/**
* Verifies a string is not NULL and not empty.
*
* @param str the variable to check.
* @param name the name to use in the exception message.
*
* @return the variable.
*
* @throws IllegalArgumentException if the variable is NULL or empty.
*/
public static String notEmpty(String str, String name) {
if (str == null) {
throw new IllegalArgumentException(name + " cannot be null");
}
if (str.length() == 0) {
throw new IllegalArgumentException(name + " cannot be empty");
}
return str;
}
/**
* Verifies a string list does not have any NULL or empty elements.
*
* @param list the list to check.
* @param name the name to use in the exception message.
*
* @return the list.
*
* @throws IllegalArgumentException if the string list has NULL or empty
* elements.
*/
public static List<String> notEmptyElements(List<String> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notEmpty(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
}
private static final String IDENTIFIER_PATTERN_STR = "[a-zA-Z_][a-zA-Z0-9_\\-]*";
private static final Pattern IDENTIFIER_PATTERN = Pattern.compile("^" + IDENTIFIER_PATTERN_STR + "$");
/**
* Verifies a value is a valid identifier,
* <code>[a-zA-Z_][a-zA-Z0-9_\-]*</code>, up to a maximum length.
*
* @param value string to check if it is a valid identifier.
* @param maxLen maximum length.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the string is not a valid identifier.
*/
public static String validIdentifier(String value, int maxLen, String name) {
Check.notEmpty(value, name);
if (value.length() > maxLen) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] = [{1}] exceeds max len [{2}]", name, value, maxLen));
}
if (!IDENTIFIER_PATTERN.matcher(value).find()) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] = [{1}] must be '{2}'", name, value, IDENTIFIER_PATTERN_STR));
}
return value;
}
/**
* Verifies an integer is greater than zero.
*
* @param value integer value.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the integer is zero or less.
*/
public static int gt0(int value, String name) {
return (int) gt0((long) value, name);
}
/**
* Verifies a long is greater than zero.
*
* @param value long value.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the long is zero or less.
*/
public static long gt0(long value, String name) {
if (value <= 0) {
throw new IllegalArgumentException(
MessageFormat.format("parameter [{0}] = [{1}] must be greater than zero", name, value));
}
return value;
}
/**
* Verifies an integer is greater than or equal to zero.
*
* @param value integer value.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the integer is less than zero.
*/
public static int ge0(int value, String name) {
return (int) ge0((long) value, name);
}
/**
* Verifies a long is greater than or equal to zero.
*
* @param value long value.
* @param name the name to use in the exception message.
*
* @return the value.
*
* @throws IllegalArgumentException if the long is less than zero.
*/
public static long ge0(long value, String name) {
if (value < 0) {
throw new IllegalArgumentException(MessageFormat.format(
"parameter [{0}] = [{1}] must be greater than or equals zero", name, value));
}
return value;
}
}
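
Typical use at the top of a public method (the method and the length limit are illustrative):

import org.apache.hadoop.lib.util.Check;

public class CheckUsage {
  // Sketch: fail fast with descriptive IllegalArgumentExceptions.
  public static void copy(String user, String path, int bufferSize) {
    Check.notEmpty(user, "user");
    Check.validIdentifier(user, 32, "user"); // max length illustrative
    Check.notEmpty(path, "path");
    Check.gt0(bufferSize, "bufferSize");
    // ... perform the copy ...
  }
}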


@ -0,0 +1,157 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.util;
import org.apache.hadoop.conf.Configuration;
import org.w3c.dom.DOMException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.w3c.dom.Text;
import org.xml.sax.SAXException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
/**
* Configuration utilities.
*/
public abstract class ConfigurationUtils {
/**
* Copy configuration key/value pairs from one configuration to another; if a
* property already exists in the target, it gets replaced.
*
* @param source source configuration.
* @param target target configuration.
*/
public static void copy(Configuration source, Configuration target) {
Check.notNull(source, "source");
Check.notNull(target, "target");
for (Map.Entry<String, String> entry : source) {
target.set(entry.getKey(), entry.getValue());
}
}
/**
* Injects configuration key/value pairs from one configuration to another if the key does not exist in the target
* configuration.
*
* @param source source configuration.
* @param target target configuration.
*/
public static void injectDefaults(Configuration source, Configuration target) {
Check.notNull(source, "source");
Check.notNull(target, "target");
for (Map.Entry<String, String> entry : source) {
if (target.get(entry.getKey()) == null) {
target.set(entry.getKey(), entry.getValue());
}
}
}
/**
* Returns a new Configuration instance with all inline values resolved.
*
* @param conf configuration to resolve.
*
* @return a new Configuration instance with all inline values resolved.
*/
public static Configuration resolve(Configuration conf) {
Configuration resolved = new Configuration(false);
for (Map.Entry<String, String> entry : conf) {
resolved.set(entry.getKey(), conf.get(entry.getKey()));
}
return resolved;
}
// Cannibalized from Hadoop's <code>Configuration.loadResource()</code>.
/**
* Create a configuration from an InputStream.
* <p/>
* Cannibalized from Hadoop's <code>Configuration.loadResource()</code>.
*
* @param conf configuration to load the properties into.
* @param is input stream to read the configuration from.
*
* @throws IOException thrown if the configuration could not be read.
*/
public static void load(Configuration conf, InputStream is) throws IOException {
try {
DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
// ignore all comments inside the xml file
docBuilderFactory.setIgnoringComments(true);
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
Document doc = builder.parse(is);
parseDocument(conf, doc);
} catch (SAXException e) {
throw new IOException(e);
} catch (ParserConfigurationException e) {
throw new IOException(e);
}
}
// Cannibalized from Hadoop's <code>Configuration.loadResource()</code>.
private static void parseDocument(Configuration conf, Document doc) throws IOException {
try {
Element root = doc.getDocumentElement();
if (!"configuration".equals(root.getTagName())) {
throw new IOException("bad conf file: top-level element not <configuration>");
}
NodeList props = root.getChildNodes();
for (int i = 0; i < props.getLength(); i++) {
Node propNode = props.item(i);
if (!(propNode instanceof Element)) {
continue;
}
Element prop = (Element) propNode;
if (!"property".equals(prop.getTagName())) {
throw new IOException("bad conf file: element not <property>");
}
NodeList fields = prop.getChildNodes();
String attr = null;
String value = null;
for (int j = 0; j < fields.getLength(); j++) {
Node fieldNode = fields.item(j);
if (!(fieldNode instanceof Element)) {
continue;
}
Element field = (Element) fieldNode;
if ("name".equals(field.getTagName()) && field.hasChildNodes()) {
attr = ((Text) field.getFirstChild()).getData().trim();
}
if ("value".equals(field.getTagName()) && field.hasChildNodes()) {
value = ((Text) field.getFirstChild()).getData();
}
}
if (attr != null && value != null) {
conf.set(attr, value);
}
}
} catch (DOMException e) {
throw new IOException(e);
}
}
}
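
A sketch combining the helpers above (the file path is illustrative): load an XML config from a stream, then fill in defaults without clobbering loaded values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.util.ConfigurationUtils;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ConfigurationUtilsUsage {
  public static Configuration loadConf(Configuration defaults) throws IOException {
    Configuration conf = new Configuration(false);
    InputStream is = new FileInputStream("/etc/myserver/site.xml"); // illustrative path
    try {
      ConfigurationUtils.load(conf, is);
    } finally {
      is.close();
    }
    ConfigurationUtils.injectDefaults(defaults, conf); // only fills missing keys
    return conf;
  }
}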


@ -0,0 +1,43 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import java.text.MessageFormat;
public abstract class BooleanParam extends Param<Boolean> {
public BooleanParam(String name, String str) {
value = parseParam(name, str);
}
protected Boolean parse(String str) throws Exception {
if (str.equalsIgnoreCase("true")) {
return true;
}
if (str.equalsIgnoreCase("false")) {
return false;
}
throw new IllegalArgumentException(MessageFormat.format("Invalid value [{0}], must be a boolean", str));
}
@Override
protected String getDomain() {
return "a boolean";
}
}
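
The Param<T> base class (the parseParam() helper and the protected value field) is defined elsewhere in this package; a hypothetical concrete boolean parameter would look like:

import org.apache.hadoop.lib.wsrs.BooleanParam;

public class OverwriteParam extends BooleanParam {
  // Sketch: backs a JAX-RS query parameter such as ?overwrite=true
  public OverwriteParam(String str) {
    super("overwrite", str);
  }
}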


@ -0,0 +1,35 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
public abstract class ByteParam extends Param<Byte> {
public ByteParam(String name, String str) {
value = parseParam(name, str);
}
protected Byte parse(String str) throws Exception {
return Byte.parseByte(str);
}
@Override
protected String getDomain() {
return "a byte";
}
}


@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.util.StringUtils;
import java.util.Arrays;
public abstract class EnumParam<E extends Enum<E>> extends Param<E> {
Class<E> klass;
public EnumParam(String label, String str, Class<E> e) {
klass = e;
value = parseParam(label, str);
}
protected E parse(String str) throws Exception {
return Enum.valueOf(klass, str.toUpperCase());
}
@Override
protected String getDomain() {
return StringUtils.join(",", Arrays.asList(klass.getEnumConstants()));
}
}
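
A hypothetical enum-backed parameter; note that parse() upper-cases the input, so values are matched case-insensitively against the enum constants:

import org.apache.hadoop.lib.wsrs.EnumParam;

public class OperationParam extends EnumParam<OperationParam.Op> {
  // Sketch: the operation set is illustrative.
  public enum Op { OPEN, GETFILESTATUS, LISTSTATUS }

  public OperationParam(String str) {
    super("op", str, Op.class);
  }
}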


@ -0,0 +1,67 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import java.util.LinkedHashMap;
import java.util.Map;
public class ExceptionProvider implements ExceptionMapper<Throwable> {
private static Logger LOG = LoggerFactory.getLogger(ExceptionProvider.class);
private static final String ENTER = System.getProperty("line.separator");
protected Response createResponse(Response.Status status, Throwable throwable) {
Map<String, Object> json = new LinkedHashMap<String, Object>();
json.put(HttpFSFileSystem.ERROR_MESSAGE_JSON, getOneLineMessage(throwable));
json.put(HttpFSFileSystem.ERROR_EXCEPTION_JSON, throwable.getClass().getSimpleName());
json.put(HttpFSFileSystem.ERROR_CLASSNAME_JSON, throwable.getClass().getName());
Map<String, Object> response = new LinkedHashMap<String, Object>();
response.put(HttpFSFileSystem.ERROR_JSON, json);
log(status, throwable);
return Response.status(status).type(MediaType.APPLICATION_JSON).entity(response).build();
}
protected String getOneLineMessage(Throwable throwable) {
String message = throwable.getMessage();
if (message != null) {
int i = message.indexOf(ENTER);
if (i > -1) {
message = message.substring(0, i);
}
}
return message;
}
protected void log(Response.Status status, Throwable throwable) {
LOG.debug("{}", throwable.getMessage(), throwable);
}
@Override
public Response toResponse(Throwable throwable) {
return createResponse(Response.Status.BAD_REQUEST, throwable);
}
}
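
A hypothetical subclass mapping specific exception types to more precise HTTP status codes than the blanket BAD_REQUEST default:

import org.apache.hadoop.lib.wsrs.ExceptionProvider;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.Provider;
import java.io.FileNotFoundException;

@Provider
public class MyExceptionProvider extends ExceptionProvider {
  @Override
  public Response toResponse(Throwable throwable) {
    if (throwable instanceof FileNotFoundException) {
      return createResponse(Response.Status.NOT_FOUND, throwable);
    }
    return createResponse(Response.Status.INTERNAL_SERVER_ERROR, throwable);
  }
}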


@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.io.IOUtils;
import javax.ws.rs.core.StreamingOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
public class InputStreamEntity implements StreamingOutput {
private InputStream is;
private long offset;
private long len;
public InputStreamEntity(InputStream is, long offset, long len) {
this.is = is;
this.offset = offset;
this.len = len;
}
public InputStreamEntity(InputStream is) {
this(is, 0, -1);
}
@Override
public void write(OutputStream os) throws IOException {
is.skip(offset);
if (len == -1) {
IOUtils.copyBytes(is, os, 4096, true);
} else {
IOUtils.copyBytes(is, os, len, true);
}
}
}
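
A sketch of a JAX-RS response streaming a byte range back to the client with the entity above (resource wiring omitted):

import org.apache.hadoop.lib.wsrs.InputStreamEntity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.InputStream;

public class DownloadResponses {
  public static Response download(InputStream is, long offset, long len) {
    return Response.ok(new InputStreamEntity(is, offset, len))
        .type(MediaType.APPLICATION_OCTET_STREAM).build();
  }
}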


@ -0,0 +1,35 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
public abstract class IntegerParam extends Param<Integer> {
public IntegerParam(String name, String str) {
value = parseParam(name, str);
}
protected Integer parse(String str) throws Exception {
return Integer.parseInt(str);
}
@Override
protected String getDomain() {
return "an integer";
}
}


@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.json.simple.JSONObject;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.util.Map;
@Provider
@Produces(MediaType.APPLICATION_JSON)
public class JSONMapProvider implements MessageBodyWriter<Map> {
private static final String ENTER = System.getProperty("line.separator");
@Override
public boolean isWriteable(Class<?> aClass, Type type, Annotation[] annotations, MediaType mediaType) {
return Map.class.isAssignableFrom(aClass);
}
@Override
public long getSize(Map map, Class<?> aClass, Type type, Annotation[] annotations, MediaType mediaType) {
return -1;
}
@Override
public void writeTo(Map map, Class<?> aClass, Type type, Annotation[] annotations,
MediaType mediaType, MultivaluedMap<String, Object> stringObjectMultivaluedMap,
OutputStream outputStream) throws IOException, WebApplicationException {
// Write JSON using UTF-8 explicitly instead of the platform default charset.
Writer writer = new OutputStreamWriter(outputStream, "UTF-8");
JSONObject.writeJSONString(map, writer);
writer.write(ENTER);
writer.flush();
}
}

View File

@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.json.simple.JSONStreamAware;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
@Provider
@Produces(MediaType.APPLICATION_JSON)
public class JSONProvider implements MessageBodyWriter<JSONStreamAware> {
private static final String ENTER = System.getProperty("line.separator");
@Override
public boolean isWriteable(Class<?> aClass, Type type, Annotation[] annotations, MediaType mediaType) {
return JSONStreamAware.class.isAssignableFrom(aClass);
}
@Override
public long getSize(JSONStreamAware jsonStreamAware, Class<?> aClass, Type type, Annotation[] annotations,
MediaType mediaType) {
return -1;
}
@Override
public void writeTo(JSONStreamAware jsonStreamAware, Class<?> aClass, Type type, Annotation[] annotations,
MediaType mediaType, MultivaluedMap<String, Object> stringObjectMultivaluedMap,
OutputStream outputStream) throws IOException, WebApplicationException {
// Write JSON using UTF-8 explicitly instead of the platform default charset.
Writer writer = new OutputStreamWriter(outputStream, "UTF-8");
jsonStreamAware.writeJSONString(writer);
writer.write(ENTER);
writer.flush();
}
}
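
A hedged sketch (the resource name is illustrative, not part of this commit)
of how these two providers are exercised: any resource method annotated with
@Produces(MediaType.APPLICATION_JSON) that returns a Map (or a
JSONStreamAware object) is serialized automatically, with no per-endpoint
JSON code.

import java.util.LinkedHashMap;
import java.util.Map;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

@Path("status")
public class StatusResource {

  @GET
  @Produces(MediaType.APPLICATION_JSON)
  public Map getStatus() {
    // Written to the response by JSONMapProvider as {"status":"OK"}.
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("status", "OK");
    return json;
  }
}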

View File

@ -0,0 +1,35 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
public abstract class LongParam extends Param<Long> {
public LongParam(String name, String str) {
value = parseParam(name, str);
}
protected Long parse(String str) throws Exception {
return Long.parseLong(str);
}
@Override
protected String getDomain() {
return "a long";
}
}

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.lib.util.Check;
import java.text.MessageFormat;
public abstract class Param<T> {
protected T value;
public T parseParam(String name, String str) {
Check.notNull(name, "name");
try {
return (str != null && str.trim().length() > 0) ? parse(str) : null;
} catch (Exception ex) {
throw new IllegalArgumentException(
MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
name, str, getDomain()));
}
}
public T value() {
return value;
}
protected void setValue(T value) {
this.value = value;
}
protected abstract String getDomain();
protected abstract T parse(String str) throws Exception;
@Override
public String toString() {
// Guard against an NPE when the parameter was absent (value is null).
return (value != null) ? value.toString() : null;
}
}
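
A hedged sketch of a concrete parameter type built on this family (the class
and parameter names are hypothetical, not part of the commit):

// Hypothetical example: a replication-factor query string parameter.
public class ReplicationParam extends ShortParam {

  public ReplicationParam(String str) {
    super("replication", str);
  }
}

With this, new ReplicationParam("3").value() yields (short) 3, a null or
blank string yields a null value, and new ReplicationParam("x") throws
IllegalArgumentException with the message "Parameter [replication], invalid
value [x], value must be [a short]".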

View File

@ -0,0 +1,35 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
public abstract class ShortParam extends Param<Short> {
public ShortParam(String name, String str) {
value = parseParam(name, str);
}
protected Short parse(String str) throws Exception {
return Short.parseShort(str);
}
@Override
protected String getDomain() {
return "a short";
}
}

View File

@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import org.apache.hadoop.lib.util.Check;
import java.text.MessageFormat;
import java.util.regex.Pattern;
public abstract class StringParam extends Param<String> {
private Pattern pattern;
public StringParam(String name, String str) {
this(name, str, null);
}
public StringParam(String name, String str, Pattern pattern) {
this.pattern = pattern;
value = parseParam(name, str);
}
public String parseParam(String name, String str) {
Check.notNull(name, "name");
try {
if (str != null) {
str = str.trim();
if (str.length() > 0) {
return parse(str);
}
}
} catch (Exception ex) {
throw new IllegalArgumentException(
MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
name, str, getDomain()));
}
return null;
}
protected String parse(String str) throws Exception {
if (pattern != null) {
if (!pattern.matcher(str).matches()) {
throw new IllegalArgumentException("Invalid value");
}
}
return str;
}
@Override
protected String getDomain() {
return (pattern == null) ? "a string" : pattern.pattern();
}
}

View File

@ -0,0 +1,79 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import com.sun.jersey.api.core.HttpContext;
import com.sun.jersey.core.spi.component.ComponentContext;
import com.sun.jersey.core.spi.component.ComponentScope;
import com.sun.jersey.server.impl.inject.AbstractHttpContextInjectable;
import com.sun.jersey.spi.inject.Injectable;
import com.sun.jersey.spi.inject.InjectableProvider;
import org.slf4j.MDC;
import javax.ws.rs.core.Context;
import javax.ws.rs.ext.Provider;
import java.lang.reflect.Type;
import java.security.Principal;
import java.util.regex.Pattern;
@Provider
public class UserProvider extends AbstractHttpContextInjectable<Principal> implements
InjectableProvider<Context, Type> {
public static final String USER_NAME_PARAM = "user.name";
public static final Pattern USER_PATTERN = Pattern.compile("[_a-zA-Z0-9]+");
private static class UserParam extends StringParam {
public UserParam(String user) {
super(USER_NAME_PARAM, user, USER_PATTERN);
}
}
@Override
public Principal getValue(HttpContext httpContext) {
Principal principal = httpContext.getRequest().getUserPrincipal();
if (principal == null) {
final String user = httpContext.getRequest().getQueryParameters().getFirst(USER_NAME_PARAM);
if (user != null) {
principal = new Principal() {
@Override
public String getName() {
return new UserParam(user).value();
}
};
}
}
if (principal != null) {
MDC.put("user", principal.getName());
}
return principal;
}
@Override
public ComponentScope getScope() {
return ComponentScope.PerRequest;
}
@Override
public Injectable getInjectable(ComponentContext componentContext, Context context, Type type) {
return (type.equals(Principal.class)) ? this : null;
}
}
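
A hedged sketch (the resource name is illustrative): with UserProvider
registered, Jersey injects the resolved Principal into resource methods via
@Context, so individual handlers never parse the user.name query string
parameter themselves.

import java.security.Principal;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;

@Path("whoami")
public class WhoAmIResource {

  @GET
  public Response get(@Context Principal principal) {
    // Resolved from the authenticated request or, failing that, from the
    // validated 'user.name' query string parameter.
    return Response.ok(principal.getName()).build();
  }
}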

View File

@ -0,0 +1,167 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# resolve links - $0 may be a softlink
PRG="${0}"
while [ -h "${PRG}" ]; do
ls=`ls -ld "${PRG}"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "${PRG}"`/"$link"
fi
done
BASEDIR=`dirname ${PRG}`
BASEDIR=`cd ${BASEDIR}/..;pwd`
function print() {
if [ "${HTTPFS_SILENT}" != "true" ]; then
echo "$@"
fi
}
# if HTTPFS_HOME is already set warn it will be ignored
#
if [ "${HTTPFS_HOME}" != "" ]; then
echo "WARNING: current setting of HTTPFS_HOME ignored"
fi
print
# setting HTTPFS_HOME to the installation dir, it cannot be changed
#
export HTTPFS_HOME=${BASEDIR}
httpfs_home=${HTTPFS_HOME}
print "Setting HTTPFS_HOME: ${HTTPFS_HOME}"
# if the installation has a env file, source it
# this is for native packages installations
#
if [ -e "${HTTPFS_HOME}/bin/httpfs-env.sh" ]; then
print "Sourcing: ${HTTPFS_HOME}/bin/httpfs-env.sh"
source ${HTTPFS_HOME}/bin/httpfs-env.sh
grep "^ *export " ${HTTPFS_HOME}/bin/httpfs-env.sh | sed 's/ *export/ setting/'
fi
# verify that the sourced env file didn't change HTTPFS_HOME
# if so, warn and revert
#
if [ "${HTTPFS_HOME}" != "${httpfs_home}" ]; then
print "WARN: HTTPFS_HOME resetting to ''${HTTPFS_HOME}'' ignored"
export HTTPFS_HOME=${httpfs_home}
print " using HTTPFS_HOME: ${HTTPFS_HOME}"
fi
if [ "${HTTPFS_CONFIG}" = "" ]; then
export HTTPFS_CONFIG=${HTTPFS_HOME}/etc/hadoop
print "Setting HTTPFS_CONFIG: ${HTTPFS_CONFIG}"
else
print "Using HTTPFS_CONFIG: ${HTTPFS_CONFIG}"
fi
httpfs_config=${HTTPFS_CONFIG}
# if the configuration dir has a env file, source it
#
if [ -e "${HTTPFS_CONFIG}/httpfs-env.sh" ]; then
print "Sourcing: ${HTTPFS_CONFIG}/httpfs-env.sh"
source ${HTTPFS_CONFIG}/httpfs-env.sh
grep "^ *export " ${HTTPFS_CONFIG}/httpfs-env.sh | sed 's/ *export/ setting/'
fi
# verify that the sourced env file didn't change HTTPFS_HOME
# if so, warn and revert
#
if [ "${HTTPFS_HOME}" != "${httpfs_home}" ]; then
echo "WARN: HTTPFS_HOME resetting to ''${HTTPFS_HOME}'' ignored"
export HTTPFS_HOME=${httpfs_home}
fi
# verify that the sourced env file didn't change HTTPFS_CONFIG
# if so, warn and revert
#
if [ "${HTTPFS_CONFIG}" != "${httpfs_config}" ]; then
echo "WARN: HTTPFS_CONFIG resetting to ''${HTTPFS_CONFIG}'' ignored"
export HTTPFS_CONFIG=${httpfs_config}
fi
if [ "${HTTPFS_LOG}" = "" ]; then
export HTTPFS_LOG=${HTTPFS_HOME}/logs
print "Setting HTTPFS_LOG: ${HTTPFS_LOG}"
else
print "Using HTTPFS_LOG: ${HTTPFS_LOG}"
fi
if [ ! -d ${HTTPFS_LOG} ]; then
mkdir -p ${HTTPFS_LOG}
fi
if [ "${HTTPFS_TEMP}" = "" ]; then
export HTTPFS_TEMP=${HTTPFS_HOME}/temp
print "Setting HTTPFS_TEMP: ${HTTPFS_TEMP}"
else
print "Using HTTPFS_TEMP: ${HTTPFS_TEMP}"
fi
if [ ! -d ${HTTPFS_TEMP} ]; then
mkdir -p ${HTTPFS_TEMP}
fi
if [ "${HTTPFS_HTTP_PORT}" = "" ]; then
export HTTPFS_HTTP_PORT=14000
print "Setting HTTPFS_HTTP_PORT: ${HTTPFS_HTTP_PORT}"
else
print "Using HTTPFS_HTTP_PORT: ${HTTPFS_HTTP_PORT}"
fi
if [ "${HTTPFS_ADMIN_PORT}" = "" ]; then
export HTTPFS_ADMIN_PORT=`expr $HTTPFS_HTTP_PORT + 1`
print "Setting HTTPFS_ADMIN_PORT: ${HTTPFS_ADMIN_PORT}"
else
print "Using HTTPFS_ADMIN_PORT: ${HTTPFS_ADMIN_PORT}"
fi
if [ "${HTTPFS_HTTP_HOSTNAME}" = "" ]; then
export HTTPFS_HTTP_HOSTNAME=`hostname -f`
print "Setting HTTPFS_HTTP_HOSTNAME: ${HTTPFS_HTTP_HOSTNAME}"
else
print "Using HTTPFS_HTTP_HOSTNAME: ${HTTPFS_HTTP_HOSTNAME}"
fi
if [ "${CATALINA_BASE}" = "" ]; then
export CATALINA_BASE=${HTTPFS_HOME}/share/hadoop/httpfs/tomcat
print "Setting CATALINA_BASE: ${CATALINA_BASE}"
else
print "Using CATALINA_BASE: ${CATALINA_BASE}"
fi
if [ "${CATALINA_OUT}" = "" ]; then
export CATALINA_OUT=${HTTPFS_LOG}/httpfs-catalina.out
print "Setting CATALINA_OUT: ${CATALINA_OUT}"
else
print "Using CATALINA_OUT: ${CATALINA_OUT}"
fi
if [ "${CATALINA_PID}" = "" ]; then
export CATALINA_PID=/tmp/httpfs.pid
print "Setting CATALINA_PID: ${CATALINA_PID}"
else
print "Using CATALINA_PID: ${CATALINA_PID}"
fi
print

View File

@ -0,0 +1,20 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.Target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n
log4j.rootLogger=INFO, console

View File

@ -0,0 +1,204 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<!-- HttpFSServer Server -->
<property>
<name>httpfs.buffer.size</name>
<value>4096</value>
<description>
The buffer size used by a read/write request when streaming data from/to
HDFS.
</description>
</property>
<!-- HttpFSServer Services -->
<property>
<name>httpfs.services</name>
<value>
org.apache.hadoop.lib.service.instrumentation.InstrumentationService,
org.apache.hadoop.lib.service.scheduler.SchedulerService,
org.apache.hadoop.lib.service.security.GroupsService,
org.apache.hadoop.lib.service.security.ProxyUserService,
org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
</value>
<description>
Services used by the httpfs server.
</description>
</property>
<!-- Kerberos Configuration -->
<property>
<name>kerberos.realm</name>
<value>LOCALHOST</value>
<description>
Kerberos realm, used only if Kerberos authentication is used between
the clients and httpfs or between HttpFS and HDFS.
This property is only used to resolve other properties within this
configuration file.
</description>
</property>
<!-- HttpFSServer Security Configuration -->
<property>
<name>httpfs.hostname</name>
<value>${httpfs.http.hostname}</value>
<description>
Property used to synthesize the HTTP Kerberos principal used by httpfs.
This property is only used to resolve other properties within this
configuration file.
</description>
</property>
<property>
<name>httpfs.authentication.type</name>
<value>simple</value>
<description>
Defines the authentication mechanism used by httpfs for its HTTP clients.
Valid values are 'simple' and 'kerberos'.
If using 'simple' HTTP clients must specify the username with the
'user.name' query string parameter.
If using 'kerberos' HTTP clients must use HTTP SPNEGO.
</description>
</property>
<property>
<name>httpfs.authentication.kerberos.principal</name>
<value>HTTP/${httpfs.hostname}@${kerberos.realm}</value>
<description>
The HTTP Kerberos principal used by HttpFS in the HTTP endpoint.
The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
HTTP SPNEGO specification.
</description>
</property>
<property>
<name>httpfs.authentication.kerberos.keytab</name>
<value>${user.home}/httpfs.keytab</value>
<description>
The Kerberos keytab file with the credentials for the
HTTP Kerberos principal used by httpfs in the HTTP endpoint.
</description>
</property>
<!-- HttpFSServer proxy user Configuration -->
<property>
<name>httpfs.proxyuser.#USER#.hosts</name>
<value>*</value>
<description>
List of hosts the '#USER#' user is allowed to perform 'doAs'
operations.
The '#USER#' must be replaced with the username of the user who is
allowed to perform 'doAs' operations.
The value can be the '*' wildcard or a list of hostnames.
For multiple users copy this property and replace the user name
in the property name.
</description>
</property>
<property>
<name>httpfs.proxyuser.#USER#.groups</name>
<value>*</value>
<description>
List of groups the '#USER#' user is allowed to impersonate users
from to perform 'doAs' operations.
The '#USER#' must be replaced with the username of the user who is
allowed to perform 'doAs' operations.
The value can be the '*' wildcard or a list of groups.
For multiple users copy this property and replace the user name
in the property name.
</description>
</property>
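<!-- A concrete illustration (the user name 'httpfs' is hypothetical): to
allow the Unix user 'httpfs' to proxy for users in any group from any
host, the two properties above would be copied as:
<property>
<name>httpfs.proxyuser.httpfs.hosts</name>
<value>*</value>
</property>
<property>
<name>httpfs.proxyuser.httpfs.groups</name>
<value>*</value>
</property>
-->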
<!-- FileSystemAccess Namenode Configuration -->
<property>
<name>namenode.hostname</name>
<value>localhost</value>
<description>
The HDFS Namenode host the httpfs server connects to perform file
system operations.
This property is only used to resolve other properties within this
configuration file.
</description>
</property>
<property>
<name>httpfs.hadoop.conf:fs.default.name</name>
<value>hdfs://${namenode.hostname}:8020</value>
<description>
The HDFS Namenode URI the httpfs server connects to perform file
system operations.
</description>
</property>
<!-- FileSystemAccess Namenode Security Configuration -->
<property>
<name>httpfs.hadoop.authentication.type</name>
<value>simple</value>
<description>
Defines the authentication mechanism used by httpfs to connect to
the HDFS Namenode.
Valid values are 'simple' and 'kerberos'.
</description>
</property>
<property>
<name>httpfs.hadoop.authentication.kerberos.keytab</name>
<value>${user.home}/httpfs.keytab</value>
<description>
The Kerberos keytab file with the credentials for the
Kerberos principal used by httpfs to connect to the HDFS Namenode.
</description>
</property>
<property>
<name>httpfs.hadoop.authentication.kerberos.principal</name>
<value>${user.name}/${httpfs.hostname}@${kerberos.realm}</value>
<description>
The Kerberos principal used by httpfs to connect to the HDFS Namenode.
</description>
</property>
<property>
<name>httpfs.hadoop.conf:dfs.namenode.kerberos.principal</name>
<value>hdfs/${namenode.hostname}@${kerberos.realm}</value>
<description>
The HDFS Namenode Kerberos principal.
</description>
</property>
</configuration>

View File

@ -0,0 +1,21 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
httpfs.version=${project.version}
httpfs.source.repository=${httpfs.source.repository}
httpfs.source.revision=${httpfs.source.revision}
httpfs.build.username=${user.name}
httpfs.build.timestamp=${httpfs.build.timestamp}

View File

@ -0,0 +1,62 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# resolve links - $0 may be a softlink
PRG="${0}"
while [ -h "${PRG}" ]; do
ls=`ls -ld "${PRG}"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "${PRG}"`/"$link"
fi
done
BASEDIR=`dirname ${PRG}`
BASEDIR=`cd ${BASEDIR}/..;pwd`
source ${BASEDIR}/libexec/httpfs-config.sh
# The Java System property 'httpfs.http.port' is not used by HttpFS itself;
# it is used in Tomcat's server.xml configuration file
#
print "Using CATALINA_OPTS: ${CATALINA_OPTS}"
catalina_opts="-Dhttpfs.home.dir=${HTTPFS_HOME}";
catalina_opts="${catalina_opts} -Dhttpfs.config.dir=${HTTPFS_CONFIG}";
catalina_opts="${catalina_opts} -Dhttpfs.log.dir=${HTTPFS_LOG}";
catalina_opts="${catalina_opts} -Dhttpfs.temp.dir=${HTTPFS_TEMP}";
catalina_opts="${catalina_opts} -Dhttpfs.admin.port=${HTTPFS_ADMIN_PORT}";
catalina_opts="${catalina_opts} -Dhttpfs.http.port=${HTTPFS_HTTP_PORT}";
catalina_opts="${catalina_opts} -Dhttpfs.http.hostname=${HTTPFS_HTTP_HOSTNAME}";
print "Adding to CATALINA_OPTS: ${catalina_opts}"
export CATALINA_OPTS="${CATALINA_OPTS} ${catalina_opts}"
# Due to a bug, the catalina.sh script does not use CATALINA_OPTS when stopping the server
#
if [ "${1}" = "stop" ]; then
export JAVA_OPTS=${CATALINA_OPTS}
fi
if [ "${HTTPFS_SILENT}" != "true" ]; then
${BASEDIR}/share/hadoop/httpfs/tomcat/bin/catalina.sh "$@"
else
${BASEDIR}/share/hadoop/httpfs/tomcat/bin/catalina.sh "$@" > /dev/null
fi

View File

@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
</web-app>

View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<body>
<b>HttpFs service</b>, service base URL at /webhdfs/v1.
</body>
</html>

View File

@ -0,0 +1,67 @@
#
# All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, 3manager.org.apache.juli.FileHandler, 4host-manager.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
.handlers = 1catalina.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler
############################################################
# Handler specific properties.
# Describes specific configuration info for Handlers.
############################################################
1catalina.org.apache.juli.FileHandler.level = FINE
1catalina.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
1catalina.org.apache.juli.FileHandler.prefix = httpfs-catalina.
2localhost.org.apache.juli.FileHandler.level = FINE
2localhost.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
2localhost.org.apache.juli.FileHandler.prefix = httpfs-localhost.
3manager.org.apache.juli.FileHandler.level = FINE
3manager.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
3manager.org.apache.juli.FileHandler.prefix = httpfs-manager.
4host-manager.org.apache.juli.FileHandler.level = FINE
4host-manager.org.apache.juli.FileHandler.directory = ${httpfs.log.dir}
4host-manager.org.apache.juli.FileHandler.prefix = httpfs-host-manager.
java.util.logging.ConsoleHandler.level = FINE
java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter
############################################################
# Facility specific properties.
# Provides extra control for each logger.
############################################################
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.FileHandler
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].level = INFO
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].handlers = 3manager.org.apache.juli.FileHandler
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].level = INFO
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].handlers = 4host-manager.org.apache.juli.FileHandler
# For example, set the com.xyz.foo logger to only log SEVERE
# messages:
#org.apache.catalina.startup.ContextConfig.level = FINE
#org.apache.catalina.startup.HostConfig.level = FINE
#org.apache.catalina.session.ManagerBase.level = FINE
#org.apache.catalina.core.AprLifecycleListener.level=FINE

View File

@ -0,0 +1,150 @@
<?xml version='1.0' encoding='utf-8'?>
<!--
All Rights Reserved.
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Note: A "Server" is not itself a "Container", so you may not
define subcomponents such as "Valves" at this level.
Documentation at /docs/config/server.html
-->
<Server port="${httpfs.admin.port}" shutdown="SHUTDOWN">
<!--APR library loader. Documentation at /docs/apr.html -->
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on"/>
<!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
<Listener className="org.apache.catalina.core.JasperListener"/>
<!-- Prevent memory leaks due to use of particular java/javax APIs-->
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
<!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
<Listener className="org.apache.catalina.mbeans.ServerLifecycleListener"/>
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>
<!-- Global JNDI resources
Documentation at /docs/jndi-resources-howto.html
-->
<GlobalNamingResources>
<!-- Editable user database that can also be used by
UserDatabaseRealm to authenticate users
-->
<Resource name="UserDatabase" auth="Container"
type="org.apache.catalina.UserDatabase"
description="User database that can be updated and saved"
factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
pathname="conf/tomcat-users.xml"/>
</GlobalNamingResources>
<!-- A "Service" is a collection of one or more "Connectors" that share
a single "Container" Note: A "Service" is not itself a "Container",
so you may not define subcomponents such as "Valves" at this level.
Documentation at /docs/config/service.html
-->
<Service name="Catalina">
<!--The connectors can use a shared executor, you can define one or more named thread pools-->
<!--
<Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
maxThreads="150" minSpareThreads="4"/>
-->
<!-- A "Connector" represents an endpoint by which requests are received
and responses are returned. Documentation at :
Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
Java AJP Connector: /docs/config/ajp.html
APR (HTTP/AJP) Connector: /docs/apr.html
Define a non-SSL HTTP/1.1 Connector on port ${httpfs.http.port}
-->
<Connector port="${httpfs.http.port}" protocol="HTTP/1.1"
connectionTimeout="20000"
redirectPort="8443"/>
<!-- A "Connector" using the shared thread pool-->
<!--
<Connector executor="tomcatThreadPool"
port="${httpfs.http.port}" protocol="HTTP/1.1"
connectionTimeout="20000"
redirectPort="8443" />
-->
<!-- Define a SSL HTTP/1.1 Connector on port 8443
This connector uses the JSSE configuration, when using APR, the
connector should be using the OpenSSL style configuration
described in the APR documentation -->
<!--
<Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
maxThreads="150" scheme="https" secure="true"
clientAuth="false" sslProtocol="TLS" />
-->
<!-- Define an AJP 1.3 Connector on port 8009 -->
<!-- An Engine represents the entry point (within Catalina) that processes
every request. The Engine implementation for Tomcat stand alone
analyzes the HTTP headers included with the request, and passes them
on to the appropriate Host (virtual host).
Documentation at /docs/config/engine.html -->
<!-- You should set jvmRoute to support load-balancing via AJP ie :
<Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
-->
<Engine name="Catalina" defaultHost="localhost">
<!--For clustering, please take a look at documentation at:
/docs/cluster-howto.html (simple how to)
/docs/config/cluster.html (reference documentation) -->
<!--
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
-->
<!-- The request dumper valve dumps useful debugging information about
the request and response data received and sent by Tomcat.
Documentation at: /docs/config/valve.html -->
<!--
<Valve className="org.apache.catalina.valves.RequestDumperValve"/>
-->
<!-- This Realm uses the UserDatabase configured in the global JNDI
resources under the key "UserDatabase". Any edits
that are performed against this UserDatabase are immediately
available for use by the Realm. -->
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
resourceName="UserDatabase"/>
<!-- Define the default virtual host
Note: XML Schema validation will not work with Xerces 2.2.
-->
<Host name="localhost" appBase="webapps"
unpackWARs="true" autoDeploy="true"
xmlValidation="false" xmlNamespaceAware="false">
<!-- SingleSignOn valve, share authentication between web applications
Documentation at: /docs/config/valve.html -->
<!--
<Valve className="org.apache.catalina.authenticator.SingleSignOn" />
-->
<!-- Access log processes all example.
Documentation at: /docs/config/valve.html -->
<!--
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
prefix="localhost_access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
-->
</Host>
</Engine>
</Service>
</Server>

View File

@ -0,0 +1,88 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
<listener>
<listener-class>org.apache.hadoop.fs.http.server.HttpFSServerWebApp</listener-class>
</listener>
<servlet>
<servlet-name>webservices-driver</servlet-name>
<servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
<init-param>
<param-name>com.sun.jersey.config.property.packages</param-name>
<param-value>org.apache.hadoop.fs.http.server,org.apache.hadoop.lib.wsrs</param-value>
</init-param>
<!-- Enables detailed Jersey request/response logging -->
<!--
<init-param>
<param-name>com.sun.jersey.spi.container.ContainerRequestFilters</param-name>
<param-value>com.sun.jersey.api.container.filter.LoggingFilter</param-value>
</init-param>
<init-param>
<param-name>com.sun.jersey.spi.container.ContainerResponseFilters</param-name>
<param-value>com.sun.jersey.api.container.filter.LoggingFilter</param-value>
</init-param>
-->
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>webservices-driver</servlet-name>
<url-pattern>/*</url-pattern>
</servlet-mapping>
<filter>
<filter-name>authFilter</filter-name>
<filter-class>org.apache.hadoop.fs.http.server.AuthFilter</filter-class>
</filter>
<filter>
<filter-name>MDCFilter</filter-name>
<filter-class>org.apache.hadoop.lib.servlet.MDCFilter</filter-class>
</filter>
<filter>
<filter-name>hostnameFilter</filter-name>
<filter-class>org.apache.hadoop.lib.servlet.HostnameFilter</filter-class>
</filter>
<filter>
<filter-name>fsReleaseFilter</filter-name>
<filter-class>org.apache.hadoop.fs.http.server.HttpFSReleaseFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>authFilter</filter-name>
<url-pattern>*</url-pattern>
</filter-mapping>
<filter-mapping>
<filter-name>MDCFilter</filter-name>
<url-pattern>*</url-pattern>
</filter-mapping>
<filter-mapping>
<filter-name>hostnameFilter</filter-name>
<url-pattern>*</url-pattern>
</filter-mapping>
<filter-mapping>
<filter-name>fsReleaseFilter</filter-name>
<url-pattern>*</url-pattern>
</filter-mapping>
</web-app>

View File

@ -0,0 +1,121 @@
~~ Licensed under the Apache License, Version 2.0 (the "License");
~~ you may not use this file except in compliance with the License.
~~ You may obtain a copy of the License at
~~
~~ http://www.apache.org/licenses/LICENSE-2.0
~~
~~ Unless required by applicable law or agreed to in writing, software
~~ distributed under the License is distributed on an "AS IS" BASIS,
~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~~ See the License for the specific language governing permissions and
~~ limitations under the License.
---
Hadoop HDFS over HTTP ${project.version} - Server Setup
---
---
${maven.build.timestamp}
Hadoop HDFS over HTTP ${project.version} - Server Setup
\[ {{{./index.html}Go Back}} \]
This page explains how to quickly set up HttpFS with pseudo authentication
against a Hadoop cluster with pseudo authentication.
* Requirements
* Java 6+
* Maven 3+
* Install HttpFS
+---+
~ $ tar xzf httpfs-${project.version}.tar.gz
+---+
* Configure HttpFS
Edit the <<<httpfs-${project.version}/conf/httpfs-site.xml>>> file and
set the <<<httpfs.hadoop.conf:fs.default.name>>> property to the HDFS
Namenode URI. For example:
+---+
httpfs.hadoop.conf:fs.default.name=hdfs://localhost:8021
+---+
* Configure Hadoop
Edit Hadoop's <<<core-site.xml>>> and define the Unix user that will
run the HttpFS server as a proxyuser. For example:
+---+
...
<property>
<name>hadoop.proxyuser.#HTTPFSUSER#.hosts</name>
<value>httpfs-host.foo.com</value>
</property>
<property>
<name>hadoop.proxyuser.#HTTPFSUSER#.groups</name>
<value>*</value>
</property>
...
+---+
IMPORTANT: Replace <<<#HTTPFSUSER#>>> with the Unix user that will
start the HttpFS server.
* Restart Hadoop
You need to restart Hadoop for the proxyuser configuration to become
active.
* Start/Stop HttpFS
To start/stop HttpFS use HttpFS's bin/httpfs.sh script. For example:
+---+
httpfs-${project.version} $ bin/httpfs.sh start
+---+
NOTE: Invoking the script without any parameters lists all possible
parameters (start, stop, run, etc.). The <<<httpfs.sh>>> script is a wrapper
for Tomcat's <<<catalina.sh>>> script that sets the environment variables
and Java System properties required to run HttpFS server.
* Test HttpFS Is Working
+---+
~ $ curl -i "http://<HTTPFSHOSTNAME>:14000?user.name=babu&op=homedir"
HTTP/1.1 200 OK
Content-Type: application/json
Transfer-Encoding: chunked
{"homeDir":"http:\/\/<HTTPFS_HOST>:14000\/user\/babu"}
+---+
* Embedded Tomcat Configuration
To configure the embedded Tomcat, go to the <<<tomcat/conf>>> directory.
HttpFS preconfigures the HTTP and Admin ports in Tomcat's <<<server.xml>>> to
14000 and 14001.
Tomcat logs are also preconfigured to go to HttpFS's <<<logs/>>> directory.
The following environment variables (which can be set in HttpFS's
<<<conf/httpfs-env.sh>>> script) can be used to alter those values:
* HTTPFS_HTTP_PORT
* HTTPFS_ADMIN_PORT
* HTTPFS_LOG
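For example, a minimal <<<conf/httpfs-env.sh>>> overriding those values could
look like the following sketch (the values shown are illustrative):
+---+
export HTTPFS_HTTP_PORT=14000
export HTTPFS_ADMIN_PORT=14001
export HTTPFS_LOG=/var/log/httpfs
+---+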
* HttpFS Configuration
HttpFS supports the following {{{./httpfs-default.html}configuration properties}}
in the HttpFS's <<<conf/httpfs-site.xml>>> configuration file.
\[ {{{./index.html}Go Back}} \]

View File

@ -0,0 +1,91 @@
~~ Licensed under the Apache License, Version 2.0 (the "License");
~~ you may not use this file except in compliance with the License.
~~ You may obtain a copy of the License at
~~
~~ http://www.apache.org/licenses/LICENSE-2.0
~~
~~ Unless required by applicable law or agreed to in writing, software
~~ distributed under the License is distributed on an "AS IS" BASIS,
~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~~ See the License for the specific language governing permissions and
~~ limitations under the License.
---
Hadoop HDFS over HTTP ${project.version} - Using HTTP Tools
---
---
${maven.build.timestamp}
Hadoop HDFS over HTTP ${project.version} - Using HTTP Tools
\[ {{{./index.html}Go Back}} \]
* Security
Out of the box HttpFS supports both pseudo authentication and Kerberos HTTP
SPNEGO authentication.
** Pseudo Authentication
With pseudo authentication the user name must be specified in the
<<<user.name=\<USERNAME\>>>> query string parameter of an HttpFS URL.
For example:
+---+
$ curl "http://<HTTFS_HOST>:14000/webhdfs/v1?op=homedir&user.name=babu"
+---+
** Kerberos HTTP SPNEGO Authentication
Kerberos HTTP SPNEGO authentication requires a tool or library supporting
the Kerberos HTTP SPNEGO protocol.
IMPORTANT: If using <<<curl>>>, the <<<curl>>> version being used must support
GSS (<<<curl -V>>> prints out 'GSS' if it supports it).
For example:
+---+
$ kinit
Please enter the password for tucu@LOCALHOST:
$ curl --negotiate -u foo "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=homedir"
Enter host password for user 'foo':
+---+
NOTE: the <<<-u USER>>> option is required by <<<--negotiate>>> but it is
not otherwise used. Use any value for <<<USER>>>; when asked for the
password, press [ENTER], as the password value is ignored.
** {Remembering Who I Am} (Establishing an Authenticated Session)
As with most authentication mechanisms, Hadoop HTTP authentication authenticates
users once and issues a short-lived authentication token to be presented in
subsequent requests. This authentication token is a signed HTTP Cookie.
When using tools like <<<curl>>>, the authentication token must be stored on
the first request doing authentication and submitted in subsequent requests.
To do this with curl, use the <<<-b>>> and <<<-c>>> options to save and send
HTTP cookies.
For example, the first request doing authentication should save the received
HTTP Cookies.
Using Pseudo Authentication:
+---+
$ curl -c ~/.httpfsauth "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=homedir&user.name=babu"
+---+
Using Kerberos HTTP SPNEGO authentication:
+---+
$ curl --negotiate -u foo -c ~/.httpfsauth "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=homedir"
+---+
Then, subsequent requests forward the previously received HTTP Cookie:
+---+
$ curl -b ~/.httpfsauth "http://<HTTPFS_HOST>:14000/webhdfs/v1?op=liststatus"
+---+
\[ {{{./index.html}Go Back}} \]

View File

@ -0,0 +1,88 @@
~~ Licensed under the Apache License, Version 2.0 (the "License");
~~ you may not use this file except in compliance with the License.
~~ You may obtain a copy of the License at
~~
~~ http://www.apache.org/licenses/LICENSE-2.0
~~
~~ Unless required by applicable law or agreed to in writing, software
~~ distributed under the License is distributed on an "AS IS" BASIS,
~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~~ See the License for the specific language governing permissions and
~~ limitations under the License.
---
Hadoop HDFS over HTTP - Documentation Sets ${project.version}
---
---
${maven.build.timestamp}
Hadoop HDFS over HTTP - Documentation Sets ${project.version}
HttpFS is a server that provides a REST HTTP gateway supporting all HDFS
File System operations (read and write), and it is interoperable with the
<<webhdfs>> REST HTTP API.
HttpFS can be used to transfer data between clusters running different
versions of Hadoop (overcoming RPC versioning issues), for example using
Hadoop DistCP.
HttpFS can be used to access data in HDFS on a cluster behind of a firewall
(the HttpFS server acts as a gateway and is the only system that is allowed
to cross the firewall into the cluster).
HttpFS can be used to access data in HDFS using HTTP utilities (such as curl
and wget) and HTTP libraries from languages other than Java (such as Perl).
The <<webhdfs>> client FileSystem implementation can be used to access HttpFS
using the Hadoop filesystem command line tool (<<<hadoop fs>>>) as well as
from Java applications using the Hadoop FileSystem Java API.
HttpFS has built-in security supporting Hadoop pseudo authentication,
HTTP SPNEGO Kerberos, and other pluggable authentication mechanisms. It also
provides Hadoop proxy user support.
* How Does HttpFS Work?
HttpFS is a separate service from the Hadoop NameNode.
HttpFS itself is a Java web application that runs using a preconfigured Tomcat
bundled with the HttpFS binary distribution.
HttpFS HTTP web-service API calls are HTTP REST calls that map to an HDFS file
system operation. For example, using the <<<curl>>> Unix command:
* <<<$ curl http://httpfs-host:14000/webhdfs/v1/user/foo/README.txt>>> returns
the contents of the HDFS <<</user/foo/README.txt>>> file.
* <<<$ curl http://httpfs-host:14000/webhdfs/v1/user/foo?op=liststatus>>> returns the
contents of the HDFS <<</user/foo>>> directory in JSON format.
* <<<$ curl -X POST http://httpfs-host:14000/webhdfs/v1/user/foo/bar?op=mkdirs>>>
creates the HDFS <<</user/foo/bar>>> directory.
* How Do HttpFS and Hadoop HDFS Proxy Differ?
HttpFS was inspired by Hadoop HDFS proxy.
HttpFS can be seen as a full rewrite of Hadoop HDFS proxy.
Hadoop HDFS proxy provides a subset of file system operations (read only),
while HttpFS provides support for all file system operations.
HttpFS uses a clean HTTP REST API, making its use with HTTP tools more
intuitive.
HttpFS supports Hadoop pseudo authentication, Kerberos SPNEGO authentication,
and Hadoop proxy users; Hadoop HDFS proxy did not.
* User and Developer Documentation
* {{{./ServerSetup.html}HttpFS Server Setup}}
* {{{./UsingHttpTools.html}Using HTTP Tools}}
* Current Limitations
<<<GETDELEGATIONTOKEN, RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN>>>
operations are not supported.

View File

@ -0,0 +1,49 @@
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="html"/>
<xsl:template match="configuration">
<html>
<body>
<h2>Configuration Properties</h2>
<table border="1">
<tr>
<th>name</th>
<th>value</th>
<th>description</th>
</tr>
<xsl:for-each select="property">
<tr>
<td>
<a name="{name}">
<xsl:value-of select="name"/>
</a>
</td>
<td>
<xsl:value-of select="value"/>
</td>
<td>
<xsl:value-of select="description"/>
</td>
</tr>
</xsl:for-each>
</table>
</body>
</html>
</xsl:template>
</xsl:stylesheet>

View File

@ -0,0 +1,34 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project name="HttpFS">
<version position="right"/>
<bannerLeft>
<name>&nbsp;</name>
</bannerLeft>
<skin>
<groupId>org.apache.maven.skins</groupId>
<artifactId>maven-stylus-skin</artifactId>
<version>1.2</version>
</skin>
<body>
<links>
</links>
</body>
</project>

View File

@ -0,0 +1,485 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
@RunWith(value = Parameterized.class)
public class TestHttpFSFileSystem extends HFSTestCase {
private void createHttpFSServer() throws Exception {
File homeDir = TestDirHelper.getTestDir();
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name");
Configuration conf = new Configuration(false);
conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName);
conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", HadoopUsersConfTestHelper
.getHadoopProxyUserGroups());
conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", HadoopUsersConfTestHelper
.getHadoopProxyUserHosts());
File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
OutputStream os = new FileOutputStream(hoopSite);
conf.writeXml(os);
os.close();
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("webapp");
WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
Server server = TestJettyHelper.getJettyServer();
server.addHandler(context);
server.start();
}
protected FileSystem getHttpFileSystem() throws Exception {
Configuration conf = new Configuration();
conf.set("fs.http.impl", HttpFSFileSystem.class.getName());
return FileSystem.get(TestJettyHelper.getJettyURL().toURI(), conf);
}
protected void testGet() throws Exception {
FileSystem fs = getHttpFileSystem();
Assert.assertNotNull(fs);
Assert.assertEquals(fs.getUri(), TestJettyHelper.getJettyURL().toURI());
fs.close();
}
private void testOpen() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
fs = getHttpFileSystem();
InputStream is = fs.open(new Path(path.toUri().getPath()));
Assert.assertEquals(is.read(), 1);
is.close();
fs.close();
}
private void testCreate(Path path, boolean override) throws Exception {
FileSystem fs = getHttpFileSystem();
FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024,
(short) 2, 100 * 1024 * 1024, null);
os.write(1);
os.close();
fs.close();
fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
FileStatus status = fs.getFileStatus(path);
Assert.assertEquals(status.getReplication(), 2);
Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
Assert.assertEquals(status.getPermission(), permission);
InputStream is = fs.open(path);
Assert.assertEquals(is.read(), 1);
is.close();
fs.close();
}
private void testCreate() throws Exception {
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
testCreate(path, false);
testCreate(path, true);
try {
testCreate(path, false);
Assert.fail();
} catch (IOException ex) {
// expected: the file already exists and overwrite is false
} catch (Exception ex) {
Assert.fail();
}
}
private void testAppend() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
fs = getHttpFileSystem();
os = fs.append(new Path(path.toUri().getPath()));
os.write(2);
os.close();
fs.close();
fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
InputStream is = fs.open(path);
Assert.assertEquals(is.read(), 1);
Assert.assertEquals(is.read(), 2);
Assert.assertEquals(is.read(), -1);
is.close();
fs.close();
}
private void testRename() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo");
fs.mkdirs(path);
fs.close();
fs = getHttpFileSystem();
Path oldPath = new Path(path.toUri().getPath());
Path newPath = new Path(path.getParent(), "bar");
fs.rename(oldPath, newPath);
fs.close();
fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Assert.assertFalse(fs.exists(oldPath));
Assert.assertTrue(fs.exists(newPath));
fs.close();
}
private void testDelete() throws Exception {
Path foo = new Path(TestHdfsHelper.getHdfsTestDir(), "foo");
Path bar = new Path(TestHdfsHelper.getHdfsTestDir(), "bar");
Path foe = new Path(TestHdfsHelper.getHdfsTestDir(), "foe");
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(foo);
fs.mkdirs(new Path(bar, "a"));
fs.mkdirs(foe);
FileSystem hoopFs = getHttpFileSystem();
Assert.assertTrue(hoopFs.delete(new Path(foo.toUri().getPath()), false));
Assert.assertFalse(fs.exists(foo));
try {
hoopFs.delete(new Path(bar.toUri().getPath()), false);
Assert.fail();
} catch (IOException ex) {
// expected: non-recursive delete of a non-empty directory must fail
} catch (Exception ex) {
Assert.fail();
}
Assert.assertTrue(fs.exists(bar));
Assert.assertTrue(hoopFs.delete(new Path(bar.toUri().getPath()), true));
Assert.assertFalse(fs.exists(bar));
Assert.assertTrue(fs.exists(foe));
Assert.assertTrue(hoopFs.delete(foe, true));
Assert.assertFalse(fs.exists(foe));
hoopFs.close();
fs.close();
}
private void testListStatus() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
FileStatus status1 = fs.getFileStatus(path);
fs.close();
fs = getHttpFileSystem();
FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
fs.close();
Assert.assertEquals(status2.getPermission(), status1.getPermission());
Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
Assert.assertEquals(status2.getReplication(), status1.getReplication());
Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
Assert.assertEquals(status2.getOwner(), status1.getOwner());
Assert.assertEquals(status2.getGroup(), status1.getGroup());
Assert.assertEquals(status2.getLen(), status1.getLen());
FileStatus[] stati = fs.listStatus(path.getParent());
Assert.assertEquals(stati.length, 1);
Assert.assertEquals(stati[0].getPath().getName(), path.getName());
}
private void testWorkingDirectory() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path workingDir = fs.getWorkingDirectory();
fs.close();
fs = getHttpFileSystem();
Path hoopWorkingDir = fs.getWorkingDirectory();
fs.close();
Assert.assertEquals(hoopWorkingDir.toUri().getPath(), workingDir.toUri().getPath());
fs = getHttpFileSystem();
fs.setWorkingDirectory(new Path("/tmp"));
workingDir = fs.getWorkingDirectory();
fs.close();
Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath());
}
private void testMkdirs() throws Exception {
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo");
FileSystem fs = getHttpFileSystem();
fs.mkdirs(path);
fs.close();
fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Assert.assertTrue(fs.exists(path));
fs.close();
}
private void testSetTimes() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
FileStatus status1 = fs.getFileStatus(path);
fs.close();
long at = status1.getAccessTime();
long mt = status1.getModificationTime();
fs = getHttpFileSystem();
fs.setTimes(path, mt + 10, at + 20);
fs.close();
fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
status1 = fs.getFileStatus(path);
fs.close();
long atNew = status1.getAccessTime();
long mtNew = status1.getModificationTime();
Assert.assertEquals(mtNew, mt + 10);
Assert.assertEquals(atNew, at + 20);
}
private void testSetPermission() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
fs = getHttpFileSystem();
FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
fs.setPermission(path, permission1);
fs.close();
fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
FileStatus status1 = fs.getFileStatus(path);
fs.close();
FsPermission permission2 = status1.getPermission();
Assert.assertEquals(permission2, permission1);
}
private void testSetOwner() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.close();
fs = getHttpFileSystem();
String user = HadoopUsersConfTestHelper.getHadoopUsers()[1];
String group = HadoopUsersConfTestHelper.getHadoopUserGroups(user)[0];
fs.setOwner(path, user, group);
fs.close();
fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
FileStatus status1 = fs.getFileStatus(path);
fs.close();
Assert.assertEquals(status1.getOwner(), user);
Assert.assertEquals(status1.getGroup(), group);
}
private void testSetReplication() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
fs.setReplication(path, (short) 2);
fs.close();
fs = getHttpFileSystem();
fs.setReplication(path, (short) 1);
fs.close();
fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
FileStatus status1 = fs.getFileStatus(path);
fs.close();
Assert.assertEquals(status1.getReplication(), (short) 1);
}
private void testChecksum() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
FileChecksum hdfsChecksum = fs.getFileChecksum(path);
fs.close();
fs = getHttpFileSystem();
FileChecksum httpChecksum = fs.getFileChecksum(path);
fs.close();
Assert.assertEquals(httpChecksum.getAlgorithmName(), hdfsChecksum.getAlgorithmName());
Assert.assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength());
Assert.assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes());
}
private void testContentSummary() throws Exception {
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt");
OutputStream os = fs.create(path);
os.write(1);
os.close();
ContentSummary hdfsContentSummary = fs.getContentSummary(path);
fs.close();
fs = getHttpFileSystem();
ContentSummary httpContentSummary = fs.getContentSummary(path);
fs.close();
Assert.assertEquals(httpContentSummary.getDirectoryCount(), hdfsContentSummary.getDirectoryCount());
Assert.assertEquals(httpContentSummary.getFileCount(), hdfsContentSummary.getFileCount());
Assert.assertEquals(httpContentSummary.getLength(), hdfsContentSummary.getLength());
Assert.assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
}
protected enum Operation {
GET, OPEN, CREATE, APPEND, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY
}
private void operation(Operation op) throws Exception {
switch (op) {
case GET:
testGet();
break;
case OPEN:
testOpen();
break;
case CREATE:
testCreate();
break;
case APPEND:
testAppend();
break;
case RENAME:
testRename();
break;
case DELETE:
testDelete();
break;
case LIST_STATUS:
testListStatus();
break;
case WORKING_DIRECTORY:
testWorkingDirectory();
break;
case MKDIRS:
testMkdirs();
break;
case SET_TIMES:
testSetTimes();
break;
case SET_PERMISSION:
testSetPermission();
break;
case SET_OWNER:
testSetOwner();
break;
case SET_REPLICATION:
testSetReplication();
break;
case CHECKSUM:
testChecksum();
break;
case CONTENT_SUMMARY:
testContentSummary();
break;
}
}
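// Each Operation value becomes one parameterized test case.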
@Parameterized.Parameters
public static Collection operations() {
Object[][] ops = new Object[Operation.values().length][];
for (int i = 0; i < Operation.values().length; i++) {
ops[i] = new Object[]{Operation.values()[i]};
}
return Arrays.asList(ops);
}
private Operation operation;
public TestHttpFSFileSystem(Operation operation) {
this.operation = operation;
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOperation() throws Exception {
createHttpFSServer();
operation(operation);
}
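// Repeats each operation as a proxy user to exercise the HttpFS doAs code path.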
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOperationDoAs() throws Exception {
createHttpFSServer();
UserGroupInformation ugi = UserGroupInformation.createProxyUser(HadoopUsersConfTestHelper.getHadoopUsers()[0],
UserGroupInformation.getCurrentUser());
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
operation(operation);
return null;
}
});
}
}

@@ -0,0 +1,55 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.test.TestJettyHelper;
import org.junit.Assert;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.net.URI;
@RunWith(value = Parameterized.class)
public class TestWebhdfsFileSystem extends TestHttpFSFileSystem {
public TestWebhdfsFileSystem(TestHttpFSFileSystem.Operation operation) {
super(operation);
}
@Override
protected FileSystem getHttpFileSystem() throws Exception {
Configuration conf = new Configuration();
conf.set("fs.webhdfs.impl", WebHdfsFileSystem.class.getName());
URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
return FileSystem.get(uri, conf);
}
@Override
protected void testGet() throws Exception {
FileSystem fs = getHttpFileSystem();
Assert.assertNotNull(fs);
URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
Assert.assertEquals(fs.getUri(), uri);
fs.close();
}
}

@@ -0,0 +1,164 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
public class TestHttpFSServer extends HFSTestCase {
@Test
@TestDir
@TestJetty
public void server() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration hoopConf = new Configuration(false);
HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, hoopConf);
server.init();
server.destroy();
}
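// Writes a minimal httpfs-site.xml pointing at the test HDFS and deploys the HttpFS webapp
// under /webhdfs on the test Jetty server.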
private void createHttpFSServer() throws Exception {
File homeDir = TestDirHelper.getTestDir();
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name");
Configuration conf = new Configuration(false);
conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName);
File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
OutputStream os = new FileOutputStream(hoopSite);
conf.writeXml(os);
os.close();
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL url = cl.getResource("webapp");
WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
Server server = TestJettyHelper.getJettyServer();
server.addHandler(context);
server.start();
}
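// The instrumentation operation is restricted: "nobody" is rejected with 401, "root" gets the
// JSON counters, and the operation is only valid on the root path.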
@Test
@TestDir
@TestJetty
@TestHdfs
public void instrumentation() throws Exception {
createHttpFSServer();
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "root"));
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
String line = reader.readLine();
reader.close();
Assert.assertTrue(line.contains("\"counters\":{"));
url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation", "root"));
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testHdfsAccess() throws Exception {
createHttpFSServer();
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
reader.readLine();
reader.close();
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGlobFilter() throws Exception {
createHttpFSServer();
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path("/tmp"));
fs.create(new Path("/tmp/foo.txt")).close();
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
reader.readLine();
reader.close();
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testPutNoOperation() throws Exception {
createHttpFSServer();
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setDoInput(true);
conn.setDoOutput(true);
conn.setRequestMethod("PUT");
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
}
}

@@ -0,0 +1,94 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.lang;
import junit.framework.Assert;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import java.util.concurrent.Callable;
public class TestRunnableCallable extends HTestCase {
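// Test helpers: R records that run() was invoked, C records that call() was invoked, CEx always throws.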
public static class R implements Runnable {
boolean RUN;
@Override
public void run() {
RUN = true;
}
}
public static class C implements Callable {
boolean RUN;
@Override
public Object call() throws Exception {
RUN = true;
return null;
}
}
public static class CEx implements Callable {
@Override
public Object call() throws Exception {
throw new Exception();
}
}
@Test
public void runnable() throws Exception {
R r = new R();
RunnableCallable rc = new RunnableCallable(r);
rc.run();
Assert.assertTrue(r.RUN);
r = new R();
rc = new RunnableCallable(r);
rc.call();
Assert.assertTrue(r.RUN);
Assert.assertEquals(rc.toString(), "R");
}
@Test
public void callable() throws Exception {
C c = new C();
RunnableCallable rc = new RunnableCallable(c);
rc.run();
Assert.assertTrue(c.RUN);
c = new C();
rc = new RunnableCallable(c);
rc.call();
Assert.assertTrue(c.RUN);
Assert.assertEquals(rc.toString(), "C");
}
@Test(expected = RuntimeException.class)
public void callableExRun() throws Exception {
CEx c = new CEx();
RunnableCallable rc = new RunnableCallable(c);
rc.run();
}
}

@@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.lang;
import junit.framework.Assert;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
public class TestXException extends HTestCase {
public enum TestERROR implements XException.ERROR {
TC;
@Override
public String getTemplate() {
return "{0}";
}
}
@Test
public void testXException() throws Exception {
XException ex = new XException(TestERROR.TC);
Assert.assertEquals(ex.getError(), TestERROR.TC);
Assert.assertEquals(ex.getMessage(), "TC: {0}");
Assert.assertNull(ex.getCause());
ex = new XException(TestERROR.TC, "msg");
Assert.assertEquals(ex.getError(), TestERROR.TC);
Assert.assertEquals(ex.getMessage(), "TC: msg");
Assert.assertNull(ex.getCause());
Exception cause = new Exception();
ex = new XException(TestERROR.TC, cause);
Assert.assertEquals(ex.getError(), TestERROR.TC);
Assert.assertEquals(ex.getMessage(), "TC: " + cause.toString());
Assert.assertEquals(ex.getCause(), cause);
XException xcause = ex;
ex = new XException(xcause);
Assert.assertEquals(ex.getError(), TestERROR.TC);
Assert.assertEquals(ex.getMessage(), xcause.getMessage());
Assert.assertEquals(ex.getCause(), xcause);
}
}

@@ -0,0 +1,68 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
public class TestBaseService extends HTestCase {
public static class MyService extends BaseService {
static Boolean INIT;
public MyService() {
super("myservice");
}
@Override
protected void init() throws ServiceException {
INIT = true;
}
@Override
public Class getInterface() {
return null;
}
}
@Test
public void baseService() throws Exception {
BaseService service = new MyService();
Assert.assertNull(service.getInterface());
Assert.assertEquals(service.getPrefix(), "myservice");
Assert.assertEquals(service.getServiceDependencies().length, 0);
Server server = Mockito.mock(Server.class);
Configuration conf = new Configuration(false);
conf.set("server.myservice.foo", "FOO");
conf.set("server.myservice1.bar", "BAR");
Mockito.when(server.getConfig()).thenReturn(conf);
Mockito.when(server.getPrefixedName("myservice.foo")).thenReturn("server.myservice.foo");
Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice.");
service.init(server);
Assert.assertEquals(service.getPrefixedName("foo"), "server.myservice.foo");
Assert.assertEquals(service.getServiceConfig().size(), 1);
Assert.assertEquals(service.getServiceConfig().get("foo"), "FOO");
Assert.assertTrue(MyService.INIT);
}
}

@@ -0,0 +1,790 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.lib.lang.XException;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestException;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class TestServer extends HTestCase {
@Test
@TestDir
public void constructorsGetters() throws Exception {
Server server = new Server("server", "/a", "/b", "/c", "/d", new Configuration(false));
Assert.assertEquals(server.getHomeDir(), "/a");
Assert.assertEquals(server.getConfigDir(), "/b");
Assert.assertEquals(server.getLogDir(), "/c");
Assert.assertEquals(server.getTempDir(), "/d");
Assert.assertEquals(server.getName(), "server");
Assert.assertEquals(server.getPrefix(), "server");
Assert.assertEquals(server.getPrefixedName("name"), "server.name");
Assert.assertNotNull(server.getConfig());
server = new Server("server", "/a", "/b", "/c", "/d");
Assert.assertEquals(server.getHomeDir(), "/a");
Assert.assertEquals(server.getConfigDir(), "/b");
Assert.assertEquals(server.getLogDir(), "/c");
Assert.assertEquals(server.getTempDir(), "/d");
Assert.assertEquals(server.getName(), "server");
Assert.assertEquals(server.getPrefix(), "server");
Assert.assertEquals(server.getPrefixedName("name"), "server.name");
Assert.assertNull(server.getConfig());
server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
Assert.assertEquals(server.getName(), "server");
Assert.assertEquals(server.getPrefix(), "server");
Assert.assertEquals(server.getPrefixedName("name"), "server.name");
Assert.assertNotNull(server.getConfig());
server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath());
Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath());
Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf");
Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log");
Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp");
Assert.assertEquals(server.getName(), "server");
Assert.assertEquals(server.getPrefix(), "server");
Assert.assertEquals(server.getPrefixedName("name"), "server.name");
Assert.assertNull(server.getConfig());
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S01.*")
@TestDir
public void initNoHomeDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S02.*")
@TestDir
public void initHomeDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
new FileOutputStream(homeDir).close();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S01.*")
@TestDir
public void initNoConfigDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S02.*")
@TestDir
public void initConfigDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
File configDir = new File(homeDir, "conf");
new FileOutputStream(configDir).close();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S01.*")
@TestDir
public void initNoLogDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S02.*")
@TestDir
public void initLogDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "temp").mkdir());
File logDir = new File(homeDir, "log");
new FileOutputStream(logDir).close();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S01.*")
@TestDir
public void initNoTempDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S02.*")
@TestDir
public void initTempDirNotDir() throws Exception {
File homeDir = new File(TestDirHelper.getTestDir(), "home");
Assert.assertTrue(homeDir.mkdir());
Assert.assertTrue(new File(homeDir, "conf").mkdir());
Assert.assertTrue(new File(homeDir, "log").mkdir());
File tempDir = new File(homeDir, "temp");
new FileOutputStream(tempDir).close();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = new Server("server", homeDir.getAbsolutePath(), conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S05.*")
@TestDir
public void siteFileNotAFile() throws Exception {
String homeDir = TestDirHelper.getTestDir().getAbsolutePath();
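// Create a directory where the server-site.xml file is expected so that init fails with S05.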
File siteFile = new File(homeDir, "server-site.xml");
Assert.assertTrue(siteFile.mkdir());
Server server = new Server("server", homeDir, homeDir, homeDir, homeDir);
server.init();
}
private Server createServer(Configuration conf) {
return new Server("server", TestDirHelper.getTestDir().getAbsolutePath(),
TestDirHelper.getTestDir().getAbsolutePath(),
TestDirHelper.getTestDir().getAbsolutePath(), TestDirHelper.getTestDir().getAbsolutePath(), conf);
}
@Test
@TestDir
public void log4jFile() throws Exception {
InputStream is = Server.getResource("default-log4j.properties");
OutputStream os = new FileOutputStream(new File(TestDirHelper.getTestDir(), "server-log4j.properties"));
IOUtils.copyBytes(is, os, 1024, true);
Configuration conf = new Configuration(false);
Server server = createServer(conf);
server.init();
}
public static class LifeCycleService extends BaseService {
public LifeCycleService() {
super("lifecycle");
}
@Override
protected void init() throws ServiceException {
Assert.assertEquals(getServer().getStatus(), Server.Status.BOOTING);
}
@Override
public void destroy() {
Assert.assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN);
super.destroy();
}
@Override
public Class getInterface() {
return LifeCycleService.class;
}
}
@Test
@TestDir
public void lifeCycle() throws Exception {
Configuration conf = new Configuration(false);
conf.set("server.services", LifeCycleService.class.getName());
Server server = createServer(conf);
Assert.assertEquals(server.getStatus(), Server.Status.UNDEF);
server.init();
Assert.assertNotNull(server.get(LifeCycleService.class));
Assert.assertEquals(server.getStatus(), Server.Status.NORMAL);
server.destroy();
Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
}
@Test
@TestDir
public void startWithStatusNotNormal() throws Exception {
Configuration conf = new Configuration(false);
conf.set("server.startup.status", "ADMIN");
Server server = createServer(conf);
server.init();
Assert.assertEquals(server.getStatus(), Server.Status.ADMIN);
server.destroy();
}
@Test(expected = IllegalArgumentException.class)
@TestDir
public void nonSettableStatus() throws Exception {
Configuration conf = new Configuration(false);
Server server = createServer(conf);
server.init();
server.setStatus(Server.Status.SHUTDOWN);
}
public static class TestService implements Service {
static List<String> LIFECYCLE = new ArrayList<String>();
@Override
public void init(Server server) throws ServiceException {
LIFECYCLE.add("init");
}
@Override
public void postInit() throws ServiceException {
LIFECYCLE.add("postInit");
}
@Override
public void destroy() {
LIFECYCLE.add("destroy");
}
@Override
public Class[] getServiceDependencies() {
return new Class[0];
}
@Override
public Class getInterface() {
return TestService.class;
}
@Override
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
LIFECYCLE.add("serverStatusChange");
}
}
public static class TestServiceExceptionOnStatusChange extends TestService {
@Override
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
throw new RuntimeException();
}
}
@Test
@TestDir
public void changeStatus() throws Exception {
TestService.LIFECYCLE.clear();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = createServer(conf);
server.init();
server.setStatus(Server.Status.ADMIN);
Assert.assertTrue(TestService.LIFECYCLE.contains("serverStatusChange"));
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S11.*")
@TestDir
public void changeStatusServiceException() throws Exception {
TestService.LIFECYCLE.clear();
Configuration conf = new Configuration(false);
conf.set("server.services", TestServiceExceptionOnStatusChange.class.getName());
Server server = createServer(conf);
server.init();
}
@Test
@TestDir
public void setSameStatus() throws Exception {
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = createServer(conf);
server.init();
TestService.LIFECYCLE.clear();
server.setStatus(server.getStatus());
Assert.assertFalse(TestService.LIFECYCLE.contains("serverStatusChange"));
}
@Test
@TestDir
public void serviceLifeCycle() throws Exception {
TestService.LIFECYCLE.clear();
Configuration conf = new Configuration(false);
conf.set("server.services", TestService.class.getName());
Server server = createServer(conf);
server.init();
Assert.assertNotNull(server.get(TestService.class));
server.destroy();
Assert.assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
}
@Test
@TestDir
public void loadingDefaultConfig() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Server server = new Server("testserver", dir, dir, dir, dir);
server.init();
Assert.assertEquals(server.getConfig().get("testserver.a"), "default");
}
@Test
@TestDir
public void loadingSiteConfig() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
File configFile = new File(dir, "testserver-site.xml");
Writer w = new FileWriter(configFile);
w.write("<configuration><property><name>testserver.a</name><value>site</value></property></configuration>");
w.close();
Server server = new Server("testserver", dir, dir, dir, dir);
server.init();
Assert.assertEquals(server.getConfig().get("testserver.a"), "site");
}
@Test
@TestDir
public void loadingSysPropConfig() throws Exception {
try {
System.setProperty("testserver.a", "sysprop");
String dir = TestDirHelper.getTestDir().getAbsolutePath();
File configFile = new File(dir, "testserver-site.xml");
Writer w = new FileWriter(configFile);
w.write("<configuration><property><name>testserver.a</name><value>site</value></property></configuration>");
w.close();
Server server = new Server("testserver", dir, dir, dir, dir);
server.init();
Assert.assertEquals(server.getConfig().get("testserver.a"), "sysprop");
} finally {
System.getProperties().remove("testserver.a");
}
}
@Test(expected = IllegalStateException.class)
@TestDir
public void illegalState1() throws Exception {
Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
server.destroy();
}
@Test(expected = IllegalStateException.class)
@TestDir
public void illegalState2() throws Exception {
Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
server.get(Object.class);
}
@Test(expected = IllegalStateException.class)
@TestDir
public void illegalState3() throws Exception {
Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
server.setService(null);
}
@Test(expected = IllegalStateException.class)
@TestDir
public void illegalState4() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Server server = new Server("server", dir, dir, dir, dir, new Configuration(false));
server.init();
server.init();
}
private static List<String> ORDER = new ArrayList<String>();
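// Configurable test service: records its lifecycle events in ORDER and can be told to fail on init or on destroy.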
public abstract static class MyService implements Service, XException.ERROR {
private String id;
private Class serviceInterface;
private Class[] dependencies;
private boolean failOnInit;
private boolean failOnDestroy;
protected MyService(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit,
boolean failOnDestroy) {
this.id = id;
this.serviceInterface = serviceInterface;
this.dependencies = dependencies;
this.failOnInit = failOnInit;
this.failOnDestroy = failOnDestroy;
}
@Override
public void init(Server server) throws ServiceException {
ORDER.add(id + ".init");
if (failOnInit) {
throw new ServiceException(this);
}
}
@Override
public void postInit() throws ServiceException {
ORDER.add(id + ".postInit");
}
@Override
public String getTemplate() {
return "";
}
@Override
public void destroy() {
ORDER.add(id + ".destroy");
if (failOnDestroy) {
throw new RuntimeException();
}
}
@Override
public Class[] getServiceDependencies() {
return dependencies;
}
@Override
public Class getInterface() {
return serviceInterface;
}
@Override
public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException {
}
}
public static class MyService1 extends MyService {
public MyService1() {
super("s1", MyService1.class, null, false, false);
}
protected MyService1(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit,
boolean failOnDestroy) {
super(id, serviceInterface, dependencies, failOnInit, failOnDestroy);
}
}
public static class MyService2 extends MyService {
public MyService2() {
super("s2", MyService2.class, null, true, false);
}
}
public static class MyService3 extends MyService {
public MyService3() {
super("s3", MyService3.class, null, false, false);
}
}
public static class MyService1a extends MyService1 {
public MyService1a() {
super("s1a", MyService1.class, null, false, false);
}
}
public static class MyService4 extends MyService1 {
public MyService4() {
super("s4a", String.class, null, false, false);
}
}
public static class MyService5 extends MyService {
public MyService5() {
super("s5", MyService5.class, null, false, true);
}
protected MyService5(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit,
boolean failOnDestroy) {
super(id, serviceInterface, dependencies, failOnInit, failOnDestroy);
}
}
public static class MyService5a extends MyService5 {
public MyService5a() {
super("s5a", MyService5.class, null, false, false);
}
}
public static class MyService6 extends MyService {
public MyService6() {
super("s6", MyService6.class, new Class[]{MyService1.class}, false, false);
}
}
public static class MyService7 extends MyService {
@SuppressWarnings({"UnusedParameters"})
public MyService7(String foo) {
super("s6", MyService7.class, new Class[]{MyService1.class}, false, false);
}
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S08.*")
@TestDir
public void invalidService() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", "foo");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S07.*")
@TestDir
public void serviceWithNoDefaultConstructor() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", MyService7.class.getName());
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S04.*")
@TestDir
public void serviceNotImplementingServiceInterface() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", MyService4.class.getName());
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServerException.class, msgRegExp = "S10.*")
@TestDir
public void serviceWithMissingDependency() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
String services = StringUtils.join(",", Arrays.asList(MyService3.class.getName(), MyService6.class.getName()));
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestDir
public void services() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf;
Server server;
// no services
ORDER.clear();
conf = new Configuration(false);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(ORDER.size(), 0);
// 2 services init/destroy
ORDER.clear();
String services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
Assert.assertEquals(server.get(MyService3.class).getInterface(), MyService3.class);
Assert.assertEquals(ORDER.size(), 4);
Assert.assertEquals(ORDER.get(0), "s1.init");
Assert.assertEquals(ORDER.get(1), "s3.init");
Assert.assertEquals(ORDER.get(2), "s1.postInit");
Assert.assertEquals(ORDER.get(3), "s3.postInit");
server.destroy();
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s3.destroy");
Assert.assertEquals(ORDER.get(5), "s1.destroy");
// 3 services, 2nd one fails on init
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService2.class.getName(),
MyService3.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
try {
server.init();
Assert.fail();
} catch (ServerException ex) {
Assert.assertEquals(MyService2.class, ex.getError().getClass());
} catch (Exception ex) {
Assert.fail();
}
Assert.assertEquals(ORDER.size(), 3);
Assert.assertEquals(ORDER.get(0), "s1.init");
Assert.assertEquals(ORDER.get(1), "s2.init");
Assert.assertEquals(ORDER.get(2), "s1.destroy");
// 2 services one fails on destroy
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService5.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(ORDER.size(), 4);
Assert.assertEquals(ORDER.get(0), "s1.init");
Assert.assertEquals(ORDER.get(1), "s5.init");
Assert.assertEquals(ORDER.get(2), "s1.postInit");
Assert.assertEquals(ORDER.get(3), "s5.postInit");
server.destroy();
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s5.destroy");
Assert.assertEquals(ORDER.get(5), "s1.destroy");
// service override via ext
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
String servicesExt = StringUtils.join(",", Arrays.asList(MyService1a.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.services.ext", servicesExt);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
Assert.assertEquals(ORDER.size(), 4);
Assert.assertEquals(ORDER.get(0), "s1a.init");
Assert.assertEquals(ORDER.get(1), "s3.init");
Assert.assertEquals(ORDER.get(2), "s1a.postInit");
Assert.assertEquals(ORDER.get(3), "s3.postInit");
server.destroy();
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s3.destroy");
Assert.assertEquals(ORDER.get(5), "s1a.destroy");
// service override via setService
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
server.setService(MyService1a.class);
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s1.destroy");
Assert.assertEquals(ORDER.get(5), "s1a.init");
Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class);
server.destroy();
Assert.assertEquals(ORDER.size(), 8);
Assert.assertEquals(ORDER.get(6), "s3.destroy");
Assert.assertEquals(ORDER.get(7), "s1a.destroy");
// service add via setService
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
server.setService(MyService5.class);
Assert.assertEquals(ORDER.size(), 5);
Assert.assertEquals(ORDER.get(4), "s5.init");
Assert.assertEquals(server.get(MyService5.class).getClass(), MyService5.class);
server.destroy();
Assert.assertEquals(ORDER.size(), 8);
Assert.assertEquals(ORDER.get(5), "s5.destroy");
Assert.assertEquals(ORDER.get(6), "s3.destroy");
Assert.assertEquals(ORDER.get(7), "s1.destroy");
// service add via setService exception
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
try {
server.setService(MyService7.class);
Assert.fail();
} catch (ServerException ex) {
Assert.assertEquals(ServerException.ERROR.S09, ex.getError());
} catch (Exception ex) {
Assert.fail();
}
Assert.assertEquals(ORDER.size(), 6);
Assert.assertEquals(ORDER.get(4), "s3.destroy");
Assert.assertEquals(ORDER.get(5), "s1.destroy");
// service with dependency
ORDER.clear();
services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService6.class.getName()));
conf = new Configuration(false);
conf.set("server.services", services);
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class);
Assert.assertEquals(server.get(MyService6.class).getInterface(), MyService6.class);
server.destroy();
}
}

@@ -0,0 +1,76 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.util.Arrays;
import java.util.Collection;
@RunWith(value = Parameterized.class)
public class TestServerConstructor extends HTestCase {
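// Each row is an invalid (name, homeDir, configDir, logDir, tempDir, conf) tuple that must make the Server constructor throw.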
@Parameterized.Parameters
public static Collection constructorFailParams() {
return Arrays.asList(new Object[][]{
{null, null, null, null, null, null},
{"", null, null, null, null, null},
{null, null, null, null, null, null},
{"server", null, null, null, null, null},
{"server", "", null, null, null, null},
{"server", "foo", null, null, null, null},
{"server", "/tmp", null, null, null, null},
{"server", "/tmp", "", null, null, null},
{"server", "/tmp", "foo", null, null, null},
{"server", "/tmp", "/tmp", null, null, null},
{"server", "/tmp", "/tmp", "", null, null},
{"server", "/tmp", "/tmp", "foo", null, null},
{"server", "/tmp", "/tmp", "/tmp", null, null},
{"server", "/tmp", "/tmp", "/tmp", "", null},
{"server", "/tmp", "/tmp", "/tmp", "foo", null}});
}
private String name;
private String homeDir;
private String configDir;
private String logDir;
private String tempDir;
private Configuration conf;
public TestServerConstructor(String name, String homeDir, String configDir, String logDir, String tempDir,
Configuration conf) {
this.name = name;
this.homeDir = homeDir;
this.configDir = configDir;
this.logDir = logDir;
this.tempDir = tempDir;
this.conf = conf;
}
@Test(expected = IllegalArgumentException.class)
public void constructorFail() {
new Server(name, homeDir, configDir, logDir, tempDir, conf);
}
}

@@ -0,0 +1,306 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.hadoop;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestException;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import java.io.IOException;
import java.util.Arrays;
public class TestFileSystemAccessService extends HFSTestCase {
@Test
@TestDir
public void simpleSecurity() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertNotNull(server.get(FileSystemAccess.class));
server.destroy();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "H01.*")
@TestDir
public void noKerberosKeytabProperty() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.authentication.type", "kerberos");
conf.set("server.hadoop.authentication.kerberos.keytab", " ");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "H01.*")
@TestDir
public void noKerberosPrincipalProperty() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.authentication.type", "kerberos");
conf.set("server.hadoop.authentication.kerberos.keytab", "/tmp/foo");
conf.set("server.hadoop.authentication.kerberos.principal", " ");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "H02.*")
@TestDir
public void kerberosInitializationFailure() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.authentication.type", "kerberos");
conf.set("server.hadoop.authentication.kerberos.keytab", "/tmp/foo");
conf.set("server.hadoop.authentication.kerberos.principal", "foo@FOO");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "H09.*")
@TestDir
public void invalidSecurity() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.authentication.type", "foo");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestDir
public void serviceHadoopConf() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.conf:foo", "FOO");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
server.destroy();
}
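// The default (unset) whitelist, a "*" wildcard and an exact entry must all accept namenode "NN".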
@Test
@TestDir
public void inWhitelists() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
fsAccess.validateNamenode("NN");
server.destroy();
conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.name.node.whitelist", "*");
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
fsAccess.validateNamenode("NN");
server.destroy();
conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.name.node.whitelist", "NN");
server = new Server("server", dir, dir, dir, dir, conf);
server.init();
fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
fsAccess.validateNamenode("NN");
server.destroy();
}
@Test
@TestException(exception = FileSystemAccessException.class, msgRegExp = "H05.*")
@TestDir
public void nameNodeNotInWhitelist() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.name.node.whitelist", "NN");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
fsAccess.validateNamenode("NNx");
}
@Test
@TestDir
@TestHdfs
public void createFileSystem() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
FileSystem fs = hadoop.createFileSystem("u", TestHdfsHelper.getHdfsConf());
Assert.assertNotNull(fs);
fs.mkdirs(new Path("/tmp/foo"));
hadoop.releaseFileSystem(fs);
try {
fs.mkdirs(new Path("/tmp/foo"));
Assert.fail();
} catch (IOException ex) {
// expected: the FileSystem was released and must reject further use
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
}
@Test
@TestDir
@TestHdfs
public void fileSystemExecutor() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
final FileSystem[] fsa = new FileSystem[1];
hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor<Void>() {
@Override
public Void execute(FileSystem fs) throws IOException {
fs.mkdirs(new Path("/tmp/foo"));
fsa[0] = fs;
return null;
}
});
try {
fsa[0].mkdirs(new Path("/tmp/foo"));
Assert.fail();
} catch (IOException ex) {
// expected: the FileSystem was released and must reject further use
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
}
@Test
@TestException(exception = FileSystemAccessException.class, msgRegExp = "H06.*")
@TestDir
@TestHdfs
public void fileSystemExecutorNoNameNode() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess fsAccess = server.get(FileSystemAccess.class);
Configuration hdfsConf = TestHdfsHelper.getHdfsConf();
hdfsConf.set("fs.default.name", "");
fsAccess.execute("u", hdfsConf, new FileSystemAccess.FileSystemExecutor<Void>() {
@Override
public Void execute(FileSystem fs) throws IOException {
return null;
}
});
}
@Test
@TestDir
@TestHdfs
public void fileSystemExecutorException() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
final FileSystem[] fsa = new FileSystem[1];
try {
hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor<Void>() {
@Override
public Void execute(FileSystem fs) throws IOException {
fsa[0] = fs;
throw new IOException();
}
});
Assert.fail();
} catch (FileSystemAccessException ex) {
Assert.assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03);
} catch (Exception ex) {
Assert.fail();
}
try {
fsa[0].mkdirs(new Path("/tmp/foo"));
Assert.fail();
} catch (IOException ex) {
// expected: the FileSystem was released after the failed executor
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
}
}
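
The tests above exercise the two FileSystemAccess usage patterns: explicit createFileSystem/releaseFileSystem, and the executor callback, which releases the FileSystem when the callback returns (even on failure). A minimal sketch of the callback pattern follows; the scratch directory, the checked path, and FileSystemAccessService's package name are assumptions of this sketch, not part of the tests.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.FileSystemAccess;

public class FileSystemAccessSketch {
  public static void main(String[] args) throws Exception {
    String dir = "/tmp/httpfs-sketch";  // assumption: writable scratch dir
    Configuration conf = new Configuration(false);
    // same two services the tests above register (package names assumed from this patch)
    conf.set("server.services",
        "org.apache.hadoop.lib.service.instrumentation.InstrumentationService,"
      + "org.apache.hadoop.lib.service.hadoop.FileSystemAccessService");
    Server server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    FileSystemAccess access = server.get(FileSystemAccess.class);
    // The executor receives a FileSystem bound to user "u"; it is released
    // when execute() returns, so the reference must not escape the callback.
    Boolean exists = access.execute("u", new Configuration(),
        new FileSystemAccess.FileSystemExecutor<Boolean>() {
          @Override
          public Boolean execute(FileSystem fs) throws IOException {
            return fs.exists(new Path("/tmp/foo"));
          }
        });
    System.out.println("exists: " + exists);
    server.destroy();
  }
}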


@@ -0,0 +1,404 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.instrumentation;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.service.scheduler.SchedulerService;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.util.StringUtils;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.Test;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
public class TestInstrumentationService extends HTestCase {
@Override
protected float getWaitForRatio() {
return 1;
}
@Test
public void cron() {
InstrumentationService.Cron cron = new InstrumentationService.Cron();
Assert.assertEquals(cron.start, 0);
Assert.assertEquals(cron.lapStart, 0);
Assert.assertEquals(cron.own, 0);
Assert.assertEquals(cron.total, 0);
long begin = System.currentTimeMillis();
Assert.assertEquals(cron.start(), cron);
Assert.assertEquals(cron.start(), cron);
Assert.assertEquals(cron.start, begin, 20);
Assert.assertEquals(cron.start, cron.lapStart);
sleep(100);
Assert.assertEquals(cron.stop(), cron);
long end = System.currentTimeMillis();
long delta = end - begin;
Assert.assertEquals(cron.own, delta, 20);
Assert.assertEquals(cron.total, 0);
Assert.assertEquals(cron.lapStart, 0);
sleep(100);
long reStart = System.currentTimeMillis();
cron.start();
Assert.assertEquals(cron.start, begin, 20);
Assert.assertEquals(cron.lapStart, reStart, 20);
sleep(100);
cron.stop();
long reEnd = System.currentTimeMillis();
delta += reEnd - reStart;
Assert.assertEquals(cron.own, delta, 20);
Assert.assertEquals(cron.total, 0);
Assert.assertEquals(cron.lapStart, 0);
cron.end();
Assert.assertEquals(cron.total, reEnd - begin, 20);
try {
cron.start();
Assert.fail();
} catch (IllegalStateException ex) {
// expected: start() is illegal after end()
} catch (Exception ex) {
Assert.fail();
}
try {
cron.stop();
Assert.fail();
} catch (IllegalStateException ex) {
// expected: stop() is illegal after end()
} catch (Exception ex) {
Assert.fail();
}
}
@Test
public void timer() throws Exception {
InstrumentationService.Timer timer = new InstrumentationService.Timer(2);
InstrumentationService.Cron cron = new InstrumentationService.Cron();
long ownStart;
long ownEnd;
long totalStart;
long totalEnd;
long ownDelta;
long totalDelta;
long avgTotal;
long avgOwn;
cron.start();
ownStart = System.currentTimeMillis();
totalStart = ownStart;
ownDelta = 0;
sleep(100);
cron.stop();
ownEnd = System.currentTimeMillis();
ownDelta += ownEnd - ownStart;
sleep(100);
cron.start();
ownStart = System.currentTimeMillis();
sleep(100);
cron.stop();
ownEnd = System.currentTimeMillis();
ownDelta += ownEnd - ownStart;
totalEnd = ownEnd;
totalDelta = totalEnd - totalStart;
avgTotal = totalDelta;
avgOwn = ownDelta;
timer.addCron(cron);
long[] values = timer.getValues();
Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
cron = new InstrumentationService.Cron();
cron.start();
ownStart = System.currentTimeMillis();
totalStart = ownStart;
ownDelta = 0;
sleep(200);
cron.stop();
ownEnd = System.currentTimeMillis();
ownDelta += ownEnd - ownStart;
sleep(200);
cron.start();
ownStart = System.currentTimeMillis();
sleep(200);
cron.stop();
ownEnd = System.currentTimeMillis();
ownDelta += ownEnd - ownStart;
totalEnd = ownEnd;
totalDelta = totalEnd - totalStart;
avgTotal = (avgTotal * 1 + totalDelta) / 2;
avgOwn = (avgOwn * 1 + ownDelta) / 2;
timer.addCron(cron);
values = timer.getValues();
Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
avgTotal = totalDelta;
avgOwn = ownDelta;
cron = new InstrumentationService.Cron();
cron.start();
ownStart = System.currentTimeMillis();
totalStart = ownStart;
ownDelta = 0;
sleep(300);
cron.stop();
ownEnd = System.currentTimeMillis();
ownDelta += ownEnd - ownStart;
sleep(300);
cron.start();
ownStart = System.currentTimeMillis();
sleep(300);
cron.stop();
ownEnd = System.currentTimeMillis();
ownDelta += ownEnd - ownStart;
totalEnd = ownEnd;
totalDelta = totalEnd - totalStart;
avgTotal = (avgTotal * 1 + totalDelta) / 2;
avgOwn = (avgOwn * 1 + ownDelta) / 2;
cron.stop();
timer.addCron(cron);
values = timer.getValues();
Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20);
Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20);
JSONObject json = (JSONObject) new JSONParser().parse(timer.toJSONString());
Assert.assertEquals(json.size(), 4);
Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
StringWriter writer = new StringWriter();
timer.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
Assert.assertEquals(json.size(), 4);
Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]);
Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]);
Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]);
Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]);
}
@Test
public void sampler() throws Exception {
final long[] value = new long[1];
Instrumentation.Variable<Long> var = new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return value[0];
}
};
InstrumentationService.Sampler sampler = new InstrumentationService.Sampler();
sampler.init(4, var);
Assert.assertEquals(sampler.getRate(), 0f, 0.0001);
sampler.sample();
Assert.assertEquals(sampler.getRate(), 0f, 0.0001);
value[0] = 1;
sampler.sample();
Assert.assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
value[0] = 2;
sampler.sample();
Assert.assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
value[0] = 3;
sampler.sample();
Assert.assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
value[0] = 4;
sampler.sample();
Assert.assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
JSONObject json = (JSONObject) new JSONParser().parse(sampler.toJSONString());
Assert.assertEquals(json.size(), 2);
Assert.assertEquals(json.get("sampler"), sampler.getRate());
Assert.assertEquals(json.get("size"), 4L);
StringWriter writer = new StringWriter();
sampler.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
Assert.assertEquals(json.size(), 2);
Assert.assertEquals(json.get("sampler"), sampler.getRate());
Assert.assertEquals(json.get("size"), 4L);
}
@Test
public void variableHolder() throws Exception {
InstrumentationService.VariableHolder<String> variableHolder =
new InstrumentationService.VariableHolder<String>();
variableHolder.var = new Instrumentation.Variable<String>() {
@Override
public String getValue() {
return "foo";
}
};
JSONObject json = (JSONObject) new JSONParser().parse(variableHolder.toJSONString());
Assert.assertEquals(json.size(), 1);
Assert.assertEquals(json.get("value"), "foo");
StringWriter writer = new StringWriter();
variableHolder.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
Assert.assertEquals(json.size(), 1);
Assert.assertEquals(json.get("value"), "foo");
}
@Test
@TestDir
@SuppressWarnings("unchecked")
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Instrumentation instrumentation = server.get(Instrumentation.class);
Assert.assertNotNull(instrumentation);
instrumentation.incr("g", "c", 1);
instrumentation.incr("g", "c", 2);
instrumentation.incr("g", "c1", 2);
Instrumentation.Cron cron = instrumentation.createCron();
cron.start();
sleep(100);
cron.stop();
instrumentation.addCron("g", "t", cron);
cron = instrumentation.createCron();
cron.start();
sleep(200);
cron.stop();
instrumentation.addCron("g", "t", cron);
Instrumentation.Variable<String> var = new Instrumentation.Variable<String>() {
@Override
public String getValue() {
return "foo";
}
};
instrumentation.addVariable("g", "v", var);
Instrumentation.Variable<Long> varToSample = new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return 1L;
}
};
instrumentation.addSampler("g", "s", 10, varToSample);
Map<String, ?> snapshot = instrumentation.getSnapshot();
Assert.assertNotNull(snapshot.get("os-env"));
Assert.assertNotNull(snapshot.get("sys-props"));
Assert.assertNotNull(snapshot.get("jvm"));
Assert.assertNotNull(snapshot.get("counters"));
Assert.assertNotNull(snapshot.get("timers"));
Assert.assertNotNull(snapshot.get("variables"));
Assert.assertNotNull(snapshot.get("samplers"));
Assert.assertNotNull(((Map<String, String>) snapshot.get("os-env")).get("PATH"));
Assert.assertNotNull(((Map<String, String>) snapshot.get("sys-props")).get("java.version"));
Assert.assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("free.memory"));
Assert.assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("max.memory"));
Assert.assertNotNull(((Map<String, ?>) snapshot.get("jvm")).get("total.memory"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("counters")).get("g").get("c1"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("timers")).get("g").get("t"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("variables")).get("g").get("v"));
Assert.assertNotNull(((Map<String, Map<String, Object>>) snapshot.get("samplers")).get("g").get("s"));
StringWriter writer = new StringWriter();
JSONObject.writeJSONString(snapshot, writer);
writer.close();
server.destroy();
}
@Test
@TestDir
@SuppressWarnings("unchecked")
public void sampling() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName()));
Configuration conf = new Configuration(false);
conf.set("server.services", services);
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Instrumentation instrumentation = server.get(Instrumentation.class);
final AtomicInteger count = new AtomicInteger();
Instrumentation.Variable<Long> varToSample = new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return (long) count.incrementAndGet();
}
};
instrumentation.addSampler("g", "s", 10, varToSample);
sleep(2000);
int i = count.get();
Assert.assertTrue(i > 0);
Map<String, Map<String, ?>> snapshot = instrumentation.getSnapshot();
Map<String, Map<String, Object>> samplers = (Map<String, Map<String, Object>>) snapshot.get("samplers");
InstrumentationService.Sampler sampler = (InstrumentationService.Sampler) samplers.get("g").get("s");
Assert.assertTrue(sampler.getRate() > 0);
server.destroy();
}
}
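
Putting the instrumentation pieces together, a component would typically bump a counter and time its work with a Cron obtained from the service, using the same Server bootstrap as the tests above. A minimal sketch; the scratch directory and the group/name labels are illustrative assumptions.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Instrumentation;
import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
import org.apache.hadoop.util.StringUtils;

public class InstrumentationSketch {
  public static void main(String[] args) throws Exception {
    String dir = "/tmp/httpfs-sketch";  // assumption: writable scratch dir
    Configuration conf = new Configuration(false);
    conf.set("server.services",
        StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName())));
    Server server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    Instrumentation instr = server.get(Instrumentation.class);
    // Count an operation and time it; "own" time accumulates across
    // start()/stop() laps, as the cron() test above demonstrates.
    instr.incr("ops", "requests", 1);
    Instrumentation.Cron cron = instr.createCron();
    cron.start();
    Thread.sleep(50);  // stand-in for real work
    cron.stop();
    instr.addCron("ops", "request-time", cron);
    System.out.println(instr.getSnapshot().get("counters"));
    server.destroy();
  }
}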


@@ -0,0 +1,49 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.scheduler;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Scheduler;
import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import java.util.Arrays;
public class TestSchedulerService extends HTestCase {
@Test
@TestDir
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName())));
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Assert.assertNotNull(server.get(Scheduler.class));
server.destroy();
}
}


@@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
public class TestGroupsService extends HTestCase {
@Test
@TestDir
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Groups groups = server.get(Groups.class);
Assert.assertNotNull(groups);
List<String> g = groups.getGroups(System.getProperty("user.name"));
Assert.assertTrue(g.size() > 0);
server.destroy();
}
@Test(expected = RuntimeException.class)
@TestDir
public void invalidGroupsMapping() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
conf.set("server.groups.hadoop.security.group.mapping", String.class.getName());
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
}


@@ -0,0 +1,225 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.service.ProxyUser;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestException;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import java.security.AccessControlException;
import java.util.Arrays;
import java.util.List;
public class TestProxyUserService extends HTestCase {
@Test
@TestDir
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
server.destroy();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "PRXU02.*")
@TestDir
public void wrongConfigGroups() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "PRXU01.*")
@TestDir
public void wrongHost() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "otherhost");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "PRXU02.*")
@TestDir
public void wrongConfigHosts() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestDir
public void validateAnyHostAnyUser() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "*");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", "bar");
server.destroy();
}
@Test(expected = AccessControlException.class)
@TestDir
public void invalidProxyUser() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "*");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
proxyUser.validate("bar", "localhost", "foo");
server.destroy();
}
@Test
@TestDir
public void validateHost() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "localhost");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", "bar");
server.destroy();
}
private String getGroup() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Groups groups = server.get(Groups.class);
List<String> g = groups.getGroups(System.getProperty("user.name"));
server.destroy();
return g.get(0);
}
@Test
@TestDir
public void validateGroup() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "*");
conf.set("server.proxyuser.foo.groups", getGroup());
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
server.destroy();
}
@Test(expected = AccessControlException.class)
@TestDir
public void unknownHost() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "localhost");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
proxyUser.validate("foo", "unknownhost.bar.foo", "bar");
server.destroy();
}
@Test(expected = AccessControlException.class)
@TestDir
public void invalidHost() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "localhost");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
proxyUser.validate("foo", "www.yahoo.com", "bar");
server.destroy();
}
@Test(expected = AccessControlException.class)
@TestDir
public void invalidGroup() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "localhost");
conf.set("server.proxyuser.foo.groups", "nobody");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
Assert.assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
server.destroy();
}
}
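
The proxy-user checks above follow Hadoop's impersonation model: server.proxyuser.#USER#.hosts bounds which hosts a proxy user may connect from, and server.proxyuser.#USER#.groups bounds which users (by group membership) it may impersonate. A minimal sketch of a successful validation; the "hue" gateway user, the doAs user "bob", and the scratch directory are hypothetical.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.ProxyUser;
import org.apache.hadoop.lib.service.security.GroupsService;
import org.apache.hadoop.lib.service.security.ProxyUserService;
import org.apache.hadoop.util.StringUtils;

public class ProxyUserSketch {
  public static void main(String[] args) throws Exception {
    String dir = "/tmp/httpfs-sketch";  // assumption: writable scratch dir
    Configuration conf = new Configuration(false);
    conf.set("server.services", StringUtils.join(",", Arrays.asList(
        GroupsService.class.getName(), ProxyUserService.class.getName())));
    // Restrict the hypothetical gateway user "hue" to one host, any group.
    conf.set("server.proxyuser.hue.hosts", "localhost");
    conf.set("server.proxyuser.hue.groups", "*");
    Server server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    ProxyUser proxyUser = server.get(ProxyUser.class);
    // Throws java.security.AccessControlException if the host or the
    // doAs user's groups are not allowed for "hue".
    proxyUser.validate("hue", "localhost", "bob");
    server.destroy();
  }
}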


@@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import junit.framework.Assert;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
public class TestHostnameFilter extends HTestCase {
@Test
public void hostname() throws Exception {
ServletRequest request = Mockito.mock(ServletRequest.class);
Mockito.when(request.getRemoteAddr()).thenReturn("localhost");
ServletResponse response = Mockito.mock(ServletResponse.class);
final AtomicBoolean invoked = new AtomicBoolean();
FilterChain chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
Assert.assertEquals(HostnameFilter.get(), "localhost");
invoked.set(true);
}
};
Filter filter = new HostnameFilter();
filter.init(null);
Assert.assertNull(HostnameFilter.get());
filter.doFilter(request, response, chain);
Assert.assertTrue(invoked.get());
Assert.assertNull(HostnameFilter.get());
filter.destroy();
}
}


@@ -0,0 +1,117 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import junit.framework.Assert;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.MDC;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.security.Principal;
import java.util.concurrent.atomic.AtomicBoolean;
public class TestMDCFilter extends HTestCase {
@Test
public void mdc() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getUserPrincipal()).thenReturn(null);
Mockito.when(request.getMethod()).thenReturn("METHOD");
Mockito.when(request.getPathInfo()).thenReturn("/pathinfo");
ServletResponse response = Mockito.mock(ServletResponse.class);
final AtomicBoolean invoked = new AtomicBoolean();
FilterChain chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
Assert.assertEquals(MDC.get("hostname"), null);
Assert.assertEquals(MDC.get("user"), null);
Assert.assertEquals(MDC.get("method"), "METHOD");
Assert.assertEquals(MDC.get("path"), "/pathinfo");
invoked.set(true);
}
};
MDC.clear();
Filter filter = new MDCFilter();
filter.init(null);
filter.doFilter(request, response, chain);
Assert.assertTrue(invoked.get());
Assert.assertNull(MDC.get("hostname"));
Assert.assertNull(MDC.get("user"));
Assert.assertNull(MDC.get("method"));
Assert.assertNull(MDC.get("path"));
Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() {
@Override
public String getName() {
return "name";
}
});
invoked.set(false);
chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
Assert.assertEquals(MDC.get("hostname"), null);
Assert.assertEquals(MDC.get("user"), "name");
Assert.assertEquals(MDC.get("method"), "METHOD");
Assert.assertEquals(MDC.get("path"), "/pathinfo");
invoked.set(true);
}
};
filter.doFilter(request, response, chain);
Assert.assertTrue(invoked.get());
HostnameFilter.HOSTNAME_TL.set("HOST");
invoked.set(false);
chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
Assert.assertEquals(MDC.get("hostname"), "HOST");
Assert.assertEquals(MDC.get("user"), "name");
Assert.assertEquals(MDC.get("method"), "METHOD");
Assert.assertEquals(MDC.get("path"), "/pathinfo");
invoked.set(true);
}
};
filter.doFilter(request, response, chain);
Assert.assertTrue(invoked.get());
HostnameFilter.HOSTNAME_TL.remove();
filter.destroy();
}
}


@@ -0,0 +1,76 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.servlet;
import junit.framework.Assert;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.junit.Test;
public class TestServerWebApp extends HTestCase {
@Test(expected = IllegalArgumentException.class)
public void getHomeDirNotDef() {
ServerWebApp.getHomeDir("TestServerWebApp00");
}
@Test
public void getHomeDir() {
System.setProperty("TestServerWebApp0.home.dir", "/tmp");
Assert.assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp");
Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log");
System.setProperty("TestServerWebApp0.log.dir", "/tmplog");
Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog");
}
@Test
@TestDir
public void lifecycle() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
System.setProperty("TestServerWebApp1.home.dir", dir);
System.setProperty("TestServerWebApp1.config.dir", dir);
System.setProperty("TestServerWebApp1.log.dir", dir);
System.setProperty("TestServerWebApp1.temp.dir", dir);
ServerWebApp server = new ServerWebApp("TestServerWebApp1") {
};
Assert.assertEquals(server.getStatus(), Server.Status.UNDEF);
server.contextInitialized(null);
Assert.assertEquals(server.getStatus(), Server.Status.NORMAL);
server.contextDestroyed(null);
Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN);
}
@Test(expected = RuntimeException.class)
@TestDir
public void failedInit() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
System.setProperty("TestServerWebApp2.home.dir", dir);
System.setProperty("TestServerWebApp2.config.dir", dir);
System.setProperty("TestServerWebApp2.log.dir", dir);
System.setProperty("TestServerWebApp2.temp.dir", dir);
System.setProperty("testserverwebapp2.services", "FOO");
ServerWebApp server = new ServerWebApp("TestServerWebApp2") {
};
server.contextInitialized(null);
}
}


@@ -0,0 +1,144 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.util;
import junit.framework.Assert;
import org.apache.hadoop.test.HTestCase;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
public class TestCheck extends HTestCase {
@Test
public void notNullNotNull() {
Assert.assertEquals(Check.notNull("value", "name"), "value");
}
@Test(expected = IllegalArgumentException.class)
public void notNullNull() {
Check.notNull(null, "name");
}
@Test
public void notNullElementsNotNull() {
Check.notNullElements(new ArrayList<String>(), "name");
Check.notNullElements(Arrays.asList("a"), "name");
}
@Test(expected = IllegalArgumentException.class)
public void notNullElementsNullList() {
Check.notNullElements(null, "name");
}
@Test(expected = IllegalArgumentException.class)
public void notNullElementsNullElements() {
Check.notNullElements(Arrays.asList("a", "", null), "name");
}
@Test
public void notEmptyElementsNotNull() {
Check.notEmptyElements(new ArrayList<String>(), "name");
Check.notEmptyElements(Arrays.asList("a"), "name");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyElementsNullList() {
Check.notEmptyElements(null, "name");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyElementsNullElements() {
Check.notEmptyElements(Arrays.asList("a", null), "name");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyElementsEmptyElements() {
Check.notEmptyElements(Arrays.asList("a", ""), "name");
}
@Test
public void notEmptyNotEmpty() {
Assert.assertEquals(Check.notEmpty("value", "name"), "value");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyNull() {
Check.notEmpty(null, "name");
}
@Test(expected = IllegalArgumentException.class)
public void notEmptyEmpty() {
Check.notEmpty("", "name");
}
@Test
public void validIdentifierValid() throws Exception {
Assert.assertEquals(Check.validIdentifier("a", 1, ""), "a");
Assert.assertEquals(Check.validIdentifier("a1", 2, ""), "a1");
Assert.assertEquals(Check.validIdentifier("a_", 3, ""), "a_");
Assert.assertEquals(Check.validIdentifier("_", 1, ""), "_");
}
@Test(expected = IllegalArgumentException.class)
public void validIdentifierInvalid1() throws Exception {
Check.validIdentifier("!", 1, "");
}
@Test(expected = IllegalArgumentException.class)
public void validIdentifierInvalid2() throws Exception {
Check.validIdentifier("a1", 1, "");
}
@Test(expected = IllegalArgumentException.class)
public void validIdentifierInvalid3() throws Exception {
Check.validIdentifier("1", 1, "");
}
@Test
public void checkGTZeroGreater() {
Assert.assertEquals(Check.gt0(120, "test"), 120);
}
@Test(expected = IllegalArgumentException.class)
public void checkGTZeroZero() {
Check.gt0(0, "test");
}
@Test(expected = IllegalArgumentException.class)
public void checkGTZeroLessThanZero() {
Check.gt0(-1, "test");
}
@Test
public void checkGEZero() {
Assert.assertEquals(Check.ge0(120, "test"), 120);
Assert.assertEquals(Check.ge0(0, "test"), 0);
}
@Test(expected = IllegalArgumentException.class)
public void checkGELessThanZero() {
Check.ge0(-1, "test");
}
}
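
Check is a plain precondition helper: each method returns its argument on success and throws IllegalArgumentException naming the offending parameter on failure, so checks can be inlined into assignments. A small sketch using the signatures exercised above; the class and parameter names are illustrative.

import org.apache.hadoop.lib.util.Check;

public class CheckSketch {
  private final String name;
  private final int retries;

  public CheckSketch(String name, int retries) {
    // notEmpty() returns the validated String, gt0() the validated int,
    // so both can guard and assign in one step.
    this.name = Check.notEmpty(name, "name");
    this.retries = Check.gt0(retries, "retries");
  }

  public static void main(String[] args) {
    new CheckSketch("worker", 3);  // ok
    new CheckSketch("", 3);        // throws IllegalArgumentException for "name"
  }
}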


@@ -0,0 +1,125 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.util;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
public class TestConfigurationUtils {
@Test
public void constructors() throws Exception {
Configuration conf = new Configuration(false);
Assert.assertEquals(conf.size(), 0);
byte[] bytes = "<configuration><property><name>a</name><value>A</value></property></configuration>".getBytes();
InputStream is = new ByteArrayInputStream(bytes);
conf = new Configuration(false);
ConfigurationUtils.load(conf, is);
Assert.assertEquals(conf.size(), 1);
Assert.assertEquals(conf.get("a"), "A");
}
@Test(expected = IOException.class)
public void constructorsFail3() throws Exception {
InputStream is = new ByteArrayInputStream("<xonfiguration></xonfiguration>".getBytes());
Configuration conf = new Configuration(false);
ConfigurationUtils.load(conf, is);
}
@Test
public void copy() throws Exception {
Configuration srcConf = new Configuration(false);
Configuration targetConf = new Configuration(false);
srcConf.set("testParameter1", "valueFromSource");
srcConf.set("testParameter2", "valueFromSource");
targetConf.set("testParameter2", "valueFromTarget");
targetConf.set("testParameter3", "valueFromTarget");
ConfigurationUtils.copy(srcConf, targetConf);
Assert.assertEquals("valueFromSource", targetConf.get("testParameter1"));
Assert.assertEquals("valueFromSource", targetConf.get("testParameter2"));
Assert.assertEquals("valueFromTarget", targetConf.get("testParameter3"));
}
@Test
public void injectDefaults() throws Exception {
Configuration srcConf = new Configuration(false);
Configuration targetConf = new Configuration(false);
srcConf.set("testParameter1", "valueFromSource");
srcConf.set("testParameter2", "valueFromSource");
targetConf.set("testParameter2", "originalValueFromTarget");
targetConf.set("testParameter3", "originalValueFromTarget");
ConfigurationUtils.injectDefaults(srcConf, targetConf);
Assert.assertEquals("valueFromSource", targetConf.get("testParameter1"));
Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter2"));
Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter3"));
Assert.assertEquals("valueFromSource", srcConf.get("testParameter1"));
Assert.assertEquals("valueFromSource", srcConf.get("testParameter2"));
Assert.assertNull(srcConf.get("testParameter3"));
}
@Test
public void resolve() {
Configuration conf = new Configuration(false);
conf.set("a", "A");
conf.set("b", "${a}");
Assert.assertEquals(conf.getRaw("a"), "A");
Assert.assertEquals(conf.getRaw("b"), "${a}");
conf = ConfigurationUtils.resolve(conf);
Assert.assertEquals(conf.getRaw("a"), "A");
Assert.assertEquals(conf.getRaw("b"), "A");
}
@Test
public void testVarResolutionAndSysProps() {
String userName = System.getProperty("user.name");
Configuration conf = new Configuration(false);
conf.set("a", "A");
conf.set("b", "${a}");
conf.set("c", "${user.name}");
conf.set("d", "${aaa}");
Assert.assertEquals(conf.getRaw("a"), "A");
Assert.assertEquals(conf.getRaw("b"), "${a}");
Assert.assertEquals(conf.getRaw("c"), "${user.name}");
Assert.assertEquals(conf.get("a"), "A");
Assert.assertEquals(conf.get("b"), "A");
Assert.assertEquals(conf.get("c"), userName);
Assert.assertEquals(conf.get("d"), "${aaa}");
conf.set("user.name", "foo");
Assert.assertEquals(conf.get("user.name"), "foo");
}
}
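
A short sketch of the three helpers in combination: load a configuration from a stream, overlay defaults without clobbering explicit settings, and resolve ${...} references into literal values. This assumes resolve() materializes the same substitutions get() performs (as the tests above suggest); the property names are illustrative.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.util.ConfigurationUtils;

public class ConfigurationUtilsSketch {
  public static void main(String[] args) throws Exception {
    InputStream is = new ByteArrayInputStream(
        ("<configuration><property>"
       + "<name>banner</name><value>hello ${user.name}</value>"
       + "</property></configuration>").getBytes());
    Configuration conf = new Configuration(false);
    ConfigurationUtils.load(conf, is);

    Configuration defaults = new Configuration(false);
    defaults.set("banner", "ignored");  // already set above, keeps its value
    defaults.set("timeout", "30");      // missing above, gets injected
    ConfigurationUtils.injectDefaults(defaults, conf);

    conf = ConfigurationUtils.resolve(conf);  // getRaw() now returns resolved values
    System.out.println(conf.getRaw("banner") + " / " + conf.getRaw("timeout"));
  }
}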


@@ -0,0 +1,50 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import junit.framework.Assert;
import org.junit.Test;
public class TestBooleanParam {
@Test
public void param() throws Exception {
BooleanParam param = new BooleanParam("p", "true") {
};
Assert.assertEquals(param.getDomain(), "a boolean");
Assert.assertEquals(param.value(), Boolean.TRUE);
Assert.assertEquals(param.toString(), "true");
param = new BooleanParam("p", "false") {
};
Assert.assertEquals(param.value(), Boolean.FALSE);
param = new BooleanParam("p", null) {
};
Assert.assertEquals(param.value(), null);
param = new BooleanParam("p", "") {
};
Assert.assertEquals(param.value(), null);
}
@Test(expected = IllegalArgumentException.class)
public void invalid() throws Exception {
new BooleanParam("p", "x") {
};
}
}
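
The Param hierarchy is meant to be subclassed once per REST parameter, so the parameter's name, parsing, and error-message domain live next to its definition; the anonymous subclasses above stand in for that. A sketch with a hypothetical OverwriteParam (the parameter name is an assumption, not one of HttpFS's actual parameters):

import org.apache.hadoop.lib.wsrs.BooleanParam;

public class OverwriteParamSketch {
  // Hypothetical query parameter: ?overwrite=true|false
  public static class OverwriteParam extends BooleanParam {
    public OverwriteParam(String str) {
      super("overwrite", str);
    }
  }

  public static void main(String[] args) {
    OverwriteParam param = new OverwriteParam("true");
    System.out.println(param.value());  // Boolean.TRUE
    param = new OverwriteParam(null);   // absent parameter
    System.out.println(param.value());  // null
    new OverwriteParam("maybe");        // IllegalArgumentException ("a boolean")
  }
}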


@@ -0,0 +1,53 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import junit.framework.Assert;
import org.junit.Test;
public class TestByteParam {
@Test
public void param() throws Exception {
ByteParam param = new ByteParam("p", "1") {
};
Assert.assertEquals(param.getDomain(), "a byte");
Assert.assertEquals(param.value(), new Byte((byte) 1));
Assert.assertEquals(param.toString(), "1");
param = new ByteParam("p", null) {
};
Assert.assertEquals(param.value(), null);
param = new ByteParam("p", "") {
};
Assert.assertEquals(param.value(), null);
}
@Test(expected = IllegalArgumentException.class)
public void invalid1() throws Exception {
new ByteParam("p", "x") {
};
}
@Test(expected = IllegalArgumentException.class)
public void invalid2() throws Exception {
new ByteParam("p", "256") {
};
}
}


@@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import junit.framework.Assert;
import org.junit.Test;
public class TestEnumParam {
public enum ENUM {
FOO, BAR
}
@Test
public void param() throws Exception {
EnumParam<ENUM> param = new EnumParam<ENUM>("p", "FOO", ENUM.class) {
};
Assert.assertEquals(param.getDomain(), "FOO,BAR");
Assert.assertEquals(param.value(), ENUM.FOO);
Assert.assertEquals(param.toString(), "FOO");
param = new EnumParam<ENUM>("p", null, ENUM.class) {
};
Assert.assertEquals(param.value(), null);
param = new EnumParam<ENUM>("p", "", ENUM.class) {
};
Assert.assertEquals(param.value(), null);
}
@Test(expected = IllegalArgumentException.class)
public void invalid1() throws Exception {
new EnumParam<ENUM>("p", "x", ENUM.class) {
};
}
}


@@ -0,0 +1,47 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import junit.framework.Assert;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
public class TestInputStreamEntity {
@Test
public void test() throws Exception {
InputStream is = new ByteArrayInputStream("abc".getBytes());
ByteArrayOutputStream baos = new ByteArrayOutputStream();
InputStreamEntity i = new InputStreamEntity(is);
i.write(baos);
baos.close();
Assert.assertEquals(new String(baos.toByteArray()), "abc");
is = new ByteArrayInputStream("abc".getBytes());
baos = new ByteArrayOutputStream();
i = new InputStreamEntity(is, 1, 1);
i.write(baos);
baos.close();
Assert.assertEquals(baos.toByteArray()[0], 'b');
}
}
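
InputStreamEntity's write(OutputStream) signature matches the JAX-RS StreamingOutput contract, which is presumably how HttpFS resources stream file content back to clients. A hedged sketch under that assumption; the resource path, query parameters, and stream plumbing are all illustrative, not taken from this patch.

import java.io.InputStream;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;
import org.apache.hadoop.lib.wsrs.InputStreamEntity;

// Hypothetical resource; only the InputStreamEntity usage mirrors the tests.
@Path("/file")
public class FileResourceSketch {
  @GET
  public Response read(@QueryParam("offset") long offset,
                       @QueryParam("len") long len) throws Exception {
    InputStream is = openStreamSomehow();  // assumption: stream opened elsewhere
    // (is, offset, len) skips `offset` bytes and copies at most `len`,
    // as TestInputStreamEntity above exercises.
    return Response.ok(new InputStreamEntity(is, offset, len))
        .type("application/octet-stream").build();
  }

  private InputStream openStreamSomehow() {
    return new java.io.ByteArrayInputStream("example".getBytes());
  }
}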


@@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import junit.framework.Assert;
import org.junit.Test;
public class TestIntegerParam {
@Test
public void param() throws Exception {
IntegerParam param = new IntegerParam("p", "1") {
};
Assert.assertEquals(param.getDomain(), "an integer");
Assert.assertEquals(param.value(), new Integer(1));
Assert.assertEquals(param.toString(), "1");
param = new IntegerParam("p", null) {
};
Assert.assertEquals(param.value(), null);
param = new IntegerParam("p", "") {
};
Assert.assertEquals(param.value(), null);
}
@Test(expected = IllegalArgumentException.class)
public void invalid1() throws Exception {
new IntegerParam("p", "x") {
};
}
@Test(expected = IllegalArgumentException.class)
public void invalid2() throws Exception {
new IntegerParam("p", "" + Long.MAX_VALUE) {
};
}
}


@@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;

import junit.framework.Assert;
import org.json.simple.JSONObject;
import org.junit.Test;

import java.io.ByteArrayOutputStream;
import java.util.Map;

public class TestJSONMapProvider {

  @Test
  @SuppressWarnings("unchecked")
  public void test() throws Exception {
    JSONMapProvider p = new JSONMapProvider();

    // Writes Map entities only, and does not precompute a content length.
    Assert.assertTrue(p.isWriteable(Map.class, null, null, null));
    Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null));
    Assert.assertEquals(p.getSize(null, null, null, null, null), -1);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    JSONObject json = new JSONObject();
    json.put("a", "A");
    p.writeTo(json, JSONObject.class, null, null, null, null, baos);
    baos.close();
    Assert.assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
  }
}

View File

@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;

import junit.framework.Assert;
import org.json.simple.JSONObject;
import org.junit.Test;

import java.io.ByteArrayOutputStream;

public class TestJSONProvider {

  @Test
  @SuppressWarnings("unchecked")
  public void test() throws Exception {
    JSONProvider p = new JSONProvider();

    // Writes JSONObject entities only, with an unknown (-1) content length.
    Assert.assertTrue(p.isWriteable(JSONObject.class, null, null, null));
    Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null));
    Assert.assertEquals(p.getSize(null, null, null, null, null), -1);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    JSONObject json = new JSONObject();
    json.put("a", "A");
    p.writeTo(json, JSONObject.class, null, null, null, null, baos);
    baos.close();
    Assert.assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}");
  }
}
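The isWriteable/getSize/writeTo trio is the javax.ws.rs.ext.MessageBodyWriter contract, so these two classes let Jersey serialize JSONObject (JSONProvider) and arbitrary Map (JSONMapProvider) entities directly, with getSize() returning -1 to signal an unknown content length. Assuming the providers are registered with the application (for example via package scanning), a resource could return JSON as in this sketch:

import org.json.simple.JSONObject;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

@Path("status")
public class StatusResource {

  // Hypothetical resource: the returned JSONObject is written to the wire
  // by JSONProvider, producing {"state":"ok"}.
  @GET
  @Produces(MediaType.APPLICATION_JSON)
  @SuppressWarnings("unchecked")
  public JSONObject status() {
    JSONObject json = new JSONObject();
    json.put("state", "ok");
    return json;
  }
}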

View File

@ -0,0 +1,47 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;

import junit.framework.Assert;
import org.junit.Test;

public class TestLongParam {

  @Test
  public void param() throws Exception {
    LongParam param = new LongParam("p", "1") {
    };
    Assert.assertEquals(param.getDomain(), "a long");
    Assert.assertEquals(param.value(), new Long(1));
    Assert.assertEquals(param.toString(), "1");

    // null and empty string both parse to a null value.
    param = new LongParam("p", null) {
    };
    Assert.assertEquals(param.value(), null);

    param = new LongParam("p", "") {
    };
    Assert.assertEquals(param.value(), null);
  }

  @Test(expected = IllegalArgumentException.class)
  public void invalid1() throws Exception {
    new LongParam("p", "x") {
    };
  }
}

View File

@ -0,0 +1,53 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;

import junit.framework.Assert;
import org.junit.Test;

public class TestShortParam {

  @Test
  public void param() throws Exception {
    ShortParam param = new ShortParam("p", "1") {
    };
    Assert.assertEquals(param.getDomain(), "a short");
    Assert.assertEquals(param.value(), new Short((short) 1));
    Assert.assertEquals(param.toString(), "1");

    // null and empty string both parse to a null value.
    param = new ShortParam("p", null) {
    };
    Assert.assertEquals(param.value(), null);

    param = new ShortParam("p", "") {
    };
    Assert.assertEquals(param.value(), null);
  }

  @Test(expected = IllegalArgumentException.class)
  public void invalid1() throws Exception {
    new ShortParam("p", "x") {
    };
  }

  @Test(expected = IllegalArgumentException.class)
  public void invalid2() throws Exception {
    // Integer.MAX_VALUE overflows a short, so parsing must fail.
    new ShortParam("p", "" + Integer.MAX_VALUE) {
    };
  }
}

View File

@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;

import junit.framework.Assert;
import org.junit.Test;

import java.util.regex.Pattern;

public class TestStringParam {

  @Test
  public void param() throws Exception {
    StringParam param = new StringParam("p", "s") {
    };
    Assert.assertEquals(param.getDomain(), "a string");
    Assert.assertEquals(param.value(), "s");
    Assert.assertEquals(param.toString(), "s");

    // null and empty string both parse to a null value.
    param = new StringParam("p", null) {
    };
    Assert.assertEquals(param.value(), null);

    param = new StringParam("p", "") {
    };
    Assert.assertEquals(param.value(), null);

    param.setValue("S");
    Assert.assertEquals(param.value(), "S");
  }

  @Test
  public void paramRegEx() throws Exception {
    // With a Pattern, the pattern itself becomes the reported domain.
    StringParam param = new StringParam("p", "Aaa", Pattern.compile("A.*")) {
    };
    Assert.assertEquals(param.getDomain(), "A.*");
    Assert.assertEquals(param.value(), "Aaa");
    Assert.assertEquals(param.toString(), "Aaa");

    param = new StringParam("p", null) {
    };
    Assert.assertEquals(param.value(), null);
  }

  @Test(expected = IllegalArgumentException.class)
  public void paramInvalidRegEx() throws Exception {
    new StringParam("p", "Baa", Pattern.compile("A.*")) {
    };
  }
}
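paramRegEx above exercises the three-argument constructor: when a Pattern is supplied, the raw value must match it, and the pattern string doubles as the parameter's domain in error messages. A hypothetical pattern-constrained subclass:

package org.apache.hadoop.lib.wsrs;

import java.util.regex.Pattern;

// Hypothetical 'id' parameter restricted to lowercase hex; a value such
// as "XYZ" would fail with the IllegalArgumentException exercised in
// paramInvalidRegEx above.
public class IdParam extends StringParam {

  private static final Pattern HEX = Pattern.compile("[0-9a-f]+");

  public IdParam(String str) {
    super("id", str, HEX);
  }
}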

View File

@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;

import com.sun.jersey.api.core.HttpContext;
import com.sun.jersey.api.core.HttpRequestContext;
import com.sun.jersey.core.spi.component.ComponentScope;
import junit.framework.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.MDC;

import javax.ws.rs.core.MultivaluedMap;
import java.security.Principal;

public class TestUserProvider {

  @Test
  @SuppressWarnings("unchecked")
  public void noUser() {
    MDC.remove("user");
    HttpRequestContext request = Mockito.mock(HttpRequestContext.class);
    Mockito.when(request.getUserPrincipal()).thenReturn(null);
    MultivaluedMap map = Mockito.mock(MultivaluedMap.class);
    Mockito.when(map.getFirst(UserProvider.USER_NAME_PARAM)).thenReturn(null);
    Mockito.when(request.getQueryParameters()).thenReturn(map);
    HttpContext context = Mockito.mock(HttpContext.class);
    Mockito.when(context.getRequest()).thenReturn(request);

    // No principal and no query-string user: resolves to null.
    UserProvider up = new UserProvider();
    Assert.assertNull(up.getValue(context));
    Assert.assertNull(MDC.get("user"));
  }

  @Test
  @SuppressWarnings("unchecked")
  public void queryStringUser() {
    MDC.remove("user");
    HttpRequestContext request = Mockito.mock(HttpRequestContext.class);
    Mockito.when(request.getUserPrincipal()).thenReturn(null);
    MultivaluedMap map = Mockito.mock(MultivaluedMap.class);
    Mockito.when(map.getFirst(UserProvider.USER_NAME_PARAM)).thenReturn("foo");
    Mockito.when(request.getQueryParameters()).thenReturn(map);
    HttpContext context = Mockito.mock(HttpContext.class);
    Mockito.when(context.getRequest()).thenReturn(request);

    // Falls back to the query-string user and records it in the MDC.
    UserProvider up = new UserProvider();
    Assert.assertEquals(up.getValue(context).getName(), "foo");
    Assert.assertEquals(MDC.get("user"), "foo");
  }

  @Test
  @SuppressWarnings("unchecked")
  public void principalUser() {
    MDC.remove("user");
    HttpRequestContext request = Mockito.mock(HttpRequestContext.class);
    Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() {
      @Override
      public String getName() {
        return "bar";
      }
    });
    HttpContext context = Mockito.mock(HttpContext.class);
    Mockito.when(context.getRequest()).thenReturn(request);

    // An authenticated principal takes precedence and is recorded in the MDC.
    UserProvider up = new UserProvider();
    Assert.assertEquals(up.getValue(context).getName(), "bar");
    Assert.assertEquals(MDC.get("user"), "bar");
  }

  @Test
  public void getters() {
    UserProvider up = new UserProvider();
    Assert.assertEquals(up.getScope(), ComponentScope.PerRequest);
    Assert.assertEquals(up.getInjectable(null, null, Principal.class), up);
    Assert.assertNull(up.getInjectable(null, null, String.class));
  }
}
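Taken together, these tests pin down UserProvider's behavior: a per-request Jersey injectable (ComponentScope.PerRequest) bound to java.security.Principal, which prefers the request's authenticated principal, falls back to the query-string parameter named by UserProvider.USER_NAME_PARAM, and mirrors the resolved name into the SLF4J MDC under the "user" key. Assuming the provider is registered, a resource method would receive the caller through @Context, as in this sketch:

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import java.security.Principal;

@Path("whoami")
public class WhoAmIResource {

  // Hypothetical resource: Jersey resolves 'user' via UserProvider, so it
  // is null only when neither a principal nor a query-string user exists.
  @GET
  public String whoami(@Context Principal user) {
    return user == null ? "anonymous" : user.getName();
  }
}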

Some files were not shown because too many files have changed in this diff.