Merge trunk into HA branch
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1166495 13f79535-47bb-0310-9956-ffa450edef68
commit d10631f728
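Most of what follows is the HADOOP-7579 rename of the former Alfredo library to Hadoop Auth (package org.apache.hadoop.security.authentication). As a quick orientation, here is a minimal Java sketch of the renamed client API, modeled on the client example in the Examples documentation changed further down; the endpoint URL and the wrapper class are illustrative assumptions, not part of this commit.

import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

public class WhoClientSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative endpoint; mirrors the hadoop-auth-examples layout used in the docs below.
    URL url = new URL("http://localhost:8080/hadoop-auth-examples/kerberos/who");
    // The Token caches the signed "hadoop.auth" cookie across requests.
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
    System.out.println("HTTP status: " + conn.getResponseCode());
  }
}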
@@ -249,8 +249,8 @@ setup () {
echo "======================================================================"
echo ""
echo ""
echo "$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
if [[ $? != 0 ]] ; then
echo "Trunk compilation is broken?"
cleanupAndExit 1

@@ -366,14 +366,14 @@ checkJavadocWarnings () {
echo "======================================================================"
echo ""
echo ""
echo "$MVN clean compile javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1"
echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1"
if [ -d hadoop-project ]; then
(cd hadoop-project; $MVN install)
fi
if [ -d hadoop-common-project/hadoop-annotations ]; then
(cd hadoop-common-project/hadoop-annotations; $MVN install)
fi
$MVN clean compile javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | $AWK '/Javadoc Warnings/,EOF' | $GREP warning | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
echo ""
echo ""

@@ -404,8 +404,8 @@ checkJavacWarnings () {
echo "======================================================================"
echo ""
echo ""
echo "$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
echo "$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
$MVN clean test -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
if [[ $? != 0 ]] ; then
JIRA_COMMENT="$JIRA_COMMENT

@@ -488,8 +488,8 @@ checkStyle () {
echo "THIS IS NOT IMPLEMENTED YET"
echo ""
echo ""
echo "$MVN compile checkstyle:checkstyle -D${PROJECT_NAME}PatchProcess"
$MVN compile checkstyle:checkstyle -D${PROJECT_NAME}PatchProcess
echo "$MVN test checkstyle:checkstyle -DskipTests -D${PROJECT_NAME}PatchProcess"
$MVN test checkstyle:checkstyle -DskipTests -D${PROJECT_NAME}PatchProcess

JIRA_COMMENT_FOOTER="Checkstyle results: $BUILD_URL/artifact/trunk/build/test/checkstyle-errors.html
$JIRA_COMMENT_FOOTER"

@@ -520,8 +520,8 @@ checkFindbugsWarnings () {
echo "======================================================================"
echo ""
echo ""
echo "$MVN clean compile findbugs:findbugs -D${PROJECT_NAME}PatchProcess"
$MVN clean compile findbugs:findbugs -D${PROJECT_NAME}PatchProcess < /dev/null
echo "$MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess"
$MVN clean test findbugs:findbugs -DskipTests -D${PROJECT_NAME}PatchProcess < /dev/null

if [ $? != 0 ] ; then
JIRA_COMMENT="$JIRA_COMMENT
@@ -18,15 +18,15 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>0.24.0-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-alfredo-examples</artifactId>
<artifactId>hadoop-auth-examples</artifactId>
<version>0.24.0-SNAPSHOT</version>
<packaging>war</packaging>

<name>Hadoop Alfredo Examples</name>
<description>Hadoop Alfredo - Java HTTP SPNEGO Examples</description>
<name>Apache Hadoop Auth Examples</name>
<description>Apache Hadoop Auth Examples - Java HTTP SPNEGO</description>

<dependencies>
<dependency>

@@ -36,7 +36,7 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-alfredo</artifactId>
<artifactId>hadoop-auth</artifactId>
<scope>compile</scope>
</dependency>
<dependency>

@@ -53,6 +53,18 @@

<build>
<plugins>
<plugin>
<artifactId>maven-war-plugin</artifactId>
<configuration>
<warName>hadoop-auth-examples</warName>
</configuration>
</plugin>
<plugin>
<artifactId>maven-deploy-plugin</artifactId>
<configuration>
<skip>true</skip>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>

@@ -64,7 +76,7 @@
</execution>
</executions>
<configuration>
<mainClass>org.apache.hadoop.alfredo.examples.WhoClient</mainClass>
<mainClass>org.apache.hadoop.security.authentication.examples.WhoClient</mainClass>
<arguments>
<argument>${url}</argument>
</arguments>
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.examples;
package org.apache.hadoop.security.authentication.examples;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.examples;
package org.apache.hadoop.security.authentication.examples;

import org.apache.hadoop.alfredo.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

import java.io.BufferedReader;
import java.io.InputStreamReader;

@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.examples;
package org.apache.hadoop.security.authentication.examples;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;

@@ -16,4 +16,4 @@ log4j.appender.test.Target=System.out
log4j.appender.test.layout=org.apache.log4j.PatternLayout
log4j.appender.test.layout.ConversionPattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n

log4j.logger.org.apache.hadoop.alfredo=DEBUG, test
log4j.logger.org.apache.hadoop.security.authentication=DEBUG, test

@@ -16,7 +16,7 @@

<servlet>
<servlet-name>whoServlet</servlet-name>
<servlet-class>org.apache.hadoop.alfredo.examples.WhoServlet</servlet-class>
<servlet-class>org.apache.hadoop.security.authentication.examples.WhoServlet</servlet-class>
</servlet>

<servlet-mapping>

@@ -36,12 +36,12 @@

<filter>
<filter-name>requestLoggerFilter</filter-name>
<filter-class>org.apache.hadoop.alfredo.examples.RequestLoggerFilter</filter-class>
<filter-class>org.apache.hadoop.security.authentication.examples.RequestLoggerFilter</filter-class>
</filter>

<filter>
<filter-name>anonymousFilter</filter-name>
<filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
<filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
<init-param>
<param-name>type</param-name>
<param-value>simple</param-value>

@@ -58,7 +58,7 @@

<filter>
<filter-name>simpleFilter</filter-name>
<filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
<filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
<init-param>
<param-name>type</param-name>
<param-value>simple</param-value>

@@ -75,7 +75,7 @@

<filter>
<filter-name>kerberosFilter</filter-name>
<filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
<filter-class>org.apache.hadoop.security.authentication.server.AuthenticationFilter</filter-class>
<init-param>
<param-name>type</param-name>
<param-value>kerberos</param-value>

@@ -13,6 +13,6 @@
-->
<html>
<body>
<h1>Hello Hadoop Alfredo Pseudo/Simple Authentication with anonymous users!</h1>
<h1>Hello Hadoop Auth Pseudo/Simple Authentication with anonymous users!</h1>
</body>
</html>

@@ -13,6 +13,6 @@
-->
<html>
<body>
<h1>Hello Hadoop Alfredo Examples</h1>
<h1>Hello Hadoop Auth Examples!</h1>
</body>
</html>

@@ -13,6 +13,6 @@
-->
<html>
<body>
<h1>Hello Hadoop Alfredo Pseudo/Simple Authentication!</h1>
<h1>Hello Hadoop Auth Kerberos SPNEGO Authentication!</h1>
</body>
</html>

@@ -13,6 +13,6 @@
-->
<html>
<body>
<h1>Hello Hadoop Alfredo Kerberos SPNEGO Authentication!</h1>
<h1>Hello Hadoop Auth Pseudo/Simple Authentication!</h1>
</body>
</html>
@@ -1,20 +1,20 @@

Build instructions for Hadoop Alfredo
Build instructions for Hadoop Auth

Same as for Hadoop.

For more details refer to the Alfredo documentation pages.
For more details refer to the Hadoop Auth documentation pages.

-----------------------------------------------------------------------------
Caveats:

* Alfredo has profile to enable Kerberos testcases (testKerberos)
* Hadoop Auth has profile to enable Kerberos testcases (testKerberos)

To run Kerberos testcases a KDC, 2 kerberos principals and a keytab file
are required (refer to the Alfredo documentation pages for details).
are required (refer to the Hadoop Auth documentation pages for details).

* Alfredo does not have a distribution profile (dist)
* Hadoop Auth does not have a distribution profile (dist)

* Alfredo does not have a native code profile (native)
* Hadoop Auth does not have a native code profile (native)

-----------------------------------------------------------------------------

@@ -1,6 +1,6 @@
Hadoop Alfredo, Java HTTP SPNEGO
Hadoop Auth, Java HTTP SPNEGO

Hadoop Alfredo is a Java library consisting of a client and a server
Hadoop Auth is a Java library consisting of a client and a server
components to enable Kerberos SPNEGO authentication for HTTP.

The client component is the AuthenticatedURL class.

@@ -10,6 +10,6 @@ The server component is the AuthenticationFilter servlet filter class.
Authentication mechanisms support is pluggable in both the client and
the server components via interfaces.

In addition to Kerberos SPNEGO, Alfredo also supports Pseudo/Simple
In addition to Kerberos SPNEGO, Hadoop Auth also supports Pseudo/Simple
authentication (trusting the value of the query string parameter
'user.name').
@@ -21,13 +21,12 @@
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-alfredo</artifactId>
<artifactId>hadoop-auth</artifactId>
<version>0.24.0-SNAPSHOT</version>
<packaging>jar</packaging>

<name>Apache Hadoop Alfredo</name>
<description>Apache Hadoop Alfredo - Java HTTP SPNEGO</description>
<url>http://hadoop.apache.org/alfredo</url>
<name>Apache Hadoop Auth</name>
<description>Apache Hadoop Auth - Java HTTP SPNEGO</description>

<properties>
<maven.build.timestamp.format>yyyyMMdd</maven.build.timestamp.format>
@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.client;
package org.apache.hadoop.security.authentication.client;

import org.apache.hadoop.alfredo.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;

import java.io.IOException;
import java.net.HttpURLConnection;

@@ -63,7 +63,7 @@ public class AuthenticatedURL {
/**
* Name of the HTTP cookie used for the authentication token between the client and the server.
*/
public static final String AUTH_COOKIE = "alfredo.auth";
public static final String AUTH_COOKIE = "hadoop.auth";

private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "=";

@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.client;
package org.apache.hadoop.security.authentication.client;

/**
* Exception thrown when an authentication error occurrs.

@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.client;
package org.apache.hadoop.security.authentication.client;


import java.io.IOException;

@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.client;
package org.apache.hadoop.security.authentication.client;

import com.sun.security.auth.module.Krb5LoginModule;
import org.apache.commons.codec.binary.Base64;

@@ -48,17 +48,17 @@ public class KerberosAuthenticator implements Authenticator {
/**
* HTTP header used by the SPNEGO server endpoint during an authentication sequence.
*/
public static String WWW_AUTHENTICATE = "WWW-Authenticate";
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";

/**
* HTTP header used by the SPNEGO client endpoint during an authentication sequence.
*/
public static String AUTHORIZATION = "Authorization";
public static final String AUTHORIZATION = "Authorization";

/**
* HTTP header prefix used by the SPNEGO client/server endpoints during an authentication sequence.
*/
public static String NEGOTIATE = "Negotiate";
public static final String NEGOTIATE = "Negotiate";

private static final String AUTH_HTTP_METHOD = "OPTIONS";
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.client;
package org.apache.hadoop.security.authentication.client;

import java.io.IOException;
import java.net.HttpURLConnection;

@@ -11,12 +11,12 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.server;
package org.apache.hadoop.security.authentication.server;

import org.apache.hadoop.alfredo.client.AuthenticatedURL;
import org.apache.hadoop.alfredo.client.AuthenticationException;
import org.apache.hadoop.alfredo.util.Signer;
import org.apache.hadoop.alfredo.util.SignerException;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.security.authentication.util.SignerException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -103,6 +103,8 @@ public class AuthenticationFilter implements Filter {
*/
public static final String COOKIE_PATH = "cookie.path";

private static final Random RAN = new Random();

private Signer signer;
private AuthenticationHandler authHandler;
private boolean randomSecret;

@@ -139,7 +141,7 @@ public class AuthenticationFilter implements Filter {
}

try {
Class klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
Class<?> klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
authHandler = (AuthenticationHandler) klass.newInstance();
authHandler.init(config);
} catch (ClassNotFoundException ex) {

@@ -151,7 +153,7 @@ public class AuthenticationFilter implements Filter {
}
String signatureSecret = config.getProperty(configPrefix + SIGNATURE_SECRET);
if (signatureSecret == null) {
signatureSecret = Long.toString(new Random(System.currentTimeMillis()).nextLong());
signatureSecret = Long.toString(RAN.nextLong());
randomSecret = true;
LOG.warn("'signature.secret' configuration not set, using a random value as secret");
}

@@ -237,7 +239,7 @@ public class AuthenticationFilter implements Filter {
*/
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) throws ServletException {
Properties props = new Properties();
Enumeration names = filterConfig.getInitParameterNames();
Enumeration<?> names = filterConfig.getInitParameterNames();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
if (name.startsWith(configPrefix)) {

@@ -381,7 +383,7 @@ public class AuthenticationFilter implements Filter {
}

/**
* Creates the Alfredo authentiation HTTP cookie.
* Creates the Hadoop authentiation HTTP cookie.
* <p/>
* It sets the domain and path specified in the configuration.
*
@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.server;
package org.apache.hadoop.security.authentication.server;

import org.apache.hadoop.alfredo.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.AuthenticationException;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;

@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.server;
package org.apache.hadoop.security.authentication.server;

import org.apache.hadoop.alfredo.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.AuthenticationException;

import java.security.Principal;
import java.util.Arrays;

@@ -11,13 +11,13 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.server;
package org.apache.hadoop.security.authentication.server;

import org.apache.hadoop.alfredo.client.AuthenticationException;
import org.apache.hadoop.alfredo.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import com.sun.security.auth.module.Krb5LoginModule;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.alfredo.util.KerberosName;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSManager;

@@ -11,10 +11,10 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.server;
package org.apache.hadoop.security.authentication.server;

import org.apache.hadoop.alfredo.client.AuthenticationException;
import org.apache.hadoop.alfredo.client.PseudoAuthenticator;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;

@@ -1,4 +1,4 @@
package org.apache.hadoop.alfredo.util;
package org.apache.hadoop.security.authentication.util;

/**
* Licensed to the Apache Software Foundation (ASF) under one

@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.util;
package org.apache.hadoop.security.authentication.util;

import org.apache.commons.codec.binary.Base64;

@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.util;
package org.apache.hadoop.security.authentication.util;

/**
* Exception thrown by {@link Signer} when a string signature is invalid.
@@ -11,12 +11,12 @@
~~ limitations under the License. See accompanying LICENSE file.

---
Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Building It
Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It
---
---
${maven.build.timestamp}

Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Building It
Hadoop Auth, Java HTTP SPNEGO ${project.version} - Building It

\[ {{{./index.html}Go Back}} \]

@@ -50,14 +50,14 @@ $ mvn test -PtestKerberos
The following Maven <<<-D>>> options can be used to change the default
values:

* <<<alfredo.test.kerberos.realm>>>: default value <<LOCALHOST>>
* <<<hadoop-auth.test.kerberos.realm>>>: default value <<LOCALHOST>>

* <<<alfredo.test.kerberos.client.principal>>>: default value <<client>>
* <<<hadoop-auth.test.kerberos.client.principal>>>: default value <<client>>

* <<<alfredo.test.kerberos.server.principal>>>: default value
* <<<hadoop-auth.test.kerberos.server.principal>>>: default value
<<HTTP/localhost>> (it must start 'HTTP/')

* <<<alfredo.test.kerberos.keytab.file>>>: default value
* <<<hadoop-auth.test.kerberos.keytab.file>>>: default value
<<${HOME}/${USER}.keytab>>

** Generating Documentation

@@ -69,7 +69,7 @@ $ mvn package -Pdocs
+---+

The generated documentation is available at
<<<hadoop-alfredo/target/site/>>>.
<<<hadoop-auth/target/site/>>>.

\[ {{{./index.html}Go Back}} \]
@@ -11,30 +11,30 @@
~~ limitations under the License. See accompanying LICENSE file.

---
Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Server Side
Hadoop Auth, Java HTTP SPNEGO ${project.version} - Server Side
Configuration
---
---
${maven.build.timestamp}

Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Server Side
Hadoop Auth, Java HTTP SPNEGO ${project.version} - Server Side
Configuration

\[ {{{./index.html}Go Back}} \]

* Server Side Configuration Setup

The {{{./apidocs/org/apache/hadoop/alfredo/server/AuthenticationFilter.html}
AuthenticationFilter filter}} is Alfredo's server side component.
The {{{./apidocs/org/apache/hadoop/auth/server/AuthenticationFilter.html}
AuthenticationFilter filter}} is Hadoop Auth's server side component.

This filter must be configured in front of all the web application resources
that required authenticated requests. For example:

The Alfredo and dependent JAR files must be in the web application classpath
(commonly the <<<WEB-INF/lib>>> directory).
The Hadoop Auth and dependent JAR files must be in the web application
classpath (commonly the <<<WEB-INF/lib>>> directory).

Alfredo uses SLF4J-API for logging. Alfredo Maven POM dependencies define the
SLF4J API dependency but it does not define the dependency on a concrete
Hadoop Auth uses SLF4J-API for logging. Auth Maven POM dependencies define
the SLF4J API dependency but it does not define the dependency on a concrete
logging implementation, this must be addded explicitly to the web
application. For example, if the web applicationan uses Log4j, the
SLF4J-LOG4J12 and LOG4J jar files must be part part of the web application

@@ -47,7 +47,7 @@ Configuration

* <<<[PREFIX.]type>>>: the authentication type keyword (<<<simple>>> or
<<<kerberos>>>) or a
{{{./apidocs/org/apache/hadoop/alfredo/server/AuthenticationHandler.html}
{{{./apidocs/org/apache/hadoop/auth/server/AuthenticationHandler.html}
Authentication handler implementation}}.

* <<<[PREFIX.]signature.secret>>>: The secret to SHA-sign the generated

@@ -80,7 +80,7 @@ Configuration

* <<<[PREFIX.]kerberos.keytab>>>: The path to the keytab file containing
the credentials for the kerberos principal. For example:
<<</Users/tucu/alfredo.keytab>>>. There is no default value.
<<</Users/tucu/tucu.keytab>>>. There is no default value.

<<Example>>:

@@ -90,7 +90,7 @@ Configuration

<filter>
<filter-name>kerberosFilter</filter-name>
<filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
<filter-class>org.apache.hadoop.security.auth.server.AuthenticationFilter</filter-class>
<init-param>
<param-name>type</param-name>
<param-value>kerberos</param-value>

@@ -113,7 +113,7 @@ Configuration
</init-param>
<init-param>
<param-name>kerberos.keytab</param-name>
<param-value>/tmp/alfredo.keytab</param-value>
<param-value>/tmp/auth.keytab</param-value>
</init-param>
</filter>

@@ -146,7 +146,7 @@ Configuration

<filter>
<filter-name>simpleFilter</filter-name>
<filter-class>org.apache.hadoop.alfredo.server.AuthenticationFilter</filter-class>
<filter-class>org.apache.hadoop.security.auth.server.AuthenticationFilter</filter-class>
<init-param>
<param-name>type</param-name>
<param-value>simple</param-value>
@@ -11,16 +11,16 @@
~~ limitations under the License. See accompanying LICENSE file.

---
Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples
Hadoop Auth, Java HTTP SPNEGO ${project.version} - Examples
---
---
${maven.build.timestamp}

Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples
Hadoop Auth, Java HTTP SPNEGO ${project.version} - Examples

\[ {{{./index.html}Go Back}} \]

* Accessing a Alfredo protected URL Using a browser
* Accessing a Hadoop Auth protected URL Using a browser

<<IMPORTANT:>> The browser must support HTTP Kerberos SPNEGO. For example,
Firefox or Internet Explorer.

@@ -31,7 +31,7 @@ Hadoop Alfredo, Java HTTP SPNEGO ${project.version} - Examples
the domain of the web server that is HTTP Kerberos SPNEGO protected (if using
multiple domains and hostname use comma to separate them).

* Accessing a Alfredo protected URL Using <<<curl>>>
* Accessing a Hadoop Auth protected URL Using <<<curl>>>

<<IMPORTANT:>> The <<<curl>>> version must support GSS, run <<<curl -V>>>.

@@ -48,10 +48,10 @@ Features: GSS-Negotiate IPv6 Largefile NTLM SSL libz
+---+
$ kinit
Please enter the password for tucu@LOCALHOST:
$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/alfredo-examples/kerberos/who
$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who
Enter host password for user 'tucu':

Hello Alfredo!
Hello Hadoop Auth Examples!
+---+

* The <<<--negotiate>>> option enables SPNEGO in <<<curl>>>.

@@ -68,7 +68,7 @@ Hello Alfredo!

+---+
...
URL url = new URL("http://localhost:8080/alfredo/kerberos/who");
URL url = new URL("http://localhost:8080/hadoop-auth/kerberos/who");
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
...
HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();

@@ -79,12 +79,12 @@ conn = new AuthenticatedURL(url, token).openConnection();

* Building and Running the Examples

Download Alfredo's source code, the examples are in the
Download Hadoop-Auth's source code, the examples are in the
<<<src/main/examples>>> directory.

** Server Example:

Edit the <<<src/main/examples/src/main/webapp/WEB-INF/web.xml>>> and set the
Edit the <<<hadoop-auth-examples/src/main/webapp/WEB-INF/web.xml>>> and set the
right configuration init parameters for the <<<AuthenticationFilter>>>
definition configured for Kerberos (the right Kerberos principal and keytab
file must be specified). Refer to the {{{./Configuration.html}Configuration

@@ -106,11 +106,11 @@ conn = new AuthenticatedURL(url, token).openConnection();
$ kinit
Please enter the password for tucu@LOCALHOST:

$ curl http://localhost:8080/alfredo-examples/anonymous/who
$ curl http://localhost:8080/hadoop-auth-examples/anonymous/who

$ curl http://localhost:8080/alfredo-examples/simple/who?user.name=foo
$ curl http://localhost:8080/hadoop-auth-examples/simple/who?user.name=foo

$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/alfredo-examples/kerberos/who
$ curl --negotiate -u foo -b ~/cookiejar.txt -c ~/cookiejar.txt http://localhost:8080/hadoop-auth-examples/kerberos/who
+---+

** Accessing the server using the Java client example

@@ -121,7 +121,7 @@ Please enter the password for tucu@LOCALHOST:

$ cd examples

$ mvn exec:java -Durl=http://localhost:8080/alfredo-examples/kerberos/who
$ mvn exec:java -Durl=http://localhost:8080/hadoop-auth-examples/kerberos/who

....
@@ -11,27 +11,27 @@
~~ limitations under the License. See accompanying LICENSE file.

---
Hadoop Alfredo, Java HTTP SPNEGO ${project.version}
Hadoop Auth, Java HTTP SPNEGO ${project.version}
---
---
${maven.build.timestamp}

Hadoop Alfredo, Java HTTP SPNEGO ${project.version}
Hadoop Auth, Java HTTP SPNEGO ${project.version}

Hadoop Alfredo is a Java library consisting of a client and a server
Hadoop Auth is a Java library consisting of a client and a server
components to enable Kerberos SPNEGO authentication for HTTP.

Alfredo also supports additional authentication mechanisms on the client
Hadoop Auth also supports additional authentication mechanisms on the client
and the server side via 2 simple interfaces.

* License

Alfredo is distributed under {{{http://www.apache.org/licenses/}Apache
Hadoop Auth is distributed under {{{http://www.apache.org/licenses/}Apache
License 2.0}}.

* How Does Alfredo Works?
* How Does Auth Works?

Alfredo enforces authentication on protected resources, once authentiation
Hadoop Auth enforces authentication on protected resources, once authentiation
has been established it sets a signed HTTP Cookie that contains an
authentication token with the user name, user principal, authentication type
and expiration time.

@@ -11,7 +11,7 @@
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project name="Hadoop Alfredo">
<project name="Hadoop Auth">

<version position="right"/>
@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo;
package org.apache.hadoop.security.authentication;

import com.sun.security.auth.module.Krb5LoginModule;

@@ -34,7 +34,7 @@ import java.util.concurrent.Callable;
* Test helper class for Java Kerberos setup.
*/
public class KerberosTestUtils {
private static final String PREFIX = "alfredo.test.";
private static final String PREFIX = "hadoop-auth.test.";

public static final String REALM = PREFIX + "kerberos.realm";

@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.client;
package org.apache.hadoop.security.authentication.client;

import org.apache.hadoop.alfredo.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import junit.framework.TestCase;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;

@@ -57,6 +57,7 @@ public abstract class AuthenticatorTestCase extends TestCase {
}
}

@SuppressWarnings("serial")
public static class TestServlet extends HttpServlet {

@Override

@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.client;
package org.apache.hadoop.security.authentication.client;

import junit.framework.TestCase;
import org.mockito.Mockito;

@@ -11,12 +11,12 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.client;
package org.apache.hadoop.security.authentication.client;

import org.apache.hadoop.alfredo.KerberosTestUtils;
import org.apache.hadoop.alfredo.server.AuthenticationFilter;
import org.apache.hadoop.alfredo.server.PseudoAuthenticationHandler;
import org.apache.hadoop.alfredo.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

import java.net.HttpURLConnection;
import java.net.URL;

@@ -11,10 +11,10 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.client;
package org.apache.hadoop.security.authentication.client;

import org.apache.hadoop.alfredo.server.AuthenticationFilter;
import org.apache.hadoop.alfredo.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;

import java.net.HttpURLConnection;
import java.net.URL;

@@ -11,11 +11,11 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.server;
package org.apache.hadoop.security.authentication.server;

import org.apache.hadoop.alfredo.client.AuthenticatedURL;
import org.apache.hadoop.alfredo.client.AuthenticationException;
import org.apache.hadoop.alfredo.util.Signer;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.util.Signer;
import junit.framework.TestCase;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;

@@ -11,9 +11,9 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.server;
package org.apache.hadoop.security.authentication.server;

import org.apache.hadoop.alfredo.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import junit.framework.TestCase;

public class TestAuthenticationToken extends TestCase {

@@ -11,11 +11,11 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.server;
package org.apache.hadoop.security.authentication.server;

import org.apache.hadoop.alfredo.KerberosTestUtils;
import org.apache.hadoop.alfredo.client.AuthenticationException;
import org.apache.hadoop.alfredo.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import junit.framework.TestCase;
import org.apache.commons.codec.binary.Base64;
import org.ietf.jgss.GSSContext;

@@ -11,11 +11,11 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.server;
package org.apache.hadoop.security.authentication.server;

import org.apache.hadoop.alfredo.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import junit.framework.TestCase;
import org.apache.hadoop.alfredo.client.PseudoAuthenticator;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import org.mockito.Mockito;

import javax.servlet.http.HttpServletRequest;

@@ -1,4 +1,4 @@
package org.apache.hadoop.alfredo.util;
package org.apache.hadoop.security.authentication.util;

/**
* Licensed to the Apache Software Foundation (ASF) under one

@@ -20,7 +20,7 @@ package org.apache.hadoop.alfredo.util;

import java.io.IOException;

import org.apache.hadoop.alfredo.KerberosTestUtils;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;

@@ -11,7 +11,7 @@
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.alfredo.util;
package org.apache.hadoop.security.authentication.util;

import junit.framework.TestCase;
@@ -2,6 +2,16 @@ Hadoop Change Log

Trunk (unreleased changes)

IMPROVEMENTS

HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
HADOOP-7524 Change RPC to allow multiple protocols including multuple versions of the same protocol (sanjay Radia)

BUGS

HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
by Jersey (Alejandro Abdelnur via atm)

Release 0.23.0 - Unreleased

INCOMPATIBLE CHANGES

@@ -347,6 +357,20 @@ Release 0.23.0 - Unreleased
HADOOP-7547. Add generic type in WritableComparable subclasses.
(Uma Maheswara Rao G via szetszwo)

HADOOP-7579. Rename package names from alfredo to auth.
(Alejandro Abdelnur via szetszwo)

HADOOP-7594. Support HTTP REST in HttpServer. (szetszwo)

HADOOP-7552. FileUtil#fullyDelete doesn't throw IOE but lists it
in the throws clause. (eli)

HADOOP-7580. Add a version of getLocalPathForWrite to LocalDirAllocator
which doesn't create dirs. (Chris Douglas & Siddharth Seth via acmurthy)

HADOOP-7507. Allow ganglia metrics to include the metrics system tags
in the gmetric names. (Alejandro Abdelnur via todd)

OPTIMIZATIONS

HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole

@@ -533,6 +557,12 @@ Release 0.23.0 - Unreleased
HADOOP-7560. Change src layout to be heirarchical. (Alejandro Abdelnur
via acmurthy)

HADOOP-7576. Fix findbugs warnings and javac warnings in hadoop-auth.
(szetszwo)

HADOOP-7593. Fix AssertionError in TestHttpServer.testMaxThreads().
(Uma Maheswara Rao G via szetszwo)

Release 0.22.0 - Unreleased

INCOMPATIBLE CHANGES
@@ -92,6 +92,28 @@
<artifactId>jetty-util</artifactId>
<scope>compile</scope>
</dependency>

<dependency>
<groupId>asm</groupId>
<artifactId>asm</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
<scope>compile</scope>
</dependency>

<dependency>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>

@@ -239,7 +261,7 @@
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-alfredo</artifactId>
<artifactId>hadoop-auth</artifactId>
<scope>compile</scope>
</dependency>
</dependencies>
@@ -43,6 +43,16 @@
#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40

# Tag values to use for the ganglia prefix. If not defined no tags are used.
# If '*' all tags are used. If specifiying multiple tags separate them with
# commas. Note that the last segment of the property name is the context name.
#
#*.sink.ganglia.tagsForPrefix.jvm=ProcesName
#*.sink.ganglia.tagsForPrefix.dfs=
#*.sink.ganglia.tagsForPrefix.rpc=
#*.sink.ganglia.tagsForPrefix.mapred=
#*.sink.ganglia.tagsForPrefix.fairscheduler=

#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649

#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649

@@ -149,3 +149,25 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
#log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
#log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
#log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

#
# Yarn ResourceManager Application Summary Log
#
# Set the ResourceManager summary log filename
#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
# Set the ResourceManager summary log level and appender
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY

# Appender for ResourceManager Application Summary Log - rolled daily
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)

#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd
|
|||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.util.Shell;
|
||||
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
|
||||
|
||||
|
@ -88,7 +87,7 @@ public class FileUtil {
|
|||
* (4) If dir is a normal directory, then dir and all its contents recursively
|
||||
* are deleted.
|
||||
*/
|
||||
public static boolean fullyDelete(File dir) throws IOException {
|
||||
public static boolean fullyDelete(File dir) {
|
||||
if (dir.delete()) {
|
||||
// dir is (a) normal file, (b) symlink to a file, (c) empty directory or
|
||||
// (d) symlink to a directory
|
||||
|
@ -108,7 +107,7 @@ public class FileUtil {
|
|||
* If dir is a symlink to a directory, all the contents of the actual
|
||||
* directory pointed to by dir will be deleted.
|
||||
*/
|
||||
public static boolean fullyDeleteContents(File dir) throws IOException {
|
||||
public static boolean fullyDeleteContents(File dir) {
|
||||
boolean deletionSucceeded = true;
|
||||
File contents[] = dir.listFiles();
|
||||
if (contents != null) {
|
||||
|
|
|
@ -128,8 +128,26 @@ public class LocalDirAllocator {
|
|||
*/
|
||||
public Path getLocalPathForWrite(String pathStr, long size,
|
||||
Configuration conf) throws IOException {
|
||||
return getLocalPathForWrite(pathStr, size, conf, true);
|
||||
}
|
||||
|
||||
/** Get a path from the local FS. Pass size as
|
||||
* SIZE_UNKNOWN if not known apriori. We
|
||||
* round-robin over the set of disks (via the configured dirs) and return
|
||||
* the first complete path which has enough space
|
||||
* @param pathStr the requested path (this will be created on the first
|
||||
* available disk)
|
||||
* @param size the size of the file that is going to be written
|
||||
* @param conf the Configuration object
|
||||
* @param checkWrite ensure that the path is writable
|
||||
* @return the complete path to the file on a local disk
|
||||
* @throws IOException
|
||||
*/
|
||||
public Path getLocalPathForWrite(String pathStr, long size,
|
||||
Configuration conf,
|
||||
boolean checkWrite) throws IOException {
|
||||
AllocatorPerContext context = obtainContext(contextCfgItemName);
|
||||
return context.getLocalPathForWrite(pathStr, size, conf);
|
||||
return context.getLocalPathForWrite(pathStr, size, conf, checkWrite);
|
||||
}
|
||||
|
||||
/** Get a path from the local FS for reading. We search through all the
|
||||
|
@ -145,6 +163,23 @@ public class LocalDirAllocator {
|
|||
AllocatorPerContext context = obtainContext(contextCfgItemName);
|
||||
return context.getLocalPathToRead(pathStr, conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all of the paths that currently exist in the working directories.
|
||||
* @param pathStr the path underneath the roots
|
||||
* @param conf the configuration to look up the roots in
|
||||
* @return all of the paths that exist under any of the roots
|
||||
* @throws IOException
|
||||
*/
|
||||
public Iterable<Path> getAllLocalPathsToRead(String pathStr,
|
||||
Configuration conf
|
||||
) throws IOException {
|
||||
AllocatorPerContext context;
|
||||
synchronized (this) {
|
||||
context = obtainContext(contextCfgItemName);
|
||||
}
|
||||
return context.getAllLocalPathsToRead(pathStr, conf);
|
||||
}
|
||||
|
||||
/** Creates a temporary file in the local FS. Pass size as -1 if not known
|
||||
* apriori. We round-robin over the set of disks (via the configured dirs)
|
||||
|
@ -214,7 +249,8 @@ public class LocalDirAllocator {
|
|||
/** This method gets called everytime before any read/write to make sure
|
||||
* that any change to localDirs is reflected immediately.
|
||||
*/
|
||||
private void confChanged(Configuration conf) throws IOException {
|
||||
private synchronized void confChanged(Configuration conf)
|
||||
throws IOException {
|
||||
String newLocalDirs = conf.get(contextCfgItemName);
|
||||
if (!newLocalDirs.equals(savedLocalDirs)) {
|
||||
localDirs = conf.getTrimmedStrings(contextCfgItemName);
|
||||
|
@ -251,18 +287,22 @@ public class LocalDirAllocator {
|
|||
}
|
||||
}
|
||||
|
||||
private Path createPath(String path) throws IOException {
|
||||
private Path createPath(String path,
|
||||
boolean checkWrite) throws IOException {
|
||||
Path file = new Path(new Path(localDirs[dirNumLastAccessed]),
|
||||
path);
|
||||
//check whether we are able to create a directory here. If the disk
|
||||
//happens to be RDONLY we will fail
|
||||
try {
|
||||
DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
|
||||
return file;
|
||||
} catch (DiskErrorException d) {
|
||||
LOG.warn("Disk Error Exception: ", d);
|
||||
return null;
|
||||
if (checkWrite) {
|
||||
//check whether we are able to create a directory here. If the disk
|
||||
//happens to be RDONLY we will fail
|
||||
try {
|
||||
DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
|
||||
return file;
|
||||
} catch (DiskErrorException d) {
|
||||
LOG.warn("Disk Error Exception: ", d);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return file;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -272,17 +312,6 @@ public class LocalDirAllocator {
|
|||
int getCurrentDirectoryIndex() {
|
||||
return dirNumLastAccessed;
|
||||
}
|
||||
|
||||
/** Get a path from the local FS. This method should be used if the size of
|
||||
* the file is not known a priori.
|
||||
*
|
||||
* It will use roulette selection, picking directories
|
||||
 * with probability proportional to their available space.
 */
public synchronized Path getLocalPathForWrite(String path,
    Configuration conf) throws IOException {
  return getLocalPathForWrite(path, SIZE_UNKNOWN, conf);
}

/** Get a path from the local FS. If size is known, we go
 * round-robin over the set of disks (via the configured dirs) and return

@ -292,7 +321,7 @@ public class LocalDirAllocator {
 * with probability proportional to their available space.
 */
public synchronized Path getLocalPathForWrite(String pathStr, long size,
    Configuration conf) throws IOException {
    Configuration conf, boolean checkWrite) throws IOException {
  confChanged(conf);
  int numDirs = localDirs.length;
  int numDirsSearched = 0;

@ -324,7 +353,7 @@ public class LocalDirAllocator {
    dir++;
  }
  dirNumLastAccessed = dir;
  returnPath = createPath(pathStr);
  returnPath = createPath(pathStr, checkWrite);
  if (returnPath == null) {
    totalAvailable -= availableOnDisk[dir];
    availableOnDisk[dir] = 0; // skip this disk

@ -335,7 +364,7 @@ public class LocalDirAllocator {
  while (numDirsSearched < numDirs && returnPath == null) {
    long capacity = dirDF[dirNumLastAccessed].getAvailable();
    if (capacity > size) {
      returnPath = createPath(pathStr);
      returnPath = createPath(pathStr, checkWrite);
    }
    dirNumLastAccessed++;
    dirNumLastAccessed = dirNumLastAccessed % numDirs;

@ -361,7 +390,7 @@ public class LocalDirAllocator {
    Configuration conf) throws IOException {

  // find an appropriate directory
  Path path = getLocalPathForWrite(pathStr, size, conf);
  Path path = getLocalPathForWrite(pathStr, size, conf, true);
  File dir = new File(path.getParent().toUri().getPath());
  String prefix = path.getName();

@ -398,6 +427,74 @@ public class LocalDirAllocator {
      " the configured local directories");
}

private static class PathIterator implements Iterator<Path>, Iterable<Path> {
  private final FileSystem fs;
  private final String pathStr;
  private int i = 0;
  private final String[] rootDirs;
  private Path next = null;

  private PathIterator(FileSystem fs, String pathStr, String[] rootDirs)
      throws IOException {
    this.fs = fs;
    this.pathStr = pathStr;
    this.rootDirs = rootDirs;
    advance();
  }

  @Override
  public boolean hasNext() {
    return next != null;
  }

  private void advance() throws IOException {
    while (i < rootDirs.length) {
      next = new Path(rootDirs[i++], pathStr);
      if (fs.exists(next)) {
        return;
      }
    }
    next = null;
  }

  @Override
  public Path next() {
    Path result = next;
    try {
      advance();
    } catch (IOException ie) {
      throw new RuntimeException("Can't check existance of " + next, ie);
    }
    return result;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException("read only iterator");
  }

  @Override
  public Iterator<Path> iterator() {
    return this;
  }
}

/**
 * Get all of the paths that currently exist in the working directories.
 * @param pathStr the path underneath the roots
 * @param conf the configuration to look up the roots in
 * @return all of the paths that exist under any of the roots
 * @throws IOException
 */
synchronized Iterable<Path> getAllLocalPathsToRead(String pathStr,
    Configuration conf) throws IOException {
  confChanged(conf);
  if (pathStr.startsWith("/")) {
    pathStr = pathStr.substring(1);
  }
  return new PathIterator(localFS, pathStr, localDirs);
}

/** We search through all the configured dirs for the file's existence
 * and return true when we find one
 */

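For orientation, here is a minimal sketch of how the new checkWrite overload of getLocalPathForWrite might be used from client code. The configuration key and directory names are illustrative assumptions, not keys defined by Hadoop, and getAllLocalPathsToRead is package-private, so only the public write-path API is shown.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class LocalDirAllocatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "test.local.dirs" is an illustrative context key; any key whose value
    // is a comma-separated list of local directories would work.
    conf.set("test.local.dirs", "/tmp/dir0,/tmp/dir1");
    LocalDirAllocator alloc = new LocalDirAllocator("test.local.dirs");

    // Existing behaviour: pick a directory and create the parent on disk.
    Path p1 = alloc.getLocalPathForWrite("scratch/part-0", 4096, conf);

    // New overload: checkWrite=false selects a directory without creating it.
    Path p2 = alloc.getLocalPathForWrite("scratch/part-1", 4096, conf, false);
    System.out.println(p1 + " " + p2);
  }
}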
@ -228,10 +228,10 @@ public class RawLocalFileSystem extends FileSystem {
public FSDataOutputStream append(Path f, int bufferSize,
    Progressable progress) throws IOException {
  if (!exists(f)) {
    throw new FileNotFoundException("File " + f + " not found.");
    throw new FileNotFoundException("File " + f + " not found");
  }
  if (getFileStatus(f).isDirectory()) {
    throw new IOException("Cannot append to a diretory (=" + f + " ).");
    throw new IOException("Cannot append to a diretory (=" + f + " )");
  }
  return new FSDataOutputStream(new BufferedOutputStream(
      new LocalFSFileOutputStream(f, true), bufferSize), statistics);

@ -242,7 +242,7 @@ public class RawLocalFileSystem extends FileSystem {
    short replication, long blockSize, Progressable progress)
    throws IOException {
  if (exists(f) && !overwrite) {
    throw new IOException("File already exists:"+f);
    throw new IOException("File already exists: "+f);
  }
  Path parent = f.getParent();
  if (parent != null && !mkdirs(parent)) {

@ -271,11 +271,18 @@ public class RawLocalFileSystem extends FileSystem {
  return FileUtil.copy(this, src, this, dst, true, getConf());
}

/**
 * Delete the given path to a file or directory.
 * @param p the path to delete
 * @param recursive to delete sub-directories
 * @return true if the file or directory and all its contents were deleted
 * @throws IOException if p is non-empty and recursive is false
 */
public boolean delete(Path p, boolean recursive) throws IOException {
  File f = pathToFile(p);
  if (f.isFile()) {
    return f.delete();
  } else if ((!recursive) && f.isDirectory() &&
  } else if (!recursive && f.isDirectory() &&
      (FileUtil.listFiles(f).length != 0)) {
    throw new IOException("Directory " + f.toString() + " is not empty");
  }

@ -287,7 +294,7 @@ public class RawLocalFileSystem extends FileSystem {
  FileStatus[] results;

  if (!localf.exists()) {
    throw new FileNotFoundException("File " + f + " does not exist.");
    throw new FileNotFoundException("File " + f + " does not exist");
  }
  if (localf.isFile()) {
    return new FileStatus[] {

@ -421,7 +428,7 @@ public class RawLocalFileSystem extends FileSystem {
  if (path.exists()) {
    return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(), this);
  } else {
    throw new FileNotFoundException("File " + f + " does not exist.");
    throw new FileNotFoundException("File " + f + " does not exist");
  }
}

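A small sketch of the delete contract documented above, obtaining the raw local file system the same way the tests in this patch do; the paths are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteContractSketch {
  public static void main(String[] args) throws Exception {
    FileSystem raw = FileSystem.getLocal(new Configuration()).getRawFileSystem();
    Path dir = new Path("/tmp/delete-sketch");            // illustrative path
    raw.mkdirs(dir);
    raw.create(new Path(dir, "child")).close();
    try {
      raw.delete(dir, false);                             // non-recursive on a non-empty dir
    } catch (java.io.IOException expected) {
      // the patched delete() now reports that the directory is not empty
    }
    raw.delete(dir, true);                                // recursive delete succeeds
  }
}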
@ -48,16 +48,12 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.ConfServlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.http.AdminAuthorizedServlet;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.jmx.JMXJsonServlet;
import org.apache.hadoop.log.LogLevel;
import org.apache.hadoop.metrics.MetricsServlet;
import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.ReflectionUtils;
import org.mortbay.io.Buffer;

@ -79,6 +75,8 @@ import org.mortbay.jetty.webapp.WebAppContext;
import org.mortbay.thread.QueuedThreadPool;
import org.mortbay.util.MultiException;

import com.sun.jersey.spi.container.servlet.ServletContainer;

/**
 * Create a Jetty embedded server to answer http requests. The primary goal
 * is to serve up status information for the server.

@ -178,7 +176,7 @@ public class HttpServer implements FilterContainer {

  int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
  // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
  // default value (currently 254).
  // default value (currently 250).
  QueuedThreadPool threadPool = maxThreads == -1 ?
      new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
  webServer.setThreadPool(threadPool);

@ -325,6 +323,22 @@ public class HttpServer implements FilterContainer {
  webAppContext.setAttribute(name, value);
}

/**
 * Add a Jersey resource package.
 * @param packageName The Java package name containing the Jersey resource.
 * @param pathSpec The path spec for the servlet
 */
public void addJerseyResourcePackage(final String packageName,
    final String pathSpec) {
  LOG.info("addJerseyResourcePackage: packageName=" + packageName
      + ", pathSpec=" + pathSpec);
  final ServletHolder sh = new ServletHolder(ServletContainer.class);
  sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
      "com.sun.jersey.api.core.PackagesResourceConfig");
  sh.setInitParameter("com.sun.jersey.config.property.packages", packageName);
  webAppContext.addServlet(sh, pathSpec);
}

/**
 * Add a servlet in the server.
 * @param name The name of the servlet (can be passed as null)

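A hedged sketch of how addJerseyResourcePackage might be called when wiring a JAX-RS resource into the embedded Jetty server; the server name, resource package, and path spec below are hypothetical.

import org.apache.hadoop.http.HttpServer;

public class JerseyResourceSketch {
  public static void main(String[] args) throws Exception {
    // Name and bind address are illustrative; port 0 with findPort=true lets
    // Jetty pick a free port, as the Hadoop tests commonly do.
    HttpServer server = new HttpServer("sketch", "0.0.0.0", 0, true);
    server.addJerseyResourcePackage(
        "com.example.rest.resources",   // hypothetical package of Jersey resources
        "/api/*");
    server.start();
  }
}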
@ -285,8 +285,8 @@ public class Client {
  authMethod = AuthMethod.KERBEROS;
}

header = new ConnectionHeader(protocol == null ? null : protocol
    .getName(), ticket, authMethod);
header =
    new ConnectionHeader(RPC.getProtocolName(protocol), ticket, authMethod);

if (LOG.isDebugEnabled())
  LOG.debug("Use " + authMethod + " authentication for protocol "

@ -0,0 +1,38 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ipc;

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

/**
 * The protocol name that is used when a client and server connect.
 * By default the class name of the protocol interface is the protocol name.
 *
 * Why override the default name (i.e. the class name)?
 * One use case overriding the default name (i.e. the class name) is when
 * there are multiple implementations of the same protocol, each with say a
 * different version/serialization.
 * In Hadoop this is used to allow multiple server and client adapters
 * for different versions of the same protocol service.
 */
@Retention(RetentionPolicy.RUNTIME)
public @interface ProtocolInfo {
  String protocolName(); // the name of the protocol (i.e. rpc service)
}

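As a usage sketch, the new annotation is placed on a protocol interface to decouple the wire-level protocol name from the Java class name; the interface and method below are hypothetical.

import java.io.IOException;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;

// Without the annotation, the protocol name would default to the
// fully-qualified class name of this interface.
@ProtocolInfo(protocolName = "org.example.ClientProtocol")
public interface ExampleClientProtocol extends VersionedProtocol {
  long versionID = 1L;
  String echo(String message) throws IOException;
}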
@ -62,6 +62,20 @@ import org.apache.hadoop.util.ReflectionUtils;
 */
public class RPC {
  static final Log LOG = LogFactory.getLog(RPC.class);

  /**
   * Get the protocol name.
   * If the protocol class has a ProtocolAnnotation, then get the protocol
   * name from the annotation; otherwise the class name is the protocol name.
   */
  static public String getProtocolName(Class<?> protocol) {
    if (protocol == null) {
      return null;
    }
    ProtocolInfo anno = (ProtocolInfo) protocol.getAnnotation(ProtocolInfo.class);
    return (anno == null) ? protocol.getName() : anno.protocolName();
  }

  private RPC() {} // no public ctor

@ -553,8 +567,10 @@ public class RPC {
  }

  /** Construct a server for a protocol implementation instance. */
  public static Server getServer(Class<?> protocol,
      Object instance, String bindAddress, int port,
  public static <PROTO extends VersionedProtocol, IMPL extends PROTO>
      Server getServer(Class<PROTO> protocol,
      IMPL instance, String bindAddress, int port,
      int numHandlers, int numReaders, int queueSizePerHandler,
      boolean verbose, Configuration conf,
      SecretManager<? extends TokenIdentifier> secretManager)

@ -576,6 +592,18 @@ public class RPC {
    super(bindAddress, port, paramClass, handlerCount, numReaders, queueSizePerHandler,
        conf, serverName, secretManager);
  }

  /**
   * Add a protocol to the existing server.
   * @param protocolClass - the protocol class
   * @param protocolImpl - the impl of the protocol that will be called
   * @return the server (for convenience)
   */
  public <PROTO extends VersionedProtocol, IMPL extends PROTO>
      Server addProtocol(Class<PROTO> protocolClass, IMPL protocolImpl
      ) throws IOException {
    throw new IOException("addProtocol Not Implemented");
  }
}

}

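A fragment sketching how the generic getServer overload and the addProtocol hook are intended to compose. The protocol interfaces and implementation objects are hypothetical, and addProtocol only works with an RpcEngine whose Server overrides it (such as WritableRpcEngine below); the base RPC.Server still throws IOException.

// clientImpl and adminImpl are hypothetical objects implementing
// ExampleClientProtocol and ExampleAdminProtocol respectively, both of
// which extend VersionedProtocol.
Configuration conf = new Configuration();
RPC.Server server = RPC.getServer(ExampleClientProtocol.class, clientImpl,
    "0.0.0.0", 9000, 5 /*handlers*/, -1, -1, false /*verbose*/, conf, null);
// Register a second protocol on the same listener.
server.addProtocol(ExampleAdminProtocol.class, adminImpl);
server.start();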
@ -900,7 +900,7 @@ public abstract class Server {
  private InetAddress addr;

  ConnectionHeader header = new ConnectionHeader();
  Class<?> protocol;
  String protocolName;
  boolean useSasl;
  SaslServer saslServer;
  private AuthMethod authMethod;

@ -1287,15 +1287,8 @@ public abstract class Server {
  DataInputStream in =
      new DataInputStream(new ByteArrayInputStream(buf));
  header.readFields(in);
  try {
    String protocolClassName = header.getProtocol();
    if (protocolClassName != null) {
      protocol = getProtocolClass(header.getProtocol(), conf);
      rpcDetailedMetrics.init(protocol);
    }
  } catch (ClassNotFoundException cnfe) {
    throw new IOException("Unknown protocol: " + header.getProtocol());
  }
  protocolName = header.getProtocol();

  UserGroupInformation protocolUser = header.getUgi();
  if (!useSasl) {

@ -1484,7 +1477,7 @@ public abstract class Server {
  // Make the call as the user via Subject.doAs, thus associating
  // the call with the Subject
  if (call.connection.user == null) {
    value = call(call.connection.protocol, call.param,
    value = call(call.connection.protocolName, call.param,
        call.timestamp);
  } else {
    value =

@ -1493,7 +1486,7 @@ public abstract class Server {
  @Override
  public Writable run() throws Exception {
    // make the call
    return call(call.connection.protocol,
    return call(call.connection.protocolName,
        call.param, call.timestamp);

  }

@ -1753,7 +1746,7 @@ public abstract class Server {

  /**
   * Called for each call.
   * @deprecated Use {@link #call(Class, Writable, long)} instead
   * @deprecated Use {@link #call(String, Writable, long)} instead
   */
  @Deprecated
  public Writable call(Writable param, long receiveTime) throws IOException {

@ -1761,7 +1754,7 @@ public abstract class Server {
  }

  /** Called for each call. */
  public abstract Writable call(Class<?> protocol,
  public abstract Writable call(String protocol,
      Writable param, long receiveTime)
      throws IOException;

@ -34,7 +34,6 @@ public interface VersionedProtocol {
 * @return the version that the server will speak
 * @throws IOException if any IO error occurs
 */
@Deprecated
public long getProtocolVersion(String protocol,
    long clientVersion) throws IOException;

@ -27,6 +27,9 @@ import java.lang.reflect.InvocationTargetException;

import java.net.InetSocketAddress;
import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.HashMap;

@ -35,6 +38,7 @@ import javax.net.SocketFactory;
import org.apache.commons.logging.*;

import org.apache.hadoop.io.*;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;

@ -47,10 +51,46 @@ import org.apache.hadoop.conf.*;
public class WritableRpcEngine implements RpcEngine {
  private static final Log LOG = LogFactory.getLog(RPC.class);

  /**
   * Get all superInterfaces that extend VersionedProtocol
   * @param childInterfaces
   * @return the super interfaces that extend VersionedProtocol
   */
  private static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
    List<Class<?>> allInterfaces = new ArrayList<Class<?>>();

    for (Class<?> childInterface : childInterfaces) {
      if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
        allInterfaces.add(childInterface);
        allInterfaces.addAll(
            Arrays.asList(
                getSuperInterfaces(childInterface.getInterfaces())));
      } else {
        LOG.warn("Interface " + childInterface +
            " ignored because it does not extend VersionedProtocol");
      }
    }
    return (Class<?>[]) allInterfaces.toArray(new Class[allInterfaces.size()]);
  }

  /**
   * Get all interfaces that the given protocol implements or extends
   * which are assignable from VersionedProtocol.
   */
  private static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
    Class<?>[] interfaces = protocol.getInterfaces();
    return getSuperInterfaces(interfaces);
  }

  //writableRpcVersion should be updated if there is a change
  //in format of the rpc messages.
  public static long writableRpcVersion = 1L;

  // 2L - added declared class to Invocation
  public static final long writableRpcVersion = 2L;

  /** A method invocation, including the method name and its parameters.*/
  private static class Invocation implements Writable, Configurable {
    private String methodName;

@ -59,11 +99,13 @@ public class WritableRpcEngine implements RpcEngine {
    private Configuration conf;
    private long clientVersion;
    private int clientMethodsHash;
    private String declaringClassProtocolName;

    //This could be different from static writableRpcVersion when received
    //at server, if client is using a different version.
    private long rpcVersion;

    @SuppressWarnings("unused") // called when deserializing an invocation
    public Invocation() {}

    public Invocation(Method method, Object[] parameters) {

@ -88,6 +130,8 @@ public class WritableRpcEngine implements RpcEngine {
      this.clientMethodsHash = ProtocolSignature.getFingerprint(method
          .getDeclaringClass().getMethods());
    }
    this.declaringClassProtocolName =
        RPC.getProtocolName(method.getDeclaringClass());
  }

  /** The name of the method invoked. */

@ -103,6 +147,7 @@ public class WritableRpcEngine implements RpcEngine {
    return clientVersion;
  }

  @SuppressWarnings("unused")
  private int getClientMethodsHash() {
    return clientMethodsHash;
  }

@ -115,8 +160,10 @@ public class WritableRpcEngine implements RpcEngine {
    return rpcVersion;
  }

  @SuppressWarnings("deprecation")
  public void readFields(DataInput in) throws IOException {
    rpcVersion = in.readLong();
    declaringClassProtocolName = UTF8.readString(in);
    methodName = UTF8.readString(in);
    clientVersion = in.readLong();
    clientMethodsHash = in.readInt();

@ -124,13 +171,16 @@ public class WritableRpcEngine implements RpcEngine {
    parameterClasses = new Class[parameters.length];
    ObjectWritable objectWritable = new ObjectWritable();
    for (int i = 0; i < parameters.length; i++) {
      parameters[i] = ObjectWritable.readObject(in, objectWritable, this.conf);
      parameters[i] =
          ObjectWritable.readObject(in, objectWritable, this.conf);
      parameterClasses[i] = objectWritable.getDeclaredClass();
    }
  }

  @SuppressWarnings("deprecation")
  public void write(DataOutput out) throws IOException {
    out.writeLong(rpcVersion);
    UTF8.writeString(out, declaringClassProtocolName);
    UTF8.writeString(out, methodName);
    out.writeLong(clientVersion);
    out.writeInt(clientMethodsHash);

@ -273,30 +323,161 @@ public class WritableRpcEngine implements RpcEngine {

  /** Construct a server for a protocol implementation instance listening on a
   * port and address. */
  public Server getServer(Class<?> protocol,
      Object instance, String bindAddress, int port,
      int numHandlers, int numReaders, int queueSizePerHandler,
      boolean verbose, Configuration conf,
  public RPC.Server getServer(Class<?> protocolClass,
      Object protocolImpl, String bindAddress, int port,
      int numHandlers, int numReaders, int queueSizePerHandler,
      boolean verbose, Configuration conf,
      SecretManager<? extends TokenIdentifier> secretManager)
      throws IOException {
    return new Server(instance, conf, bindAddress, port, numHandlers,
        numReaders, queueSizePerHandler, verbose, secretManager);
    return new Server(protocolClass, protocolImpl, conf, bindAddress, port,
        numHandlers, numReaders, queueSizePerHandler, verbose, secretManager);
  }


  /** An RPC Server. */
  public static class Server extends RPC.Server {
    private Object instance;
    private boolean verbose;

    /**
     * The key in Map
     */
    static class ProtoNameVer {
      final String protocol;
      final long version;
      ProtoNameVer(String protocol, long ver) {
        this.protocol = protocol;
        this.version = ver;
      }
      @Override
      public boolean equals(Object o) {
        if (o == null)
          return false;
        if (this == o)
          return true;
        if (! (o instanceof ProtoNameVer))
          return false;
        ProtoNameVer pv = (ProtoNameVer) o;
        return ((pv.protocol.equals(this.protocol)) &&
            (pv.version == this.version));
      }
      @Override
      public int hashCode() {
        return protocol.hashCode() * 37 + (int) version;
      }
    }

    /**
     * The value in map
     */
    static class ProtoClassProtoImpl {
      final Class<?> protocolClass;
      final Object protocolImpl;
      ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) {
        this.protocolClass = protocolClass;
        this.protocolImpl = protocolImpl;
      }
    }

    private Map<ProtoNameVer, ProtoClassProtoImpl> protocolImplMap =
        new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10);

    // Register protocol and its impl for rpc calls
    private void registerProtocolAndImpl(Class<?> protocolClass,
        Object protocolImpl) throws IOException {
      String protocolName = RPC.getProtocolName(protocolClass);
      VersionedProtocol vp = (VersionedProtocol) protocolImpl;
      long version;
      try {
        version = vp.getProtocolVersion(protocolName, 0);
      } catch (Exception ex) {
        LOG.warn("Protocol " + protocolClass +
            " NOT registered as getProtocolVersion throws exception ");
        return;
      }
      protocolImplMap.put(new ProtoNameVer(protocolName, version),
          new ProtoClassProtoImpl(protocolClass, protocolImpl));
      LOG.info("ProtocolImpl=" + protocolImpl.getClass().getName() +
          " protocolClass=" + protocolClass.getName() + " version=" + version);
    }

    private static class VerProtocolImpl {
      final long version;
      final ProtoClassProtoImpl protocolTarget;
      VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) {
        this.version = ver;
        this.protocolTarget = protocolTarget;
      }
    }


    @SuppressWarnings("unused") // will be useful later.
    private VerProtocolImpl[] getSupportedProtocolVersions(
        String protocolName) {
      VerProtocolImpl[] resultk = new VerProtocolImpl[protocolImplMap.size()];
      int i = 0;
      for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
          protocolImplMap.entrySet()) {
        if (pv.getKey().protocol.equals(protocolName)) {
          resultk[i++] =
              new VerProtocolImpl(pv.getKey().version, pv.getValue());
        }
      }
      if (i == 0) {
        return null;
      }
      VerProtocolImpl[] result = new VerProtocolImpl[i];
      System.arraycopy(resultk, 0, result, 0, i);
      return result;
    }

    private VerProtocolImpl getHighestSupportedProtocol(String protocolName) {
      Long highestVersion = 0L;
      ProtoClassProtoImpl highest = null;
      for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv : protocolImplMap
          .entrySet()) {
        if (pv.getKey().protocol.equals(protocolName)) {
          if ((highest == null) || (pv.getKey().version > highestVersion)) {
            highest = pv.getValue();
            highestVersion = pv.getKey().version;
          }
        }
      }
      if (highest == null) {
        return null;
      }
      return new VerProtocolImpl(highestVersion, highest);
    }


    /** Construct an RPC server.
     * @param instance the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     *
     * @deprecated Use #Server(Class, Object, Configuration, String, int)
     *
     */
    public Server(Object instance, Configuration conf, String bindAddress, int port)
    @Deprecated
    public Server(Object instance, Configuration conf, String bindAddress,
        int port)
        throws IOException {
      this(instance, conf, bindAddress, port, 1, -1, -1, false, null);
      this(null, instance, conf, bindAddress, port);
    }


    /** Construct an RPC server.
     * @param protocol class
     * @param instance the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     */
    public Server(Class<?> protocolClass, Object protocolImpl,
        Configuration conf, String bindAddress, int port)
        throws IOException {
      this(protocolClass, protocolImpl, conf, bindAddress, port, 1, -1, -1,
          false, null);
    }

    private static String classNameBase(String className) {

@ -307,35 +488,103 @@ public class WritableRpcEngine implements RpcEngine {
      return names[names.length-1];
    }


    /** Construct an RPC server.
     * @param instance the instance whose methods will be called
     * @param protocolImpl the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     * @param numHandlers the number of method handler threads to run
     * @param verbose whether each call should be logged
     *
     * @deprecated use Server#Server(Class, Object,
     *      Configuration, String, int, int, int, int, boolean, SecretManager)
     */
    @Deprecated
    public Server(Object protocolImpl, Configuration conf, String bindAddress,
        int port, int numHandlers, int numReaders, int queueSizePerHandler,
        boolean verbose, SecretManager<? extends TokenIdentifier> secretManager)
        throws IOException {
      this(null, protocolImpl, conf, bindAddress, port,
          numHandlers, numReaders, queueSizePerHandler, verbose,
          secretManager);

    }

    /** Construct an RPC server.
     * @param protocolClass - the protocol being registered
     *     can be null for compatibility with old usage (see below for details)
     * @param protocolImpl the protocol impl that will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     * @param numHandlers the number of method handler threads to run
     * @param verbose whether each call should be logged
     */
    public Server(Object instance, Configuration conf, String bindAddress, int port,
        int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose,
        SecretManager<? extends TokenIdentifier> secretManager)
    public Server(Class<?> protocolClass, Object protocolImpl,
        Configuration conf, String bindAddress, int port,
        int numHandlers, int numReaders, int queueSizePerHandler,
        boolean verbose, SecretManager<? extends TokenIdentifier> secretManager)
        throws IOException {
      super(bindAddress, port, Invocation.class, numHandlers, numReaders,
          queueSizePerHandler, conf,
          classNameBase(instance.getClass().getName()), secretManager);
      this.instance = instance;
          classNameBase(protocolImpl.getClass().getName()), secretManager);

      this.verbose = verbose;


      Class<?>[] protocols;
      if (protocolClass == null) { // derive protocol from impl
        /*
         * In order to remain compatible with the old usage where a single
         * target protocolImpl is suppled for all protocol interfaces, and
         * the protocolImpl is derived from the protocolClass(es)
         * we register all interfaces extended by the protocolImpl
         */
        protocols = getProtocolInterfaces(protocolImpl.getClass());

      } else {
        if (!protocolClass.isAssignableFrom(protocolImpl.getClass())) {
          throw new IOException("protocolClass "+ protocolClass +
              " is not implemented by protocolImpl which is of class " +
              protocolImpl.getClass());
        }
        // register protocol class and its super interfaces
        registerProtocolAndImpl(protocolClass, protocolImpl);
        protocols = getProtocolInterfaces(protocolClass);
      }
      for (Class<?> p : protocols) {
        if (!p.equals(VersionedProtocol.class)) {
          registerProtocolAndImpl(p, protocolImpl);
        }
      }

    }

    public Writable call(Class<?> protocol, Writable param, long receivedTime)

    @Override
    public <PROTO extends VersionedProtocol, IMPL extends PROTO> Server
        addProtocol(
        Class<PROTO> protocolClass, IMPL protocolImpl) throws IOException {
      registerProtocolAndImpl(protocolClass, protocolImpl);
      return this;
    }

    /**
     * Process a client call
     * @param protocolName - the protocol name (the class of the client proxy
     *      used to make calls to the rpc server.
     * @param param parameters
     * @param receivedTime time at which the call receoved (for metrics)
     * @return the call's return
     * @throws IOException
     */
    public Writable call(String protocolName, Writable param, long receivedTime)
        throws IOException {
      try {
        Invocation call = (Invocation)param;
        if (verbose) log("Call: " + call);

        Method method = protocol.getMethod(call.getMethodName(),
            call.getParameterClasses());
        method.setAccessible(true);

        // Verify rpc version
        if (call.getRpcVersion() != writableRpcVersion) {
          // Client is using a different version of WritableRpc

@ -344,25 +593,51 @@ public class WritableRpcEngine implements RpcEngine {
              + call.getRpcVersion() + ", server side version="
              + writableRpcVersion);
        }

        //Verify protocol version.
        //Bypass the version check for VersionedProtocol
        if (!method.getDeclaringClass().equals(VersionedProtocol.class)) {
          long clientVersion = call.getProtocolVersion();
          ProtocolSignature serverInfo = ((VersionedProtocol) instance)
              .getProtocolSignature(protocol.getCanonicalName(), call
                  .getProtocolVersion(), call.getClientMethodsHash());
          long serverVersion = serverInfo.getVersion();
          if (serverVersion != clientVersion) {
            LOG.warn("Version mismatch: client version=" + clientVersion
                + ", server version=" + serverVersion);
            throw new RPC.VersionMismatch(protocol.getName(), clientVersion,
                serverVersion);

        long clientVersion = call.getProtocolVersion();
        final String protoName;
        ProtoClassProtoImpl protocolImpl;
        if (call.declaringClassProtocolName.equals(VersionedProtocol.class.getName())) {
          // VersionProtocol methods are often used by client to figure out
          // which version of protocol to use.
          //
          // Versioned protocol methods should go the protocolName protocol
          // rather than the declaring class of the method since the
          // the declaring class is VersionedProtocol which is not
          // registered directly.
          // Send the call to the highest protocol version
          protocolImpl =
              getHighestSupportedProtocol(protocolName).protocolTarget;
        } else {
          protoName = call.declaringClassProtocolName;

          // Find the right impl for the protocol based on client version.
          ProtoNameVer pv =
              new ProtoNameVer(call.declaringClassProtocolName, clientVersion);
          protocolImpl = protocolImplMap.get(pv);
          if (protocolImpl == null) { // no match for Protocol AND Version
            VerProtocolImpl highest =
                getHighestSupportedProtocol(protoName);
            if (highest == null) {
              throw new IOException("Unknown protocol: " + protoName);
            } else { // protocol supported but not the version that client wants
              throw new RPC.VersionMismatch(protoName, clientVersion,
                  highest.version);
            }
          }
        }


        // Invoke the protocol method

        long startTime = System.currentTimeMillis();
        Object value = method.invoke(instance, call.getParameters());
        Method method =
            protocolImpl.protocolClass.getMethod(call.getMethodName(),
                call.getParameterClasses());
        method.setAccessible(true);
        rpcDetailedMetrics.init(protocolImpl.protocolClass);
        Object value =
            method.invoke(protocolImpl.protocolImpl, call.getParameters());
        int processingTime = (int) (System.currentTimeMillis() - startTime);
        int qTime = (int) (startTime-receivedTime);
        if (LOG.isDebugEnabled()) {

@ -132,6 +132,12 @@ public class GangliaContext extends AbstractMetricsContext {
  StringBuilder sb = new StringBuilder();
  sb.append(contextName);
  sb.append('.');

  if (contextName.equals("jvm") && outRec.getTag("processName") != null) {
    sb.append(outRec.getTag("processName"));
    sb.append('.');
  }

  sb.append(recordName);
  sb.append('.');
  int sbBaseLen = sb.length();

@ -20,13 +20,21 @@ package org.apache.hadoop.metrics2.sink.ganglia;

import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.MsInfo;
import org.apache.hadoop.metrics2.util.MetricsCache;
import org.apache.hadoop.metrics2.util.MetricsCache.Record;

@ -38,8 +46,67 @@ public class GangliaSink30 extends AbstractGangliaSink {

  public final Log LOG = LogFactory.getLog(this.getClass());

  private static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix.";

  private MetricsCache metricsCache = new MetricsCache();

  // a key with a NULL value means ALL
  private Map<String,Set<String>> useTagsMap = new HashMap<String,Set<String>>();

  @Override
  @SuppressWarnings("unchecked")
  public void init(SubsetConfiguration conf) {
    super.init(conf);

    conf.setListDelimiter(',');
    Iterator<String> it = (Iterator<String>) conf.getKeys();
    while (it.hasNext()) {
      String propertyName = it.next();
      if (propertyName.startsWith(TAGS_FOR_PREFIX_PROPERTY_PREFIX)) {
        String contextName = propertyName.substring(TAGS_FOR_PREFIX_PROPERTY_PREFIX.length());
        String[] tags = conf.getStringArray(propertyName);
        boolean useAllTags = false;
        Set<String> set = null;
        if (tags.length > 0) {
          set = new HashSet<String>();
          for (String tag : tags) {
            tag = tag.trim();
            useAllTags |= tag.equals("*");
            if (tag.length() > 0) {
              set.add(tag);
            }
          }
          if (useAllTags) {
            set = null;
          }
        }
        useTagsMap.put(contextName, set);
      }
    }
  }

  @InterfaceAudience.Private
  public void appendPrefix(MetricsRecord record, StringBuilder sb) {
    String contextName = record.context();
    Collection<MetricsTag> tags = record.tags();
    if (useTagsMap.containsKey(contextName)) {
      Set<String> useTags = useTagsMap.get(contextName);
      for (MetricsTag t : tags) {
        if (useTags == null || useTags.contains(t.name())) {

          // the context is always skipped here because it is always added

          // the hostname is always skipped to avoid case-mismatches
          // from different DNSes.

          if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) {
            sb.append('.').append(t.name()).append('=').append(t.value());
          }
        }
      }
    }
  }

  @Override
  public void putMetrics(MetricsRecord record) {
    // The method handles both cases whether Ganglia support dense publish

@ -53,6 +120,8 @@ public class GangliaSink30 extends AbstractGangliaSink {
  sb.append('.');
  sb.append(recordName);

  appendPrefix(record, sb);

  String groupName = sb.toString();
  sb.append('.');
  int sbBaseLen = sb.length();

@ -17,7 +17,7 @@
 */
package org.apache.hadoop.security;

import org.apache.hadoop.alfredo.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;

@ -23,7 +23,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.alfredo.util.KerberosName;
import org.apache.hadoop.security.authentication.util.KerberosName;

import sun.security.krb5.Config;
import sun.security.krb5.KrbException;

@ -158,10 +158,7 @@ public class RunJar {

Runtime.getRuntime().addShutdownHook(new Thread() {
  public void run() {
    try {
      FileUtil.fullyDelete(workDir);
    } catch (IOException e) {
    }
    FileUtil.fullyDelete(workDir);
  }
});

@ -317,6 +317,11 @@
  <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
</property>

<property>
  <name>fs.webhdfs.impl</name>
  <value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
</property>

<property>
  <name>fs.ftp.impl</name>
  <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>

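With fs.webhdfs.impl registered above, a webhdfs:// URI resolves to WebHdfsFileSystem through the ordinary FileSystem factory. A minimal sketch; the NameNode host and port are illustrative.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsListingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The "webhdfs" scheme is looked up via fs.webhdfs.impl.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070/"), conf);
    for (FileStatus st : fs.listStatus(new Path("/"))) {
      System.out.println(st.getPath());
    }
  }
}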
@ -32,6 +32,7 @@ import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.log.Log;

import static org.apache.hadoop.fs.FileSystemTestHelper.*;

@ -62,8 +63,6 @@ public abstract class FSMainOperationsBaseTest {
  private static String TEST_DIR_AXX = "test/hadoop/axx";
  private static int numBlocks = 2;

  static final String LOCAL_FS_ROOT_URI = "file:///tmp/test";


  protected static FileSystem fSys;

@ -83,7 +82,7 @@ public abstract class FSMainOperationsBaseTest {
    }
  };

  private static byte[] data = getFileData(numBlocks,
  protected static final byte[] data = getFileData(numBlocks,
      getDefaultBlockSize());

  @Before

@ -183,7 +182,7 @@ public abstract class FSMainOperationsBaseTest {

  @Test
  public void testWDAbsolute() throws IOException {
    Path absoluteDir = new Path(LOCAL_FS_ROOT_URI + "/existingDir");
    Path absoluteDir = new Path(fSys.getUri() + "/test/existingDir");
    fSys.mkdirs(absoluteDir);
    fSys.setWorkingDirectory(absoluteDir);
    Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());

@ -646,7 +645,7 @@ public abstract class FSMainOperationsBaseTest {
    writeReadAndDelete(getDefaultBlockSize() * 2);
  }

  private void writeReadAndDelete(int len) throws IOException {
  protected void writeReadAndDelete(int len) throws IOException {
    Path path = getTestRootPath(fSys, "test/hadoop/file");

    fSys.mkdirs(path.getParent());

@ -768,6 +767,7 @@ public abstract class FSMainOperationsBaseTest {
      rename(src, dst, false, false, false, Rename.NONE);
      Assert.fail("Should throw FileNotFoundException");
    } catch (IOException e) {
      Log.info("XXX", e);
      Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
    }

@ -45,7 +45,7 @@ import org.apache.hadoop.fs.Path;
public abstract class FileSystemContractBaseTest extends TestCase {

  protected FileSystem fs;
  private byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
  protected byte[] data = new byte[getBlockSize() * 2]; // two blocks of data
  {
    for (int i = 0; i < data.length; i++) {
      data[i] = (byte) (i % 10);

@ -215,7 +215,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
    writeReadAndDelete(getBlockSize() * 2);
  }

  private void writeReadAndDelete(int len) throws IOException {
  protected void writeReadAndDelete(int len) throws IOException {
    Path path = path("/test/hadoop/file");

    fs.mkdirs(path.getParent());

@ -256,7 +256,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
    assertEquals("Length", data.length, fs.getFileStatus(path).getLen());

    try {
      fs.create(path, false);
      fs.create(path, false).close();
      fail("Should throw IOException.");
    } catch (IOException e) {
      // Expected

@ -17,16 +17,15 @@
 */
package org.apache.hadoop.fs;

import java.io.DataInputStream;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.net.URI;
import java.util.Random;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;

import static org.junit.Assert.*;

/**
 * Helper class for unit tests.

@ -143,23 +142,33 @@ public final class FileSystemTestHelper {
    }
  }


  public static void writeFile(FileSystem fSys, Path path,byte b[])
      throws Exception {
    FSDataOutputStream out =
        fSys.create(path);
    out.write(b);
    out.close();
  static String writeFile(FileSystem fileSys, Path name, int fileSize)
      throws IOException {
    final long seed = 0xDEADBEEFL;
    // Create and write a file that contains three blocks of data
    FSDataOutputStream stm = fileSys.create(name);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
    return new String(buffer);
  }

  public static byte[] readFile(FileSystem fSys, Path path, int len )
      throws Exception {
    DataInputStream dis = fSys.open(path);
    byte[] buffer = new byte[len];
    IOUtils.readFully(dis, buffer, 0, len);
    dis.close();
    return buffer;
  static String readFile(FileSystem fs, Path name, int buflen)
      throws IOException {
    byte[] b = new byte[buflen];
    int offset = 0;
    FSDataInputStream in = fs.open(name);
    for (int remaining, n;
        (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1;
        offset += n);
    assertEquals(offset, Math.min(b.length, in.getPos()));
    in.close();
    String s = new String(b, 0, offset);
    return s;
  }

  public static FileStatus containsPath(FileSystem fSys, Path path,
      FileStatus[] dirList)
      throws IOException {

@ -18,10 +18,9 @@

package org.apache.hadoop.fs;

import java.net.URI;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import org.apache.hadoop.conf.Configuration;
import junit.framework.TestCase;

@ -56,13 +55,13 @@ public class TestChecksumFileSystem extends TestCase {

  // Exercise some boundary cases - a divisor of the chunk size
  // the chunk size, 2x chunk size, and +/-1 around these.
  TestLocalFileSystem.readFile(localFs, testPath, 128);
  TestLocalFileSystem.readFile(localFs, testPath, 511);
  TestLocalFileSystem.readFile(localFs, testPath, 512);
  TestLocalFileSystem.readFile(localFs, testPath, 513);
  TestLocalFileSystem.readFile(localFs, testPath, 1023);
  TestLocalFileSystem.readFile(localFs, testPath, 1024);
  TestLocalFileSystem.readFile(localFs, testPath, 1025);
  readFile(localFs, testPath, 128);
  readFile(localFs, testPath, 511);
  readFile(localFs, testPath, 512);
  readFile(localFs, testPath, 513);
  readFile(localFs, testPath, 1023);
  readFile(localFs, testPath, 1024);
  readFile(localFs, testPath, 1025);

  localFs.delete(localFs.getChecksumFile(testPath), true);
  assertTrue("checksum deleted", !localFs.exists(localFs.getChecksumFile(testPath)));

@ -74,7 +73,7 @@ public class TestChecksumFileSystem extends TestCase {

  boolean errorRead = false;
  try {
    TestLocalFileSystem.readFile(localFs, testPath, 1024);
    readFile(localFs, testPath, 1024);
  }catch(ChecksumException ie) {
    errorRead = true;
  }

@ -83,7 +82,7 @@ public class TestChecksumFileSystem extends TestCase {
  //now setting verify false, the read should succeed
  try {
    localFs.setVerifyChecksum(false);
    String str = TestLocalFileSystem.readFile(localFs, testPath, 1024);
    String str = readFile(localFs, testPath, 1024).toString();
    assertTrue("read", "testing".equals(str));
  } finally {
    // reset for other tests

@ -104,13 +103,13 @@ public class TestChecksumFileSystem extends TestCase {

  // Exercise some boundary cases - a divisor of the chunk size
  // the chunk size, 2x chunk size, and +/-1 around these.
  TestLocalFileSystem.readFile(localFs, testPath, 128);
  TestLocalFileSystem.readFile(localFs, testPath, 511);
  TestLocalFileSystem.readFile(localFs, testPath, 512);
  TestLocalFileSystem.readFile(localFs, testPath, 513);
  TestLocalFileSystem.readFile(localFs, testPath, 1023);
  TestLocalFileSystem.readFile(localFs, testPath, 1024);
  TestLocalFileSystem.readFile(localFs, testPath, 1025);
  readFile(localFs, testPath, 128);
  readFile(localFs, testPath, 511);
  readFile(localFs, testPath, 512);
  readFile(localFs, testPath, 513);
  readFile(localFs, testPath, 1023);
  readFile(localFs, testPath, 1024);
  readFile(localFs, testPath, 1025);
}

/**

@ -140,7 +139,7 @@ public class TestChecksumFileSystem extends TestCase {

  // Now reading the file should fail with a ChecksumException
  try {
    TestLocalFileSystem.readFile(localFs, testPath, 1024);
    readFile(localFs, testPath, 1024);
    fail("Did not throw a ChecksumException when reading truncated " +
        "crc file");
  } catch(ChecksumException ie) {

@ -149,7 +148,7 @@ public class TestChecksumFileSystem extends TestCase {
  // telling it not to verify checksums, should avoid issue.
  try {
    localFs.setVerifyChecksum(false);
    String str = TestLocalFileSystem.readFile(localFs, testPath, 1024);
    String str = readFile(localFs, testPath, 1024).toString();
    assertTrue("read", "testing truncation".equals(str));
  } finally {
    // reset for other tests

@ -29,7 +29,7 @@ public class TestDU extends TestCase {
  final static private File DU_DIR = new File(
      System.getProperty("test.build.data","/tmp"), "dutmp");

  public void setUp() throws IOException {
  public void setUp() {
    FileUtil.fullyDelete(DU_DIR);
    assertTrue(DU_DIR.mkdirs());
  }

@ -98,7 +98,7 @@ public class TestHardLink {
 * @throws IOException
 */
@BeforeClass
public static void setupClean() throws IOException {
public static void setupClean() {
  //delete source and target directories if they exist
  FileUtil.fullyDelete(src);
  FileUtil.fullyDelete(tgt_one);

@ -18,6 +18,7 @@
package org.apache.hadoop.fs;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;

@ -208,4 +209,33 @@ public class TestLocalDirAllocator extends TestCase {
    }
  }

  /** Two buffer dirs. The first dir does not exist & is on a read-only disk;
   * The second dir exists & is RW
   * getLocalPathForWrite with checkAccess set to false should create a parent
   * directory. With checkAccess true, the directory should not be created.
   * @throws Exception
   */
  public void testLocalPathForWriteDirCreation() throws IOException {
    try {
      conf.set(CONTEXT, BUFFER_DIR[0] + "," + BUFFER_DIR[1]);
      assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
      BUFFER_ROOT.setReadOnly();
      Path p1 =
          dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
      assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());

      Path p2 =
          dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
              false);
      try {
        localFs.getFileStatus(p2.getParent());
      } catch (Exception e) {
        assertEquals(e.getClass(), FileNotFoundException.class);
      }
    } finally {
      Shell.execCommand(new String[] { "chmod", "u+w", BUFFER_DIR_ROOT });
      rmBufferDirs();
    }
  }

}

@ -18,37 +18,23 @@
package org.apache.hadoop.fs;

import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;

import java.io.*;
import junit.framework.*;

import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;

/**
 * This class tests the local file system via the FileSystem abstraction.
 */
public class TestLocalFileSystem extends TestCase {
public class TestLocalFileSystem {
  private static String TEST_ROOT_DIR
    = System.getProperty("test.build.data","build/test/data/work-dir/localfs");


  static void writeFile(FileSystem fs, Path name) throws IOException {
    FSDataOutputStream stm = fs.create(name);
    stm.writeBytes("42\n");
    stm.close();
  }

  static String readFile(FileSystem fs, Path name, int buflen) throws IOException {
    byte[] b = new byte[buflen];
    int offset = 0;
    FSDataInputStream in = fs.open(name);
    for(int remaining, n;
        (remaining = b.length - offset) > 0 && (n = in.read(b, offset, remaining)) != -1;
        offset += n);
    assertEquals(offset, Math.min(b.length, in.getPos()));
    in.close();

    String s = new String(b, 0, offset);
    System.out.println("s=" + s);
    return s;
  }
  private Configuration conf;
  private FileSystem fileSys;

  private void cleanupFile(FileSystem fs, Path name) throws IOException {
    assertTrue(fs.exists(name));

@ -56,12 +42,18 @@ public class TestLocalFileSystem extends TestCase {
    assertTrue(!fs.exists(name));
  }

  @Before
  public void setup() throws IOException {
    conf = new Configuration();
    fileSys = FileSystem.getLocal(conf);
    fileSys.delete(new Path(TEST_ROOT_DIR), true);
  }

  /**
   * Test the capability of setting the working directory.
   */
  @Test
  public void testWorkingDirectory() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fileSys = FileSystem.getLocal(conf);
    Path origDir = fileSys.getWorkingDirectory();
    Path subdir = new Path(TEST_ROOT_DIR, "new");
    try {

@ -85,7 +77,7 @@ public class TestLocalFileSystem extends TestCase {
      // create files and manipulate them.
      Path file1 = new Path("file1");
      Path file2 = new Path("sub/file2");
      writeFile(fileSys, file1);
      String contents = writeFile(fileSys, file1, 1);
      fileSys.copyFromLocalFile(file1, file2);
      assertTrue(fileSys.exists(file1));
      assertTrue(fileSys.isFile(file1));

@ -103,11 +95,10 @@ public class TestLocalFileSystem extends TestCase {
      InputStream stm = fileSys.open(file1);
      byte[] buffer = new byte[3];
      int bytesRead = stm.read(buffer, 0, 3);
      assertEquals("42\n", new String(buffer, 0, bytesRead));
      assertEquals(contents, new String(buffer, 0, bytesRead));
      stm.close();
    } finally {
      fileSys.setWorkingDirectory(origDir);
      fileSys.delete(subdir, true);
    }
  }

@ -115,6 +106,7 @@ public class TestLocalFileSystem extends TestCase {
   * test Syncable interface on raw local file system
   * @throws IOException
   */
  @Test
  public void testSyncable() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf).getRawFileSystem();

@ -148,12 +140,13 @@ public class TestLocalFileSystem extends TestCase {
    }
  }

  @Test
  public void testCopy() throws IOException {
    Configuration conf = new Configuration();
    LocalFileSystem fs = FileSystem.getLocal(conf);
    Path src = new Path(TEST_ROOT_DIR, "dingo");
    Path dst = new Path(TEST_ROOT_DIR, "yak");
    writeFile(fs, src);
    writeFile(fs, src, 1);
    assertTrue(FileUtil.copy(fs, src, fs, dst, true, false, conf));
    assertTrue(!fs.exists(src) && fs.exists(dst));
    assertTrue(FileUtil.copy(fs, dst, fs, src, false, false, conf));

@ -170,9 +163,12 @@ public class TestLocalFileSystem extends TestCase {
    try {
      FileUtil.copy(fs, dst, fs, src, true, true, conf);
      fail("Failed to detect existing dir");
    } catch (IOException e) { }
    } catch (IOException e) {
      // Expected
    }
  }

  @Test
  public void testHomeDirectory() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fileSys = FileSystem.getLocal(conf);

@ -182,16 +178,18 @@ public class TestLocalFileSystem extends TestCase {
    assertEquals(home, fsHome);
  }

  @Test
  public void testPathEscapes() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path(TEST_ROOT_DIR, "foo%bar");
    writeFile(fs, path);
    writeFile(fs, path, 1);
    FileStatus status = fs.getFileStatus(path);
    assertEquals(path.makeQualified(fs), status.getPath());
    cleanupFile(fs, path);
  }

  @Test
  public void testMkdirs() throws IOException {
    Configuration conf = new Configuration();
    LocalFileSystem fs = FileSystem.getLocal(conf);

@ -199,18 +197,40 @@ public class TestLocalFileSystem extends TestCase {
    Path test_file = new Path(TEST_ROOT_DIR, "file1");
    assertTrue(fs.mkdirs(test_dir));

    writeFile(fs, test_file);
    writeFile(fs, test_file, 1);
    // creating dir over a file
    Path bad_dir = new Path(test_file, "another_dir");

    try {
      fs.mkdirs(bad_dir);
      fail("Failed to detect existing file in path");
    } catch (FileAlreadyExistsException e) { }
    } catch (FileAlreadyExistsException e) {
      // Expected
    }

    try {
      fs.mkdirs(null);
      fail("Failed to detect null in mkdir arg");
    } catch (IllegalArgumentException e) { }
    } catch (IllegalArgumentException e) {
      // Expected
    }
  }

  /** Test deleting a file, directory, and non-existent path */
  @Test
  public void testBasicDelete() throws IOException {
    Configuration conf = new Configuration();
    LocalFileSystem fs = FileSystem.getLocal(conf);
    Path dir1 = new Path(TEST_ROOT_DIR, "dir1");
    Path file1 = new Path(TEST_ROOT_DIR, "file1");
    Path file2 = new Path(TEST_ROOT_DIR+"/dir1", "file2");
    Path file3 = new Path(TEST_ROOT_DIR, "does-not-exist");
    assertTrue(fs.mkdirs(dir1));
    writeFile(fs, file1, 1);
    writeFile(fs, file2, 1);
    assertFalse("Returned true deleting non-existant path",
        fs.delete(file3));
    assertTrue("Did not delete file", fs.delete(file1));
    assertTrue("Did not delete non-empty dir", fs.delete(dir1));
  }
}

@ -19,9 +19,9 @@ package org.apache.hadoop.fs;
|
|||
|
||||
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
|
||||
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
|
@ -42,14 +42,6 @@ public class TestTrash extends TestCase {
|
|||
new Path(new File(System.getProperty("test.build.data","/tmp")
|
||||
).toURI().toString().replace(' ', '+'), "testTrash");
|
||||
|
||||
protected static Path writeFile(FileSystem fs, Path f) throws IOException {
|
||||
DataOutputStream out = fs.create(f);
|
||||
out.writeBytes("dhruba: " + f);
|
||||
out.close();
|
||||
assertTrue(fs.exists(f));
|
||||
return f;
|
||||
}
|
||||
|
||||
protected static Path mkdir(FileSystem fs, Path p) throws IOException {
|
||||
assertTrue(fs.mkdirs(p));
|
||||
assertTrue(fs.exists(p));
|
||||
|
@ -139,7 +131,7 @@ public class TestTrash extends TestCase {
|
|||
|
||||
// Second, create a file in that directory.
|
||||
Path myFile = new Path(base, "test/mkdirs/myFile");
|
||||
writeFile(fs, myFile);
|
||||
writeFile(fs, myFile, 10);
|
||||
|
||||
// Verify that expunge without Trash directory
|
||||
// won't throw Exception
|
||||
|
@ -176,7 +168,7 @@ public class TestTrash extends TestCase {
|
|||
}
|
||||
|
||||
// Verify that we can recreate the file
|
||||
writeFile(fs, myFile);
|
||||
writeFile(fs, myFile, 10);
|
||||
|
||||
// Verify that we succeed in removing the file we re-created
|
||||
{
|
||||
|
@ -194,7 +186,7 @@ public class TestTrash extends TestCase {
|
|||
}
|
||||
|
||||
// Verify that we can recreate the file
|
||||
writeFile(fs, myFile);
|
||||
writeFile(fs, myFile, 10);
|
||||
|
||||
// Verify that we succeed in removing the whole directory
|
||||
// along with the file inside it.
|
||||
|
@ -234,7 +226,7 @@ public class TestTrash extends TestCase {
|
|||
{
|
||||
Path toErase = new Path(trashRoot, "toErase");
|
||||
int retVal = -1;
|
||||
writeFile(trashRootFs, toErase);
|
||||
writeFile(trashRootFs, toErase, 10);
|
||||
try {
|
||||
retVal = shell.run(new String[] {"-rm", toErase.toString()});
|
||||
} catch (Exception e) {
|
||||
|
@ -265,7 +257,7 @@ public class TestTrash extends TestCase {
|
|||
|
||||
// recreate directory and file
|
||||
mkdir(fs, myPath);
|
||||
writeFile(fs, myFile);
|
||||
writeFile(fs, myFile, 10);
|
||||
|
||||
// remove file first, then remove directory
|
||||
{
|
||||
|
@ -316,7 +308,7 @@ public class TestTrash extends TestCase {
|
|||
|
||||
// recreate directory and file
|
||||
mkdir(fs, myPath);
|
||||
writeFile(fs, myFile);
|
||||
writeFile(fs, myFile, 10);
|
||||
|
||||
// Verify that skip trash option really skips the trash for files (rm)
|
||||
{
|
||||
|
@ -346,7 +338,7 @@ public class TestTrash extends TestCase {
|
|||
|
||||
// recreate directory and file
|
||||
mkdir(fs, myPath);
|
||||
writeFile(fs, myFile);
|
||||
writeFile(fs, myFile, 10);
|
||||
|
||||
// Verify that skip trash option really skips the trash for rmr
|
||||
{
|
||||
|
@ -392,7 +384,7 @@ public class TestTrash extends TestCase {
|
|||
for(int i=0;i<num_runs; i++) {
|
||||
|
||||
//create file
|
||||
writeFile(fs, myFile);
|
||||
writeFile(fs, myFile, 10);
|
||||
|
||||
// delete file
|
||||
try {
|
||||
|
@ -452,8 +444,7 @@ public class TestTrash extends TestCase {
|
|||
lfs.delete(p, true);
|
||||
}
|
||||
try {
|
||||
f = writeFile(lfs, f);
|
||||
|
||||
writeFile(lfs, f, 10);
|
||||
FileSystem.closeAll();
|
||||
FileSystem localFs = FileSystem.get(URI.create("file:///"), conf);
|
||||
Trash lTrash = new Trash(localFs, conf);
|
||||
|
@ -515,7 +506,7 @@ public class TestTrash extends TestCase {
|
|||
while (true) {
|
||||
// Create a file with a new name
|
||||
Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++);
|
||||
writeFile(fs, myFile);
|
||||
writeFile(fs, myFile, 10);
|
||||
|
||||
// Delete the file to trash
|
||||
String[] args = new String[2];
|
||||
|
@ -606,7 +597,7 @@ public class TestTrash extends TestCase {
|
|||
int iters = 1000;
|
||||
for(int i=0;i<iters; i++) {
|
||||
|
||||
writeFile(fs, myFile);
|
||||
writeFile(fs, myFile, 10);
|
||||
|
||||
start = System.currentTimeMillis();
|
||||
|
||||
|
|
|
@ -18,9 +18,7 @@
|
|||
package org.apache.hadoop.http;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.io.PrintWriter;
|
||||
import java.net.URLConnection;
|
||||
import java.net.HttpURLConnection;
|
||||
import java.net.URL;
|
||||
import java.util.Arrays;
|
||||
|
@ -52,6 +50,7 @@ import org.apache.commons.logging.LogFactory;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
|
||||
import org.apache.hadoop.http.resource.JerseyResource;
|
||||
import org.apache.hadoop.security.Groups;
|
||||
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
|
||||
import org.apache.hadoop.security.authorize.AccessControlList;
|
||||
|
@ -59,6 +58,7 @@ import org.junit.AfterClass;
|
|||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
import org.mortbay.util.ajax.JSON;
|
||||
|
||||
public class TestHttpServer extends HttpServerFunctionalTest {
|
||||
static final Log LOG = LogFactory.getLog(TestHttpServer.class);
|
||||
|
@ -75,7 +75,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
|
|||
) throws ServletException, IOException {
|
||||
PrintWriter out = response.getWriter();
|
||||
Map<String, String[]> params = request.getParameterMap();
|
||||
SortedSet<String> keys = new TreeSet(params.keySet());
|
||||
SortedSet<String> keys = new TreeSet<String>(params.keySet());
|
||||
for(String key: keys) {
|
||||
out.print(key);
|
||||
out.print(':');
|
||||
|
@ -101,7 +101,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
|
|||
HttpServletResponse response
|
||||
) throws ServletException, IOException {
|
||||
PrintWriter out = response.getWriter();
|
||||
SortedSet<String> sortedKeys = new TreeSet();
|
||||
SortedSet<String> sortedKeys = new TreeSet<String>();
|
||||
Enumeration<String> keys = request.getParameterNames();
|
||||
while(keys.hasMoreElements()) {
|
||||
sortedKeys.add(keys.nextElement());
|
||||
|
@ -118,7 +118,6 @@ public class TestHttpServer extends HttpServerFunctionalTest {
|
|||
|
||||
@SuppressWarnings("serial")
|
||||
public static class HtmlContentServlet extends HttpServlet {
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public void doGet(HttpServletRequest request,
|
||||
HttpServletResponse response
|
||||
|
@ -131,10 +130,14 @@ public class TestHttpServer extends HttpServerFunctionalTest {
|
|||
}
|
||||
|
||||
@BeforeClass public static void setup() throws Exception {
|
||||
server = createTestServer();
|
||||
Configuration conf = new Configuration();
|
||||
conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
|
||||
server = createTestServer(conf);
|
||||
server.addServlet("echo", "/echo", EchoServlet.class);
|
||||
server.addServlet("echomap", "/echomap", EchoMapServlet.class);
|
||||
server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
|
||||
server.addJerseyResourcePackage(
|
||||
JerseyResource.class.getPackage().getName(), "/jersey/*");
|
||||
server.start();
|
||||
baseUrl = getServerURL(server);
|
||||
LOG.info("HTTP server started: "+ baseUrl);
|
||||
|
@ -161,7 +164,8 @@ public class TestHttpServer extends HttpServerFunctionalTest {
|
|||
assertEquals("a:b\nc:d\n",
|
||||
readOutput(new URL(baseUrl, "/echo?a=b&c=d")));
|
||||
int serverThreads = server.webServer.getThreadPool().getThreads();
|
||||
assertTrue(serverThreads <= MAX_THREADS);
|
||||
assertTrue("More threads are started than expected, Server Threads count: "
|
||||
+ serverThreads, serverThreads <= MAX_THREADS);
|
||||
System.out.println("Number of threads = " + serverThreads +
|
||||
" which is less or equal than the max = " + MAX_THREADS);
|
||||
} catch (Exception e) {
|
||||
|
@ -404,4 +408,18 @@ public class TestHttpServer extends HttpServerFunctionalTest {
|
|||
values, parameterValues));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private static Map<String, Object> parse(String jsonString) {
|
||||
return (Map<String, Object>)JSON.parse(jsonString);
|
||||
}
|
||||
|
||||
@Test public void testJersey() throws Exception {
|
||||
LOG.info("BEGIN testJersey()");
|
||||
final String js = readOutput(new URL(baseUrl, "/jersey/foo?op=bar"));
|
||||
final Map<String, Object> m = parse(js);
|
||||
LOG.info("m=" + m);
|
||||
assertEquals("foo", m.get(JerseyResource.PATH));
|
||||
assertEquals("bar", m.get(JerseyResource.OP));
|
||||
LOG.info("END testJersey()");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.http.resource;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import javax.ws.rs.DefaultValue;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.PathParam;
|
||||
import javax.ws.rs.Produces;
|
||||
import javax.ws.rs.QueryParam;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
import javax.ws.rs.core.Response;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.mortbay.util.ajax.JSON;
|
||||
|
||||
/**
|
||||
* A simple Jersey resource class TestHttpServer.
|
||||
* The servlet simply puts the path and the op parameter in a map
|
||||
* and return it in JSON format in the response.
|
||||
*/
|
||||
@Path("")
|
||||
public class JerseyResource {
|
||||
static final Log LOG = LogFactory.getLog(JerseyResource.class);
|
||||
|
||||
public static final String PATH = "path";
|
||||
public static final String OP = "op";
|
||||
|
||||
@GET
|
||||
@Path("{" + PATH + ":.*}")
|
||||
@Produces({MediaType.APPLICATION_JSON})
|
||||
public Response get(
|
||||
@PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path,
|
||||
@QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op
|
||||
) throws IOException {
|
||||
LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op);
|
||||
|
||||
final Map<String, Object> m = new TreeMap<String, Object>();
|
||||
m.put(PATH, path);
|
||||
m.put(OP, op);
|
||||
final String js = JSON.toString(m);
|
||||
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
|
||||
}
|
||||
}
|
|
@@ -50,7 +50,7 @@ public class TestNativeIO {
  }

  @Before
  public void setupTestDir() throws IOException {
  public void setupTestDir() {
    FileUtil.fullyDelete(TEST_DIR);
    TEST_DIR.mkdirs();
  }

@@ -97,7 +97,7 @@ public class TestIPC {
  }

  @Override
  public Writable call(Class<?> protocol, Writable param, long receiveTime)
  public Writable call(String protocol, Writable param, long receiveTime)
      throws IOException {
    if (sleep) {
      // sleep a bit

@@ -72,7 +72,7 @@ public class TestIPCServerResponder extends TestCase {
  }

  @Override
  public Writable call(Class<?> protocol, Writable param, long receiveTime)
  public Writable call(String protocol, Writable param, long receiveTime)
      throws IOException {
    if (sleep) {
      try {

@ -0,0 +1,255 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.ipc;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
|
||||
import org.junit.Assert;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.junit.Before;
|
||||
import org.junit.After;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestMultipleProtocolServer {
|
||||
private static final String ADDRESS = "0.0.0.0";
|
||||
private static InetSocketAddress addr;
|
||||
private static RPC.Server server;
|
||||
|
||||
private static Configuration conf = new Configuration();
|
||||
|
||||
|
||||
@ProtocolInfo(protocolName="Foo")
|
||||
interface Foo0 extends VersionedProtocol {
|
||||
public static final long versionID = 0L;
|
||||
String ping() throws IOException;
|
||||
|
||||
}
|
||||
|
||||
@ProtocolInfo(protocolName="Foo")
|
||||
interface Foo1 extends VersionedProtocol {
|
||||
public static final long versionID = 1L;
|
||||
String ping() throws IOException;
|
||||
String ping2() throws IOException;
|
||||
}
|
||||
|
||||
@ProtocolInfo(protocolName="Foo")
|
||||
interface FooUnimplemented extends VersionedProtocol {
|
||||
public static final long versionID = 2L;
|
||||
String ping() throws IOException;
|
||||
}
|
||||
|
||||
interface Mixin extends VersionedProtocol{
|
||||
public static final long versionID = 0L;
|
||||
void hello() throws IOException;
|
||||
}
|
||||
interface Bar extends Mixin, VersionedProtocol {
|
||||
public static final long versionID = 0L;
|
||||
int echo(int i) throws IOException;
|
||||
}
|
||||
|
||||
|
||||
|
||||
class Foo0Impl implements Foo0 {
|
||||
|
||||
@Override
|
||||
public long getProtocolVersion(String protocol, long clientVersion)
|
||||
throws IOException {
|
||||
return Foo0.versionID;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public ProtocolSignature getProtocolSignature(String protocol,
|
||||
long clientVersion, int clientMethodsHash) throws IOException {
|
||||
Class<? extends VersionedProtocol> inter;
|
||||
try {
|
||||
inter = (Class<? extends VersionedProtocol>)getClass().
|
||||
getGenericInterfaces()[0];
|
||||
} catch (Exception e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
||||
getProtocolVersion(protocol, clientVersion), inter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String ping() {
|
||||
return "Foo0";
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
class Foo1Impl implements Foo1 {
|
||||
|
||||
@Override
|
||||
public long getProtocolVersion(String protocol, long clientVersion)
|
||||
throws IOException {
|
||||
return Foo1.versionID;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public ProtocolSignature getProtocolSignature(String protocol,
|
||||
long clientVersion, int clientMethodsHash) throws IOException {
|
||||
Class<? extends VersionedProtocol> inter;
|
||||
try {
|
||||
inter = (Class<? extends VersionedProtocol>)getClass().
|
||||
getGenericInterfaces()[0];
|
||||
} catch (Exception e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
||||
getProtocolVersion(protocol, clientVersion), inter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String ping() {
|
||||
return "Foo1";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String ping2() {
|
||||
return "Foo1";
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
class BarImpl implements Bar {
|
||||
|
||||
@Override
|
||||
public long getProtocolVersion(String protocol, long clientVersion)
|
||||
throws IOException {
|
||||
return Bar.versionID;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public ProtocolSignature getProtocolSignature(String protocol,
|
||||
long clientVersion, int clientMethodsHash) throws IOException {
|
||||
Class<? extends VersionedProtocol> inter;
|
||||
try {
|
||||
inter = (Class<? extends VersionedProtocol>)getClass().
|
||||
getGenericInterfaces()[0];
|
||||
} catch (Exception e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
||||
getProtocolVersion(protocol, clientVersion), inter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int echo(int i) {
|
||||
return i;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void hello() {
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
// create a server with two handlers
|
||||
server = RPC.getServer(Foo0.class,
|
||||
new Foo0Impl(), ADDRESS, 0, 2, false, conf, null);
|
||||
server.addProtocol(Foo1.class, new Foo1Impl());
|
||||
server.addProtocol(Bar.class, new BarImpl());
|
||||
server.addProtocol(Mixin.class, new BarImpl());
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
server.stop();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test1() throws IOException {
|
||||
ProtocolProxy<?> proxy;
|
||||
proxy = RPC.getProtocolProxy(Foo0.class, Foo0.versionID, addr, conf);
|
||||
|
||||
Foo0 foo0 = (Foo0)proxy.getProxy();
|
||||
Assert.assertEquals("Foo0", foo0.ping());
|
||||
|
||||
|
||||
proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf);
|
||||
|
||||
|
||||
Foo1 foo1 = (Foo1)proxy.getProxy();
|
||||
Assert.assertEquals("Foo1", foo1.ping());
|
||||
Assert.assertEquals("Foo1", foo1.ping());
|
||||
|
||||
|
||||
proxy = RPC.getProtocolProxy(Bar.class, Foo1.versionID, addr, conf);
|
||||
|
||||
|
||||
Bar bar = (Bar)proxy.getProxy();
|
||||
Assert.assertEquals(99, bar.echo(99));
|
||||
|
||||
// Now test Mixin class method
|
||||
|
||||
Mixin mixin = bar;
|
||||
mixin.hello();
|
||||
}
|
||||
|
||||
|
||||
// Server does not implement the FooUnimplemented version of protocol Foo.
|
||||
// See that calls to it fail.
|
||||
@Test(expected=IOException.class)
|
||||
public void testNonExistingProtocol() throws IOException {
|
||||
ProtocolProxy<?> proxy;
|
||||
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
|
||||
FooUnimplemented.versionID, addr, conf);
|
||||
|
||||
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
|
||||
foo.ping();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* getProtocolVersion of an unimplemented version should return highest version
|
||||
* Similarly getProtocolSignature should work.
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testNonExistingProtocol2() throws IOException {
|
||||
ProtocolProxy<?> proxy;
|
||||
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
|
||||
FooUnimplemented.versionID, addr, conf);
|
||||
|
||||
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
|
||||
Assert.assertEquals(Foo1.versionID,
|
||||
foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class),
|
||||
FooUnimplemented.versionID));
|
||||
foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class),
|
||||
FooUnimplemented.versionID, 0);
|
||||
}
|
||||
|
||||
@Test(expected=IOException.class)
|
||||
public void testIncorrectServerCreation() throws IOException {
|
||||
RPC.getServer(Foo1.class,
|
||||
new Foo0Impl(), ADDRESS, 0, 2, false, conf, null);
|
||||
}
|
||||
}
|
|
@ -39,7 +39,7 @@ import org.junit.Test;
|
|||
public class TestRPCCompatibility {
|
||||
private static final String ADDRESS = "0.0.0.0";
|
||||
private static InetSocketAddress addr;
|
||||
private static Server server;
|
||||
private static RPC.Server server;
|
||||
private ProtocolProxy<?> proxy;
|
||||
|
||||
public static final Log LOG =
|
||||
|
@ -52,10 +52,12 @@ public class TestRPCCompatibility {
|
|||
void ping() throws IOException;
|
||||
}
|
||||
|
||||
public interface TestProtocol1 extends TestProtocol0 {
|
||||
public interface TestProtocol1 extends VersionedProtocol, TestProtocol0 {
|
||||
String echo(String value) throws IOException;
|
||||
}
|
||||
|
||||
@ProtocolInfo(protocolName=
|
||||
"org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
|
||||
public interface TestProtocol2 extends TestProtocol1 {
|
||||
int echo(int value) throws IOException;
|
||||
}
|
||||
|
@ -89,11 +91,23 @@ public class TestRPCCompatibility {
|
|||
public static class TestImpl1 extends TestImpl0 implements TestProtocol1 {
|
||||
@Override
|
||||
public String echo(String value) { return value; }
|
||||
@Override
|
||||
public long getProtocolVersion(String protocol,
|
||||
long clientVersion) throws IOException {
|
||||
return TestProtocol1.versionID;
|
||||
}
|
||||
}
|
||||
|
||||
public static class TestImpl2 extends TestImpl1 implements TestProtocol2 {
|
||||
@Override
|
||||
public int echo(int value) { return value; }
|
||||
|
||||
@Override
|
||||
public long getProtocolVersion(String protocol,
|
||||
long clientVersion) throws IOException {
|
||||
return TestProtocol2.versionID;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@After
|
||||
|
@ -109,8 +123,10 @@ public class TestRPCCompatibility {
|
|||
@Test // old client vs new server
|
||||
public void testVersion0ClientVersion1Server() throws Exception {
|
||||
// create a server with two handlers
|
||||
TestImpl1 impl = new TestImpl1();
|
||||
server = RPC.getServer(TestProtocol1.class,
|
||||
new TestImpl1(), ADDRESS, 0, 2, false, conf, null);
|
||||
impl, ADDRESS, 0, 2, false, conf, null);
|
||||
server.addProtocol(TestProtocol0.class, impl);
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
|
@ -172,8 +188,10 @@ public class TestRPCCompatibility {
|
|||
@Test // Compatible new client & old server
|
||||
public void testVersion2ClientVersion1Server() throws Exception {
|
||||
// create a server with two handlers
|
||||
TestImpl1 impl = new TestImpl1();
|
||||
server = RPC.getServer(TestProtocol1.class,
|
||||
new TestImpl1(), ADDRESS, 0, 2, false, conf, null);
|
||||
impl, ADDRESS, 0, 2, false, conf, null);
|
||||
server.addProtocol(TestProtocol0.class, impl);
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
|
@ -190,8 +208,10 @@ public class TestRPCCompatibility {
|
|||
@Test // equal version client and server
|
||||
public void testVersion2ClientVersion2Server() throws Exception {
|
||||
// create a server with two handlers
|
||||
TestImpl2 impl = new TestImpl2();
|
||||
server = RPC.getServer(TestProtocol2.class,
|
||||
new TestImpl2(), ADDRESS, 0, 2, false, conf, null);
|
||||
impl, ADDRESS, 0, 2, false, conf, null);
|
||||
server.addProtocol(TestProtocol0.class, impl);
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
|
@ -250,14 +270,16 @@ public class TestRPCCompatibility {
|
|||
assertEquals(hash1, hash2);
|
||||
}
|
||||
|
||||
@ProtocolInfo(protocolName=
|
||||
"org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
|
||||
public interface TestProtocol4 extends TestProtocol2 {
|
||||
public static final long versionID = 1L;
|
||||
public static final long versionID = 4L;
|
||||
int echo(int value) throws IOException;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVersionMismatch() throws IOException {
|
||||
server = RPC.getServer(TestProtocol2.class, new TestImpl0(), ADDRESS, 0, 2,
|
||||
server = RPC.getServer(TestProtocol2.class, new TestImpl2(), ADDRESS, 0, 2,
|
||||
false, conf, null);
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
@ -268,7 +290,8 @@ public class TestRPCCompatibility {
|
|||
proxy.echo(21);
|
||||
fail("The call must throw VersionMismatch exception");
|
||||
} catch (IOException ex) {
|
||||
Assert.assertTrue(ex.getMessage().contains("VersionMismatch"));
|
||||
Assert.assertTrue("Expected version mismatch but got " + ex.getMessage(),
|
||||
ex.getMessage().contains("VersionMismatch"));
|
||||
}
|
||||
}
|
||||
}
|
|
@ -26,12 +26,17 @@ import java.net.DatagramPacket;
|
|||
import java.net.DatagramSocket;
|
||||
import java.net.SocketException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.metrics2.AbstractMetric;
|
||||
import org.apache.hadoop.metrics2.MetricsRecord;
|
||||
import org.apache.hadoop.metrics2.MetricsTag;
|
||||
import org.apache.hadoop.metrics2.annotation.Metric;
|
||||
import org.apache.hadoop.metrics2.annotation.Metrics;
|
||||
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
|
||||
|
@ -54,6 +59,44 @@ public class TestGangliaMetrics {
|
|||
"test.s1rec.S1NumOps",
|
||||
"test.s1rec.S1AvgTime" };
|
||||
|
||||
@Test
|
||||
public void testTagsForPrefix() throws Exception {
|
||||
ConfigBuilder cb = new ConfigBuilder()
|
||||
.add("test.sink.ganglia.tagsForPrefix.all", "*")
|
||||
.add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, NumActiveSources")
|
||||
.add("test.sink.ganglia.tagsForPrefix.none", "");
|
||||
GangliaSink30 sink = new GangliaSink30();
|
||||
sink.init(cb.subset("test.sink.ganglia"));
|
||||
|
||||
List<MetricsTag> tags = new ArrayList<MetricsTag>();
|
||||
tags.add(new MetricsTag(MsInfo.Context, "all"));
|
||||
tags.add(new MetricsTag(MsInfo.NumActiveSources, "foo"));
|
||||
tags.add(new MetricsTag(MsInfo.NumActiveSinks, "bar"));
|
||||
tags.add(new MetricsTag(MsInfo.NumAllSinks, "haa"));
|
||||
tags.add(new MetricsTag(MsInfo.Hostname, "host"));
|
||||
Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
|
||||
MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 1, tags, metrics);
|
||||
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sink.appendPrefix(record, sb);
|
||||
assertEquals(".NumActiveSources=foo.NumActiveSinks=bar.NumAllSinks=haa", sb.toString());
|
||||
|
||||
tags.set(0, new MetricsTag(MsInfo.Context, "some"));
|
||||
sb = new StringBuilder();
|
||||
sink.appendPrefix(record, sb);
|
||||
assertEquals(".NumActiveSources=foo.NumActiveSinks=bar", sb.toString());
|
||||
|
||||
tags.set(0, new MetricsTag(MsInfo.Context, "none"));
|
||||
sb = new StringBuilder();
|
||||
sink.appendPrefix(record, sb);
|
||||
assertEquals("", sb.toString());
|
||||
|
||||
tags.set(0, new MetricsTag(MsInfo.Context, "nada"));
|
||||
sb = new StringBuilder();
|
||||
sink.appendPrefix(record, sb);
|
||||
assertEquals("", sb.toString());
|
||||
}
|
||||
|
||||
@Test public void testGangliaMetrics2() throws Exception {
|
||||
ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
|
||||
.add("test.sink.gsink30.context", "test") // filter out only "test"
|
||||
|
|
|
@@ -18,7 +18,7 @@ package org.apache.hadoop.security;


import junit.framework.TestCase;
import org.apache.hadoop.alfredo.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.mockito.Mockito;

@@ -49,8 +49,7 @@ public class TestRunJar extends TestCase {
  }

  @After
  protected void tearDown()
      throws Exception {
  protected void tearDown() {
    FileUtil.fullyDelete(TEST_ROOT_DIR);
  }

@@ -29,6 +29,7 @@

  <modules>
    <module>hadoop-auth</module>
    <module>hadoop-auth-examples</module>
    <module>hadoop-common</module>
    <module>hadoop-annotations</module>
  </modules>

@@ -5,9 +5,31 @@ Trunk (unreleased changes)
    HDFS-395. DFS Scalability: Incremental block reports. (Tomasz Nykiel
    via hairong)

  IMPROVEMENTS

    HADOOP-7524 Change RPC to allow multiple protocols including multuple versions of the same protocol (sanjay Radia)

    HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
    HdfsConstants. (Harsh J Chouraria via atm)
    HDFS-2197. Refactor RPC call implementations out of NameNode class (todd)

    HDFS-2018. Move all journal stream management code into one place.
    (Ivan Kelly via jitendra)

    HDFS-2223. Untangle depencencies between NN components (todd)

  BUG FIXES
    HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)

    HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
    via atm)
    HDFS-2310. TestBackupNode fails since HADOOP-7524 went in.
    (Ivan Kelly via todd)

    HDFS-2313. Rat excludes has a typo for excluding editsStored files. (atm)

    HDFS-2314. MRV1 test compilation broken after HDFS-2197 (todd)

Release 0.23.0 - Unreleased

  INCOMPATIBLE CHANGES

@@ -687,6 +709,9 @@ Release 0.23.0 - Unreleased
    HDFS-2266. Add Namesystem and SafeMode interfaces to avoid directly
    referring to FSNamesystem in BlockManager. (szetszwo)

    HDFS-1217. Change some NameNode methods from public to package private.
    (Laxman via szetszwo)

  OPTIMIZATIONS

    HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

@@ -1003,6 +1028,9 @@ Release 0.23.0 - Unreleased
    HDFS-2286. DataXceiverServer logs AsynchronousCloseException at shutdown
    (todd)

    HDFS-2289. Ensure jsvc is bundled with the HDFS distribution artifact.
    (Alejandro Abdelnur via acmurthy)

  BREAKDOWN OF HDFS-1073 SUBTASKS

    HDFS-1521. Persist transaction ID on disk between NN restarts.

@@ -1086,6 +1114,7 @@ Release 0.22.0 - Unreleased
    (jghoman)

    HDFS-1330. Make RPCs to DataNodes timeout. (hairong)
    Added additional unit tests per HADOOP-6889. (John George via mattf)

    HDFS-202. HDFS support of listLocatedStatus introduced in HADOOP-6870.
    HDFS piggyback block locations to each file status when listing a

@@ -1541,6 +1570,11 @@ Release 0.22.0 - Unreleased
    HDFS-1981. NameNode does not saveNamespace() when editsNew is empty.
    (Uma Maheswara Rao G via shv)

    HDFS-2258. Reset lease limits to default values in TestLeaseRecovery2. (shv)

    HDFS-2232. Generalize regular expressions in TestHDFSCLI.
    (Plamen Jeliazkov via shv)

  Release 0.21.1 - Unreleased
    HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)

@@ -296,7 +296,7 @@
        <exclude>src/test/all-tests</exclude>
        <exclude>src/test/resources/*.tgz</exclude>
        <exclude>src/test/resources/data*</exclude>
        <exclude>src/test/resources/editStored*</exclude>
        <exclude>src/test/resources/editsStored*</exclude>
        <exclude>src/test/resources/empty-file</exclude>
        <exclude>src/main/webapps/datanode/robots.txt</exclude>
        <exclude>src/main/docs/releasenotes.html</exclude>

@ -304,6 +304,56 @@
|
|||
</excludes>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-antrun-plugin</artifactId>
|
||||
<executions>
|
||||
<execution>
|
||||
<id>xprepare-package-hadoop-daemon</id>
|
||||
<phase>prepare-package</phase>
|
||||
<goals>
|
||||
<goal>run</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<target>
|
||||
<condition property="commons.daemon.os.name" value="darwin">
|
||||
<os name="Mac OS X"/>
|
||||
</condition>
|
||||
<condition property="commons.daemon.os.arch" value="universal">
|
||||
<os name="Mac OS X"/>
|
||||
</condition>
|
||||
<condition property="commons.daemon.os.name" value="linux">
|
||||
<os name="Linux" />
|
||||
</condition>
|
||||
<!-- Set commons.daemon.os.arch to either i686 or x86_64 for GNU/Linux -->
|
||||
<condition property="commons.daemon.os.arch" value="x86_64">
|
||||
<os name="Linux" arch="amd64"/>
|
||||
</condition>
|
||||
<condition property="commons.daemon.os.arch" value="i686">
|
||||
<os name="Linux" /> <!-- This is a guess -->
|
||||
</condition>
|
||||
<property name="commons.daemon.tar.name"
|
||||
value="commons-daemon-${commons-daemon.version}-bin-${commons.daemon.os.name}-${commons.daemon.os.arch}.tar.gz"/>
|
||||
|
||||
<property name="commons.daemon.download.dir"
|
||||
value="${project.build.directory}/downloads/commons-daemon"/>
|
||||
<delete dir="${commons.daemon.download.dir}"/>
|
||||
<mkdir dir="${commons.daemon.download.dir}"/>
|
||||
<get src="http://archive.apache.org/dist/commons/daemon/binaries/${commons-daemon.version}/${commons.daemon.os.name}/${commons.daemon.tar.name}"
|
||||
dest="${commons.daemon.download.dir}/${commons.daemon.tar.name}" verbose="true" skipexisting="true"/>
|
||||
<untar compression="gzip" src="${commons.daemon.download.dir}/${commons.daemon.tar.name}"
|
||||
dest="${commons.daemon.download.dir}"/>
|
||||
<copy file="${commons.daemon.download.dir}/jsvc"
|
||||
todir="${project.build.directory}/${project.artifactId}-${project.version}/libexec"
|
||||
verbose="true"/>
|
||||
<chmod perm="ugo+x" type="file">
|
||||
<fileset file="${project.build.directory}/${project.artifactId}-${project.version}/libexec/jsvc"/>
|
||||
</chmod>
|
||||
</target>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
|
|
|
@@ -118,7 +118,7 @@ if [ "$starting_secure_dn" = "true" ]; then
  HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
fi

exec "$HADOOP_HDFS_HOME/bin/jsvc" \
exec "$HADOOP_HDFS_HOME/libexec/jsvc" \
  -Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \
  -errfile "$HADOOP_LOG_DIR/jsvc.err" \
  -pidfile "$HADOOP_SECURE_DN_PID" \

@@ -505,7 +505,7 @@
  using <code>'bin/hadoop dfsadmin -safemode'</code> command. NameNode front
  page shows whether Safemode is on or off. A more detailed
  description and configuration is maintained as JavaDoc for
  <a href="http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/dfs/NameNode.html#setSafeMode(org.apache.hadoop.dfs.FSConstants.SafeModeAction)"><code>setSafeMode()</code></a>.
  <a href="http://hadoop.apache.org/core/docs/current/api/org/apache/hadoop/dfs/NameNode.html#setSafeMode(org.apache.hadoop.dfs.HdfsConstants.SafeModeAction)"><code>setSafeMode()</code></a>.
  </p>

  </section> <section> <title> fsck </title>

@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;

@@ -70,9 +70,9 @@ public class Hdfs extends AbstractFileSystem {
   * @throws IOException
   */
  Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
    super(theUri, FSConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);

    if (!theUri.getScheme().equalsIgnoreCase(FSConstants.HDFS_URI_SCHEME)) {
    if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
      throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
    }
    String host = theUri.getHost();

@ -60,10 +60,10 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
|
|||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
|
@ -77,7 +77,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
|
|||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
|
@ -156,14 +156,14 @@ public class DFSClient implements java.io.Closeable {
|
|||
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
|
||||
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
|
||||
confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
|
||||
HdfsConstants.WRITE_TIMEOUT);
|
||||
HdfsServerConstants.WRITE_TIMEOUT);
|
||||
ioBufferSize = conf.getInt(
|
||||
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
|
||||
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
|
||||
bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
|
||||
DFS_BYTES_PER_CHECKSUM_DEFAULT);
|
||||
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
|
||||
HdfsConstants.READ_TIMEOUT);
|
||||
HdfsServerConstants.READ_TIMEOUT);
|
||||
/** dfs.write.packet.size is an internal config variable */
|
||||
writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
|
||||
DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
|
||||
|
@ -279,12 +279,12 @@ public class DFSClient implements java.io.Closeable {
|
|||
*/
|
||||
int getDatanodeWriteTimeout(int numNodes) {
|
||||
return (dfsClientConf.confTime > 0) ?
|
||||
(dfsClientConf.confTime + HdfsConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
|
||||
(dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
|
||||
}
|
||||
|
||||
int getDatanodeReadTimeout(int numNodes) {
|
||||
return dfsClientConf.socketTimeout > 0 ?
|
||||
(HdfsConstants.READ_TIMEOUT_EXTENSION * numNodes +
|
||||
(HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes +
|
||||
dfsClientConf.socketTimeout) : 0;
|
||||
}
|
||||
|
||||
|
@ -1046,7 +1046,7 @@ public class DFSClient implements java.io.Closeable {
|
|||
|
||||
out = new DataOutputStream(
|
||||
new BufferedOutputStream(NetUtils.getOutputStream(sock),
|
||||
FSConstants.SMALL_BUFFER_SIZE));
|
||||
HdfsConstants.SMALL_BUFFER_SIZE));
|
||||
in = new DataInputStream(NetUtils.getInputStream(sock));
|
||||
|
||||
if (LOG.isDebugEnabled()) {
|
||||
|
@ -1225,7 +1225,7 @@ public class DFSClient implements java.io.Closeable {
|
|||
/**
|
||||
* Enter, leave or get safe mode.
|
||||
*
|
||||
* @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
|
||||
* @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction)
|
||||
*/
|
||||
public boolean setSafeMode(SafeModeAction action) throws IOException {
|
||||
return namenode.setSafeMode(action);
|
||||
|
@ -1293,7 +1293,7 @@ public class DFSClient implements java.io.Closeable {
|
|||
}
|
||||
|
||||
/**
|
||||
* @see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)
|
||||
* @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction)
|
||||
*/
|
||||
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
|
||||
throws IOException {
|
||||
|
@ -1392,10 +1392,10 @@ public class DFSClient implements java.io.Closeable {
|
|||
void setQuota(String src, long namespaceQuota, long diskspaceQuota)
|
||||
throws IOException {
|
||||
// sanity check
|
||||
if ((namespaceQuota <= 0 && namespaceQuota != FSConstants.QUOTA_DONT_SET &&
|
||||
namespaceQuota != FSConstants.QUOTA_RESET) ||
|
||||
(diskspaceQuota <= 0 && diskspaceQuota != FSConstants.QUOTA_DONT_SET &&
|
||||
diskspaceQuota != FSConstants.QUOTA_RESET)) {
|
||||
if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
|
||||
namespaceQuota != HdfsConstants.QUOTA_RESET) ||
|
||||
(diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET &&
|
||||
diskspaceQuota != HdfsConstants.QUOTA_RESET)) {
|
||||
throw new IllegalArgumentException("Invalid values for quota : " +
|
||||
namespaceQuota + " and " +
|
||||
diskspaceQuota);
|
||||
|
|
|
@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
|||
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
|
@ -166,7 +166,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
this.seqno = HEART_BEAT_SEQNO;
|
||||
|
||||
buffer = null;
|
||||
int packetSize = PacketHeader.PKT_HEADER_LEN + FSConstants.BYTES_IN_INTEGER;
|
||||
int packetSize = PacketHeader.PKT_HEADER_LEN + HdfsConstants.BYTES_IN_INTEGER;
|
||||
buf = new byte[packetSize];
|
||||
|
||||
checksumStart = dataStart = packetSize;
|
||||
|
@ -234,12 +234,12 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
dataStart - checksumLen , checksumLen);
|
||||
}
|
||||
|
||||
int pktLen = FSConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
|
||||
int pktLen = HdfsConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
|
||||
|
||||
//normally dataStart == checksumPos, i.e., offset is zero.
|
||||
buffer = ByteBuffer.wrap(
|
||||
buf, dataStart - checksumPos,
|
||||
PacketHeader.PKT_HEADER_LEN + pktLen - FSConstants.BYTES_IN_INTEGER);
|
||||
PacketHeader.PKT_HEADER_LEN + pktLen - HdfsConstants.BYTES_IN_INTEGER);
|
||||
buf = null;
|
||||
buffer.mark();
|
||||
|
||||
|
@ -849,7 +849,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
|
||||
out = new DataOutputStream(new BufferedOutputStream(
|
||||
NetUtils.getOutputStream(sock, writeTimeout),
|
||||
FSConstants.SMALL_BUFFER_SIZE));
|
||||
HdfsConstants.SMALL_BUFFER_SIZE));
|
||||
|
||||
//send the TRANSFER_BLOCK request
|
||||
new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
|
||||
|
@ -1023,7 +1023,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
//
|
||||
out = new DataOutputStream(new BufferedOutputStream(
|
||||
NetUtils.getOutputStream(s, writeTimeout),
|
||||
FSConstants.SMALL_BUFFER_SIZE));
|
||||
HdfsConstants.SMALL_BUFFER_SIZE));
|
||||
|
||||
assert null == blockReplyStream : "Previous blockReplyStream unclosed";
|
||||
blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
|
||||
|
@ -1173,7 +1173,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
|
|||
final int timeout = client.getDatanodeReadTimeout(length);
|
||||
NetUtils.connect(sock, isa, timeout);
|
||||
sock.setSoTimeout(timeout);
|
||||
sock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
|
||||
sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
|
||||
if(DFSClient.LOG.isDebugEnabled()) {
|
||||
DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
|
||||
}
|
||||
|
|
|
@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
|
|||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
|
@ -646,7 +646,7 @@ public class DFSUtil {
|
|||
static ClientProtocol createNamenode(ClientProtocol rpcNamenode)
|
||||
throws IOException {
|
||||
RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
|
||||
5, FSConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
|
||||
5, HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
|
||||
|
||||
Map<Class<? extends Exception>,RetryPolicy> remoteExceptionToPolicyMap =
|
||||
new HashMap<Class<? extends Exception>, RetryPolicy>();
|
||||
|
|
|
@ -49,9 +49,9 @@ import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
|
|||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
|
@ -108,7 +108,7 @@ public class DistributedFileSystem extends FileSystem {
|
|||
|
||||
InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
|
||||
this.dfs = new DFSClient(namenode, conf, statistics);
|
||||
this.uri = URI.create(FSConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
|
||||
this.uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
|
||||
this.workingDir = getHomeDirectory();
|
||||
}
|
||||
|
||||
|
@ -642,9 +642,9 @@ public class DistributedFileSystem extends FileSystem {
|
|||
* Enter, leave or get safe mode.
|
||||
*
|
||||
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
|
||||
* FSConstants.SafeModeAction)
|
||||
* HdfsConstants.SafeModeAction)
|
||||
*/
|
||||
public boolean setSafeMode(FSConstants.SafeModeAction action)
|
||||
public boolean setSafeMode(HdfsConstants.SafeModeAction action)
|
||||
throws IOException {
|
||||
return dfs.setSafeMode(action);
|
||||
}
|
||||
|
|
|
@ -30,7 +30,7 @@ import java.util.Map;
|
|||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.util.Daemon;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
@ -162,7 +162,7 @@ class LeaseRenewer {
|
|||
/** The time in milliseconds that the map became empty. */
|
||||
private long emptyTime = Long.MAX_VALUE;
|
||||
/** A fixed lease renewal time period in milliseconds */
|
||||
private long renewal = FSConstants.LEASE_SOFTLIMIT_PERIOD/2;
|
||||
private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD/2;
|
||||
|
||||
/** A daemon for renewing lease */
|
||||
private Daemon daemon = null;
|
||||
|
@ -352,7 +352,7 @@ class LeaseRenewer {
|
|||
|
||||
//update renewal time
|
||||
if (renewal == dfsc.getHdfsTimeout()/2) {
|
||||
long min = FSConstants.LEASE_SOFTLIMIT_PERIOD;
|
||||
long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
|
||||
for(DFSClient c : dfsclients) {
|
||||
if (c.getHdfsTimeout() > 0) {
|
||||
final long timeout = c.getHdfsTimeout();
|
||||
|
|
|
@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatus
|
|||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
|
@ -394,7 +394,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
|
|||
throws IOException {
|
||||
// in and out will be closed when sock is closed (by the caller)
|
||||
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
|
||||
NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT)));
|
||||
NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
|
||||
new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
|
||||
|
||||
//
|
||||
|
@ -486,7 +486,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
|
|||
void sendReadResult(Socket sock, Status statusCode) {
|
||||
assert !sentStatusCode : "already sent status code to " + sock;
|
||||
try {
|
||||
OutputStream out = NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT);
|
||||
OutputStream out = NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT);
|
||||
|
||||
ClientReadStatusProto.newBuilder()
|
||||
.setStatus(statusCode)
|
||||
|
|
|
@ -22,7 +22,7 @@ import java.util.List;
|
|||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
|
||||
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
|
||||
|
||||
/**
|
||||
|
|
|
@ -35,7 +35,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
|
|||
import org.apache.hadoop.fs.Options.Rename;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
|
||||
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
|
||||
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
|
||||
|
@ -578,7 +578,7 @@ public interface ClientProtocol extends VersionedProtocol {
|
|||
* Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
|
||||
* otherwise all datanodes if type is ALL.
|
||||
*/
|
||||
public DatanodeInfo[] getDatanodeReport(FSConstants.DatanodeReportType type)
|
||||
public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
|
@ -601,7 +601,7 @@ public interface ClientProtocol extends VersionedProtocol {
|
|||
* <p>
|
||||
* Safe mode is entered automatically at name node startup.
|
||||
* Safe mode can also be entered manually using
|
||||
* {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
|
||||
* {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
|
||||
* <p>
|
||||
* At startup the name node accepts data node reports collecting
|
||||
* information about block locations.
|
||||
|
@ -617,11 +617,11 @@ public interface ClientProtocol extends VersionedProtocol {
|
|||
* Then the name node leaves safe mode.
|
||||
* <p>
|
||||
* If safe mode is turned on manually using
|
||||
* {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
|
||||
* {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
|
||||
* then the name node stays in safe mode until it is manually turned off
|
||||
* using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
|
||||
* using {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
|
||||
* Current state of the name node can be verified using
|
||||
* {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
|
||||
* {@link #setSafeMode(HdfsConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
|
||||
* <h4>Configuration parameters:</h4>
|
||||
* <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
|
||||
* <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
|
||||
|
@ -644,7 +644,7 @@ public interface ClientProtocol extends VersionedProtocol {
|
|||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
public boolean setSafeMode(FSConstants.SafeModeAction action)
|
||||
public boolean setSafeMode(HdfsConstants.SafeModeAction action)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
|
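For readers tracing the FSConstants -> HdfsConstants rename through the safe-mode javadoc above, here is a minimal client-side sketch (not part of this patch; the SafeModeCheck class name is made up for illustration) of how the actions described there are typically driven through DistributedFileSystem.setSafeMode, whose signature is updated elsewhere in this diff:

    // Illustrative sketch only: query safe mode state with the renamed enum.
    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class SafeModeCheck {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // SAFEMODE_GET only queries the current state; it does not change it.
          boolean inSafeMode =
              dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
          System.out.println("NameNode in safe mode: " + inSafeMode);
        }
      }
    }

SAFEMODE_ENTER and SAFEMODE_LEAVE are used the same way; the patch only moves the constants holder, the call pattern is unchanged.
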
@ -685,7 +685,7 @@ public interface ClientProtocol extends VersionedProtocol {
|
|||
/**
|
||||
* Report distributed upgrade progress or force current upgrade to proceed.
|
||||
*
|
||||
* @param action {@link FSConstants.UpgradeAction} to perform
|
||||
* @param action {@link HdfsConstants.UpgradeAction} to perform
|
||||
* @return upgrade status information or null if no upgrades are in progress
|
||||
* @throws IOException
|
||||
*/
|
||||
|
@ -777,8 +777,8 @@ public interface ClientProtocol extends VersionedProtocol {
|
|||
* <br><br>
|
||||
*
|
||||
* The quota can have three types of values : (1) 0 or more will set
|
||||
* the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
|
||||
* the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
|
||||
* the quota to that value, (2) {@link HdfsConstants#QUOTA_DONT_SET} implies
|
||||
* the quota will not be changed, and (3) {@link HdfsConstants#QUOTA_RESET}
|
||||
* implies the quota will be reset. Any other value is a runtime error.
|
||||
*
|
||||
* @throws AccessControlException permission denied
|
||||
|
|
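A minimal sketch of the three kinds of quota values the javadoc above distinguishes, assuming only the renamed HdfsConstants fields from this patch (the QuotaValues class is illustrative, not part of the change):

    // Illustrative sketch only: the three quota value kinds described above.
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class QuotaValues {
      public static void main(String[] args) {
        long nsQuota = 10000L;                        // (1) set namespace quota to a concrete value
        long dsQuota = HdfsConstants.QUOTA_DONT_SET;  // (2) leave diskspace quota unchanged
        long reset   = HdfsConstants.QUOTA_RESET;     // (3) clear a quota entirely
        // Any other non-positive value is rejected; the DFSClient.setQuota
        // sanity check earlier in this diff throws IllegalArgumentException.
        System.out.println(nsQuota + " " + dsQuota + " " + reset);
      }
    }
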
|
@@ -26,9 +26,9 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 *
 ************************************/
@InterfaceAudience.Private
public final class FSConstants {
public final class HdfsConstants {
  /* Hidden constructor */
  private FSConstants() {
  private HdfsConstants() {
  }

  public static int MIN_BLOCKS_FOR_WRITE = 5;

@ -55,15 +55,15 @@ import org.apache.hadoop.hdfs.DFSUtil;
|
|||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.hdfs.server.common.Util;
|
||||
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
|
||||
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
|
||||
|
@ -306,13 +306,13 @@ public class Balancer {
|
|||
DataInputStream in = null;
|
||||
try {
|
||||
sock.connect(NetUtils.createSocketAddr(
|
||||
target.datanode.getName()), HdfsConstants.READ_TIMEOUT);
|
||||
target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT);
|
||||
sock.setKeepAlive(true);
|
||||
out = new DataOutputStream( new BufferedOutputStream(
|
||||
sock.getOutputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
|
||||
sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
|
||||
sendRequest(out);
|
||||
in = new DataInputStream( new BufferedInputStream(
|
||||
sock.getInputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
|
||||
sock.getInputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
|
||||
receiveResponse(in);
|
||||
bytesMoved.inc(block.getNumBytes());
|
||||
LOG.info( "Moving block " + block.getBlock().getBlockId() +
|
||||
|
|
|
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.blockmanagement;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.LightWeightGSet;


@@ -22,8 +22,8 @@ import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

/**