Merge trunk into HDFS-6584
commit 300cb12bef

@@ -496,6 +496,14 @@ Release 2.6.0 - UNRELEASED
    HADOOP-10863. KMS should have a blacklist for decrypting EEKs.
    (asuresh via tucu)

    HADOOP-11054. Add a KeyProvider instantiation based on a URI. (tucu)

    HADOOP-11015. Http server/client utils to propagate and recreate
    Exceptions from server to client. (tucu)

    HADOOP-11060. Create a CryptoCodec test that verifies interoperability
    between the JCE and OpenSSL implementations. (hitliuyi via tucu)

  OPTIMIZATIONS

    HADOOP-10838. Byte array native checksumming. (James Thomas via todd)

@@ -684,6 +692,9 @@ Release 2.6.0 - UNRELEASED
    HADOOP-8815. RandomDatum needs to override hashCode().
    (Brandon Li via suresh)

    HADOOP-11056. OsSecureRandom.setConf() might leak file descriptors (yzhang
    via cmccabe)

  BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS

    HADOOP-10734. Implement high-performance secure random number sources.

@@ -743,6 +754,9 @@ Release 2.6.0 - UNRELEASED
    HADOOP-11012. hadoop fs -text of zero-length file causes EOFException
    (Eric Payne via jlowe)

    HADOOP-11063. KMS cannot deploy on Windows, because class names are too long.
    (cnauroth)

Release 2.5.1 - UNRELEASED

  INCOMPATIBLE CHANGES

@@ -367,7 +367,7 @@
  </Match>

  <Match>
    <Class name="org.apache.hadoop.crypto.key.kms.KMSClientProvider"/>
    <Class name="org.apache.hadoop.util.HttpExceptionUtils"/>
    <Method name="validateResponse"/>
    <Bug pattern="REC_CATCH_EXCEPTION"/>
  </Match>

@@ -63,16 +63,10 @@ public abstract class KeyProviderFactory {
    for(String path: conf.getStringCollection(KEY_PROVIDER_PATH)) {
      try {
        URI uri = new URI(path);
        boolean found = false;
        for(KeyProviderFactory factory: serviceLoader) {
          KeyProvider kp = factory.createProvider(uri, conf);
          if (kp != null) {
            result.add(kp);
            found = true;
            break;
          }
        }
        if (!found) {
        KeyProvider kp = get(uri, conf);
        if (kp != null) {
          result.add(kp);
        } else {
          throw new IOException("No KeyProviderFactory for " + uri + " in " +
              KEY_PROVIDER_PATH);
        }

@@ -83,4 +77,26 @@ public abstract class KeyProviderFactory {
    }
    return result;
  }

  /**
   * Create a KeyProvider based on a provided URI.
   *
   * @param uri key provider URI
   * @param conf configuration to initialize the key provider
   * @return the key provider for the specified URI, or <code>NULL</code> if
   *         a provider for the specified URI scheme could not be found.
   * @throws IOException thrown if the provider failed to initialize.
   */
  public static KeyProvider get(URI uri, Configuration conf)
      throws IOException {
    KeyProvider kp = null;
    for (KeyProviderFactory factory : serviceLoader) {
      kp = factory.createProvider(uri, conf);
      if (kp != null) {
        break;
      }
    }
    return kp;
  }

}

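The new KeyProviderFactory.get(URI, Configuration) entry point added by HADOOP-11054 resolves a single provider directly from a URI instead of walking hadoop.security.key.provider.path. A minimal usage sketch; the keystore location below is purely illustrative:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class KeyProviderLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical keystore URI; any scheme claimed by a registered
        // KeyProviderFactory resolves the same way.
        URI uri = new URI("jceks://file/tmp/test.jks");
        KeyProvider provider = KeyProviderFactory.get(uri, conf);
        if (provider == null) {
          // No factory on the classpath recognized the URI scheme.
          System.err.println("No KeyProvider for " + uri);
        } else {
          System.out.println("Resolved provider: " + provider);
        }
      }
    }
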
@@ -34,6 +34,7 @@ import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.http.client.utils.URIBuilder;
import org.codehaus.jackson.map.ObjectMapper;

@@ -44,7 +45,6 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.reflect.Constructor;
import java.net.HttpURLConnection;
import java.net.SocketTimeoutException;
import java.net.URI;

@@ -54,7 +54,6 @@ import java.net.URLEncoder;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
import java.security.PrivilegedExceptionAction;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;

@@ -413,58 +412,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
    return conn;
  }

  // trick, riding on generics to throw an undeclared exception

  private static void throwEx(Throwable ex) {
    KMSClientProvider.<RuntimeException>throwException(ex);
  }

  @SuppressWarnings("unchecked")
  private static <E extends Throwable> void throwException(Throwable ex)
      throws E {
    throw (E) ex;
  }

  @SuppressWarnings("unchecked")
  private static void validateResponse(HttpURLConnection conn, int expected)
      throws IOException {
    int status = conn.getResponseCode();
    if (status != expected) {
      InputStream es = null;
      try {
        Exception toThrow;
        String contentType = conn.getHeaderField(CONTENT_TYPE);
        if (contentType != null &&
            contentType.toLowerCase().startsWith(APPLICATION_JSON_MIME)) {
          es = conn.getErrorStream();
          ObjectMapper mapper = new ObjectMapper();
          Map json = mapper.readValue(es, Map.class);
          String exClass = (String) json.get(
              KMSRESTConstants.ERROR_EXCEPTION_JSON);
          String exMsg = (String)
              json.get(KMSRESTConstants.ERROR_MESSAGE_JSON);
          try {
            ClassLoader cl = KMSClientProvider.class.getClassLoader();
            Class klass = cl.loadClass(exClass);
            Constructor constr = klass.getConstructor(String.class);
            toThrow = (Exception) constr.newInstance(exMsg);
          } catch (Exception ex) {
            toThrow = new IOException(MessageFormat.format(
                "HTTP status [{0}], {1}", status, conn.getResponseMessage()));
          }
        } else {
          toThrow = new IOException(MessageFormat.format(
              "HTTP status [{0}], {1}", status, conn.getResponseMessage()));
        }
        throwEx(toThrow);
      } finally {
        if (es != null) {
          es.close();
        }
      }
    }
  }

  private static <T> T call(HttpURLConnection conn, Map jsonOutput,
      int expectedResponse, Class<T> klass)
      throws IOException {

@@ -477,7 +424,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
      conn.getInputStream().close();
      throw ex;
    }
    validateResponse(conn, expectedResponse);
    HttpExceptionUtils.validateResponse(conn, expectedResponse);
    if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
        && klass != null) {
      ObjectMapper mapper = new ObjectMapper();

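The validateResponse code removed from KMSClientProvider relies on the "riding on generics" trick to rethrow the reconstructed server-side exception without declaring it; the same trick now lives in HttpExceptionUtils. A standalone sketch of the idiom, with illustrative class and method names:

    public class SneakyThrow {
      // Rethrows any Throwable without declaring it: E is inferred as
      // RuntimeException at the call site, but after type erasure the actual
      // (possibly checked) exception is what gets thrown at runtime.
      @SuppressWarnings("unchecked")
      private static <E extends Throwable> void throwException(Throwable ex) throws E {
        throw (E) ex;
      }

      public static void throwEx(Throwable ex) {
        SneakyThrow.<RuntimeException>throwException(ex);
      }

      public static void main(String[] args) {
        // Compiles without a throws clause or try/catch, yet throws a checked
        // java.io.IOException at runtime.
        throwEx(new java.io.IOException("undeclared checked exception"));
      }
    }
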
@@ -23,6 +23,8 @@ import java.io.FileInputStream;
import java.io.IOException;
import java.util.Random;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

@@ -37,6 +39,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
 */
@InterfaceAudience.Private
public class OsSecureRandom extends Random implements Closeable, Configurable {
  public static final Log LOG = LogFactory.getLog(OsSecureRandom.class);

  private static final long serialVersionUID = 6391500337172057900L;

  private transient Configuration conf;

@@ -72,12 +76,20 @@ public class OsSecureRandom extends Random implements Closeable, Configurable {
        HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY,
        HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT);
    File randomDevFile = new File(randomDevPath);

    try {
      close();
      this.stream = new FileInputStream(randomDevFile);
      fillReservoir(0);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }

    try {
      fillReservoir(0);
    } catch (RuntimeException e) {
      close();
      throw e;
    }
  }

  @Override

@@ -109,7 +121,10 @@ public class OsSecureRandom extends Random implements Closeable, Configurable {
  }

  @Override
  synchronized public void close() throws IOException {
    stream.close();
  synchronized public void close() {
    if (stream != null) {
      IOUtils.cleanup(LOG, stream);
      stream = null;
    }
  }
}

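The HADOOP-11056 fix makes setConf() close any previously opened device stream before reopening it, and makes close() safe to call when no stream is open, so repeated setConf() calls no longer leak file descriptors. A hedged usage sketch of the pattern the new test exercises (package path of OsSecureRandom assumed from the Hadoop source tree):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.random.OsSecureRandom;

    public class SecureRandomExample {
      public static void main(String[] args) {
        OsSecureRandom random = new OsSecureRandom();
        // Each setConf() reopens the configured random device; with the fix the
        // old stream is closed first, so this loop does not leak descriptors.
        for (int i = 0; i < 3; i++) {
          random.setConf(new Configuration());
          byte[] bytes = new byte[16];
          random.nextBytes(bytes);
        }
        // close() no longer declares IOException and tolerates repeated calls.
        random.close();
        random.close();
      }
    }
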
@@ -31,6 +31,7 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHand
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.codehaus.jackson.map.ObjectMapper;

@@ -221,18 +222,8 @@ public class DelegationTokenAuthenticationFilter
        try {
          ProxyUsers.authorize(ugi, request.getRemoteHost());
        } catch (AuthorizationException ex) {
          String msg = String.format(
              "User '%s' from host '%s' not allowed to impersonate user '%s'",
              realUser, request.getRemoteHost(), doAsUser);
          response.setStatus(HttpServletResponse.SC_FORBIDDEN);
          response.setContentType(APPLICATION_JSON_MIME);
          Map<String, String> json = new HashMap<String, String>();
          json.put(ERROR_EXCEPTION_JSON,
              AuthorizationException.class.getName());
          json.put(ERROR_MESSAGE_JSON, msg);
          Writer writer = response.getWriter();
          ObjectMapper jsonMapper = new ObjectMapper();
          jsonMapper.writeValue(writer, json);
          HttpExceptionUtils.createServletExceptionResponse(response,
              HttpServletResponse.SC_FORBIDDEN, ex);
          requestCompleted = true;
        }
      }

@@ -29,6 +29,7 @@ import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.codehaus.jackson.map.ObjectMapper;

import javax.servlet.ServletException;

@@ -346,8 +347,9 @@ public abstract class DelegationTokenAuthenticationHandler
          token.setExpires(0);
          request.setAttribute(DELEGATION_TOKEN_UGI_ATTRIBUTE, ugi);
        } catch (Throwable ex) {
          throw new AuthenticationException("Could not verify DelegationToken, " +
              ex.toString(), ex);
          token = null;
          HttpExceptionUtils.createServletExceptionResponse(response,
              HttpServletResponse.SC_FORBIDDEN, new AuthenticationException(ex));
        }
      } else {
        token = authHandler.authenticate(request, response);

@@ -26,6 +26,7 @@ import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -217,7 +218,7 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
    AuthenticatedURL aUrl = new AuthenticatedURL(this);
    HttpURLConnection conn = aUrl.openConnection(url, token);
    conn.setRequestMethod(operation.getHttpMethod());
    validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    if (hasResponse) {
      String contentType = conn.getHeaderField(CONTENT_TYPE);
      contentType = (contentType != null) ? contentType.toLowerCase()

@@ -241,21 +242,4 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
    return ret;
  }

  @SuppressWarnings("unchecked")
  private static void validateResponse(HttpURLConnection conn, int expected)
      throws IOException {
    int status = conn.getResponseCode();
    if (status != expected) {
      try {
        conn.getInputStream().close();
      } catch (IOException ex) {
        //NOP
      }
      String msg = String.format("HTTP status, expected [%d], got [%d]: %s",
          expected, status, conn.getResponseMessage());
      LOG.debug(msg);
      throw new IOException(msg);
    }
  }

}

@@ -0,0 +1,185 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.util;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;

import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import java.io.InputStream;
import java.io.Writer;
import java.lang.reflect.Constructor;
import java.net.HttpURLConnection;
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * HTTP utility class to help propagate server side exception to the client
 * over HTTP as a JSON payload.
 * <p/>
 * It creates HTTP Servlet and JAX-RPC error responses including details of the
 * exception that allows a client to recreate the remote exception.
 * <p/>
 * It parses HTTP client connections and recreates the exception.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class HttpExceptionUtils {

  public static final String ERROR_JSON = "RemoteException";
  public static final String ERROR_EXCEPTION_JSON = "exception";
  public static final String ERROR_CLASSNAME_JSON = "javaClassName";
  public static final String ERROR_MESSAGE_JSON = "message";

  private static final String APPLICATION_JSON_MIME = "application/json";

  private static final String ENTER = System.getProperty("line.separator");

  /**
   * Creates a HTTP servlet response serializing the exception in it as JSON.
   *
   * @param response the servlet response
   * @param status the error code to set in the response
   * @param ex the exception to serialize in the response
   * @throws IOException thrown if there was an error while creating the
   * response
   */
  public static void createServletExceptionResponse(
      HttpServletResponse response, int status, Throwable ex)
      throws IOException {
    response.setStatus(status);
    response.setContentType(APPLICATION_JSON_MIME);
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
    json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
    json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
    Map<String, Object> jsonResponse = new LinkedHashMap<String, Object>();
    jsonResponse.put(ERROR_JSON, json);
    ObjectMapper jsonMapper = new ObjectMapper();
    Writer writer = response.getWriter();
    jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, jsonResponse);
    writer.flush();
  }

  /**
   * Creates a HTTP JAX-RPC response serializing the exception in it as JSON.
   *
   * @param status the error code to set in the response
   * @param ex the exception to serialize in the response
   * @return the JAX-RPC response with the set error and JSON encoded exception
   */
  public static Response createJerseyExceptionResponse(Response.Status status,
      Throwable ex) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
    json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
    json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
    Map<String, Object> response = new LinkedHashMap<String, Object>();
    response.put(ERROR_JSON, json);
    return Response.status(status).type(MediaType.APPLICATION_JSON).
        entity(response).build();
  }

  private static String getOneLineMessage(Throwable exception) {
    String message = exception.getMessage();
    if (message != null) {
      int i = message.indexOf(ENTER);
      if (i > -1) {
        message = message.substring(0, i);
      }
    }
    return message;
  }

  // trick, riding on generics to throw an undeclared exception

  private static void throwEx(Throwable ex) {
    HttpExceptionUtils.<RuntimeException>throwException(ex);
  }

  @SuppressWarnings("unchecked")
  private static <E extends Throwable> void throwException(Throwable ex)
      throws E {
    throw (E) ex;
  }

  /**
   * Validates the status of an <code>HttpURLConnection</code> against an
   * expected HTTP status code. If the current status code is not the expected
   * one it throws an exception with a detail message using Server side error
   * messages if available.
   * <p/>
   * <b>NOTE:</b> this method will throw the deserialized exception even if not
   * declared in the <code>throws</code> of the method signature.
   *
   * @param conn the <code>HttpURLConnection</code>.
   * @param expectedStatus the expected HTTP status code.
   * @throws IOException thrown if the current status code does not match the
   * expected one.
   */
  @SuppressWarnings("unchecked")
  public static void validateResponse(HttpURLConnection conn,
      int expectedStatus) throws IOException {
    if (conn.getResponseCode() != expectedStatus) {
      Exception toThrow;
      InputStream es = null;
      try {
        es = conn.getErrorStream();
        ObjectMapper mapper = new ObjectMapper();
        Map json = mapper.readValue(es, Map.class);
        json = (Map) json.get(ERROR_JSON);
        String exClass = (String) json.get(ERROR_CLASSNAME_JSON);
        String exMsg = (String) json.get(ERROR_MESSAGE_JSON);
        if (exClass != null) {
          try {
            ClassLoader cl = HttpExceptionUtils.class.getClassLoader();
            Class klass = cl.loadClass(exClass);
            Constructor constr = klass.getConstructor(String.class);
            toThrow = (Exception) constr.newInstance(exMsg);
          } catch (Exception ex) {
            toThrow = new IOException(String.format(
                "HTTP status [%d], exception [%s], message [%s] ",
                conn.getResponseCode(), exClass, exMsg));
          }
        } else {
          String msg = (exMsg != null) ? exMsg : conn.getResponseMessage();
          toThrow = new IOException(String.format(
              "HTTP status [%d], message [%s]", conn.getResponseCode(), msg));
        }
      } catch (Exception ex) {
        toThrow = new IOException(String.format(
            "HTTP status [%d], message [%s]", conn.getResponseCode(),
            conn.getResponseMessage()));
      } finally {
        if (es != null) {
          try {
            es.close();
          } catch (IOException ex) {
            //ignore
          }
        }
      }
      throwEx(toThrow);
    }
  }

}

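For reference, the wire format that the two create* methods emit and that validateResponse parses is a single RemoteException wrapper object; based on the constants and the insertion order above, a payload for an IOException("File not found") would plausibly look like this (values shown are illustrative):

    {
      "RemoteException": {
        "message": "File not found",
        "exception": "IOException",
        "javaClassName": "java.io.IOException"
      }
    }
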
@@ -52,35 +52,40 @@ public class TestCryptoCodec {
  private Configuration conf = new Configuration();
  private int count = 10000;
  private int seed = new Random().nextInt();
  private final String jceCodecClass =
      "org.apache.hadoop.crypto.JceAesCtrCryptoCodec";
  private final String opensslCodecClass =
      "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec";

  @Test(timeout=120000)
  public void testJceAesCtrCryptoCodec() throws Exception {
    cryptoCodecTest(conf, seed, 0,
        "org.apache.hadoop.crypto.JceAesCtrCryptoCodec");
    cryptoCodecTest(conf, seed, count,
        "org.apache.hadoop.crypto.JceAesCtrCryptoCodec");
    Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl());
    Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason());
    cryptoCodecTest(conf, seed, 0, jceCodecClass, jceCodecClass);
    cryptoCodecTest(conf, seed, count, jceCodecClass, jceCodecClass);
    cryptoCodecTest(conf, seed, count, jceCodecClass, opensslCodecClass);
  }

  @Test(timeout=1200000)
  @Test(timeout=120000)
  public void testOpensslAesCtrCryptoCodec() throws Exception {
    Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl());
    Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason());
    cryptoCodecTest(conf, seed, 0,
        "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
    cryptoCodecTest(conf, seed, count,
        "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
    cryptoCodecTest(conf, seed, 0, opensslCodecClass, opensslCodecClass);
    cryptoCodecTest(conf, seed, count, opensslCodecClass, opensslCodecClass);
    cryptoCodecTest(conf, seed, count, opensslCodecClass, jceCodecClass);
  }

  private void cryptoCodecTest(Configuration conf, int seed, int count,
      String codecClass) throws IOException, GeneralSecurityException {
    CryptoCodec codec = null;
      String encCodecClass, String decCodecClass) throws IOException,
      GeneralSecurityException {
    CryptoCodec encCodec = null;
    try {
      codec = (CryptoCodec)ReflectionUtils.newInstance(
          conf.getClassByName(codecClass), conf);
      encCodec = (CryptoCodec)ReflectionUtils.newInstance(
          conf.getClassByName(encCodecClass), conf);
    } catch (ClassNotFoundException cnfe) {
      throw new IOException("Illegal crypto codec!");
    }
    LOG.info("Created a Codec object of type: " + codecClass);
    LOG.info("Created a Codec object of type: " + encCodecClass);

    // Generate data
    DataOutputBuffer data = new DataOutputBuffer();

@@ -98,18 +103,27 @@ public class TestCryptoCodec {
    // Encrypt data
    DataOutputBuffer encryptedDataBuffer = new DataOutputBuffer();
    CryptoOutputStream out = new CryptoOutputStream(encryptedDataBuffer,
        codec, bufferSize, key, iv);
        encCodec, bufferSize, key, iv);
    out.write(data.getData(), 0, data.getLength());
    out.flush();
    out.close();
    LOG.info("Finished encrypting data");

    CryptoCodec decCodec = null;
    try {
      decCodec = (CryptoCodec)ReflectionUtils.newInstance(
          conf.getClassByName(decCodecClass), conf);
    } catch (ClassNotFoundException cnfe) {
      throw new IOException("Illegal crypto codec!");
    }
    LOG.info("Created a Codec object of type: " + decCodecClass);

    // Decrypt data
    DataInputBuffer decryptedDataBuffer = new DataInputBuffer();
    decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0,
        encryptedDataBuffer.getLength());
    CryptoInputStream in = new CryptoInputStream(decryptedDataBuffer,
        codec, bufferSize, key, iv);
        decCodec, bufferSize, key, iv);
    DataInputStream dataIn = new DataInputStream(new BufferedInputStream(in));

    // Check

@@ -146,7 +160,7 @@ public class TestCryptoCodec {
    decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0,
        encryptedDataBuffer.getLength());
    in = new CryptoInputStream(decryptedDataBuffer,
        codec, bufferSize, key, iv);
        decCodec, bufferSize, key, iv);

    // Check
    originalIn = new DataInputStream(new BufferedInputStream(originalData));

@@ -156,11 +170,30 @@ public class TestCryptoCodec {
      assertEquals("Decrypted stream read by byte does not match",
          expected, in.read());
    } while (expected != -1);

    // Seek to a certain position and decrypt
    originalData.reset(data.getData(), 0, data.getLength());
    decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0,
        encryptedDataBuffer.getLength());
    in = new CryptoInputStream(new TestCryptoStreams.FakeInputStream(
        decryptedDataBuffer), decCodec, bufferSize, key, iv);
    int seekPos = data.getLength() / 3;
    in.seek(seekPos);

    // Check
    TestCryptoStreams.FakeInputStream originalInput =
        new TestCryptoStreams.FakeInputStream(originalData);
    originalInput.seek(seekPos);
    do {
      expected = originalInput.read();
      assertEquals("Decrypted stream read by byte does not match",
          expected, in.read());
    } while (expected != -1);

    LOG.info("SUCCESS! Completed checking " + count + " records");

    // Check secure random generator
    testSecureRandom(codec);
    testSecureRandom(encCodec);
  }

  /** Test secure random generator */

@@ -159,7 +159,7 @@ public class TestCryptoStreams extends CryptoStreamsTestBase {
    }
  }

  private class FakeInputStream extends InputStream implements
  public static class FakeInputStream extends InputStream implements
      Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor,
      CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess {
    private final byte[] oneByteBuf = new byte[1];

@@ -357,4 +357,17 @@ public class TestKeyProviderFactory {
      }
    }
  }

  @Test
  public void testGetProviderViaURI() throws Exception {
    Configuration conf = new Configuration(false);
    URI uri = new URI(JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir +
        "/test.jks");
    KeyProvider kp = KeyProviderFactory.get(uri, conf);
    Assert.assertNotNull(kp);
    Assert.assertEquals(JavaKeyStoreProvider.class, kp.getClass());
    uri = new URI("foo://bar");
    kp = KeyProviderFactory.get(uri, conf);
    Assert.assertNull(kp);

  }
}

@@ -22,6 +22,7 @@ import java.util.Arrays;

import org.apache.commons.lang.SystemUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.junit.Assume;
import org.junit.Test;

@@ -136,4 +137,18 @@ public class TestOsSecureRandom {
    }
    random.close();
  }

  @Test(timeout=120000)
  public void testOsSecureRandomSetConf() throws IOException {
    Assume.assumeTrue(SystemUtils.IS_OS_LINUX);
    OsSecureRandom random = new OsSecureRandom();
    for(int n = 0; n < 10; ++n) {
      random.setConf(new Configuration());
      String[] scmd = new String[] {"/bin/sh", "-c", "lsof | wc -l"};
      ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
      sce.execute();
      System.out.println("==lsof result " + n + ":");
      System.out.println(sce.getOutput());
    }
  }
}

@@ -25,6 +25,7 @@ import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.After;
import org.junit.Assert;

@@ -224,7 +225,8 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
    Mockito.when(request.getQueryString()).thenReturn(
        DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() + "&" +
        DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
        token.encodeToUrlString());
        token.encodeToUrlString()
    );
    Assert.assertFalse(handler.managementOperation(null, request, response));
    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
    try {

@@ -273,8 +275,8 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
        UserGroupInformation.getCurrentUser(), "user");
    Mockito.when(request.getQueryString()).
        thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
        "&" + DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
        dToken.encodeToUrlString());
            "&" + DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
            dToken.encodeToUrlString());
    Assert.assertFalse(handler.managementOperation(token, request, response));
    Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
    pwriter.close();

@@ -333,15 +335,11 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
    Mockito.when(request.getQueryString()).thenReturn(
        DelegationTokenAuthenticator.DELEGATION_PARAM + "=invalid");

    try {
      handler.authenticate(request, response);
      Assert.fail();
    } catch (AuthenticationException ex) {
      //NOP
    } catch (Exception ex) {
      Assert.fail();
    }
    StringWriter writer = new StringWriter();
    Mockito.when(response.getWriter()).thenReturn(new PrintWriter(writer));
    Assert.assertNull(handler.authenticate(request, response));
    Mockito.verify(response).setStatus(HttpServletResponse.SC_FORBIDDEN);
    Assert.assertTrue(writer.toString().contains("AuthenticationException"));
  }

  private void testInvalidDelegationTokenHeader() throws Exception {

@@ -350,15 +348,10 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
    Mockito.when(request.getHeader(Mockito.eq(
        DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
        "invalid");

    try {
      handler.authenticate(request, response);
      Assert.fail();
    } catch (AuthenticationException ex) {
      //NOP
    } catch (Exception ex) {
      Assert.fail();
    }
    StringWriter writer = new StringWriter();
    Mockito.when(response.getWriter()).thenReturn(new PrintWriter(writer));
    Assert.assertNull(handler.authenticate(request, response));
    Assert.assertTrue(writer.toString().contains("AuthenticationException"));
  }

}

@@ -0,0 +1,167 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.util;

import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;

import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.HttpURLConnection;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class TestHttpExceptionUtils {

  @Test
  public void testCreateServletException() throws IOException {
    StringWriter writer = new StringWriter();
    PrintWriter printWriter = new PrintWriter(writer);
    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
    Mockito.when(response.getWriter()).thenReturn(printWriter);
    int status = HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
    Exception ex = new IOException("Hello IOEX");
    HttpExceptionUtils.createServletExceptionResponse(response, status, ex);
    Mockito.verify(response).setStatus(status);
    Mockito.verify(response).setContentType(Mockito.eq("application/json"));
    ObjectMapper mapper = new ObjectMapper();
    Map json = mapper.readValue(writer.toString(), Map.class);
    json = (Map) json.get(HttpExceptionUtils.ERROR_JSON);
    Assert.assertEquals(IOException.class.getName(),
        json.get(HttpExceptionUtils.ERROR_CLASSNAME_JSON));
    Assert.assertEquals(IOException.class.getSimpleName(),
        json.get(HttpExceptionUtils.ERROR_EXCEPTION_JSON));
    Assert.assertEquals("Hello IOEX",
        json.get(HttpExceptionUtils.ERROR_MESSAGE_JSON));
  }

  @Test
  public void testCreateJerseyException() throws IOException {
    Exception ex = new IOException("Hello IOEX");
    Response response = HttpExceptionUtils.createJerseyExceptionResponse(
        Response.Status.INTERNAL_SERVER_ERROR, ex);
    Assert.assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(),
        response.getStatus());
    Assert.assertArrayEquals(
        Arrays.asList(MediaType.APPLICATION_JSON_TYPE).toArray(),
        response.getMetadata().get("Content-Type").toArray());
    Map entity = (Map) response.getEntity();
    entity = (Map) entity.get(HttpExceptionUtils.ERROR_JSON);
    Assert.assertEquals(IOException.class.getName(),
        entity.get(HttpExceptionUtils.ERROR_CLASSNAME_JSON));
    Assert.assertEquals(IOException.class.getSimpleName(),
        entity.get(HttpExceptionUtils.ERROR_EXCEPTION_JSON));
    Assert.assertEquals("Hello IOEX",
        entity.get(HttpExceptionUtils.ERROR_MESSAGE_JSON));
  }

  @Test
  public void testValidateResponseOK() throws IOException {
    HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
    Mockito.when(conn.getResponseCode()).thenReturn(
        HttpURLConnection.HTTP_CREATED);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
  }

  @Test(expected = IOException.class)
  public void testValidateResponseFailNoErrorMessage() throws IOException {
    HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
    Mockito.when(conn.getResponseCode()).thenReturn(
        HttpURLConnection.HTTP_BAD_REQUEST);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
  }

  @Test
  public void testValidateResponseNonJsonErrorMessage() throws IOException {
    String msg = "stream";
    InputStream is = new ByteArrayInputStream(msg.getBytes());
    HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
    Mockito.when(conn.getErrorStream()).thenReturn(is);
    Mockito.when(conn.getResponseMessage()).thenReturn("msg");
    Mockito.when(conn.getResponseCode()).thenReturn(
        HttpURLConnection.HTTP_BAD_REQUEST);
    try {
      HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
      Assert.fail();
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains("msg"));
      Assert.assertTrue(ex.getMessage().contains("" +
          HttpURLConnection.HTTP_BAD_REQUEST));
    }
  }

  @Test
  public void testValidateResponseJsonErrorKnownException() throws IOException {
    Map<String, Object> json = new HashMap<String, Object>();
    json.put(HttpExceptionUtils.ERROR_EXCEPTION_JSON, IllegalStateException.class.getSimpleName());
    json.put(HttpExceptionUtils.ERROR_CLASSNAME_JSON, IllegalStateException.class.getName());
    json.put(HttpExceptionUtils.ERROR_MESSAGE_JSON, "EX");
    Map<String, Object> response = new HashMap<String, Object>();
    response.put(HttpExceptionUtils.ERROR_JSON, json);
    ObjectMapper jsonMapper = new ObjectMapper();
    String msg = jsonMapper.writeValueAsString(response);
    InputStream is = new ByteArrayInputStream(msg.getBytes());
    HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
    Mockito.when(conn.getErrorStream()).thenReturn(is);
    Mockito.when(conn.getResponseMessage()).thenReturn("msg");
    Mockito.when(conn.getResponseCode()).thenReturn(
        HttpURLConnection.HTTP_BAD_REQUEST);
    try {
      HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
      Assert.fail();
    } catch (IllegalStateException ex) {
      Assert.assertEquals("EX", ex.getMessage());
    }
  }

  @Test
  public void testValidateResponseJsonErrorUnknownException()
      throws IOException {
    Map<String, Object> json = new HashMap<String, Object>();
    json.put(HttpExceptionUtils.ERROR_EXCEPTION_JSON, "FooException");
    json.put(HttpExceptionUtils.ERROR_CLASSNAME_JSON, "foo.FooException");
    json.put(HttpExceptionUtils.ERROR_MESSAGE_JSON, "EX");
    Map<String, Object> response = new HashMap<String, Object>();
    response.put(HttpExceptionUtils.ERROR_JSON, json);
    ObjectMapper jsonMapper = new ObjectMapper();
    String msg = jsonMapper.writeValueAsString(response);
    InputStream is = new ByteArrayInputStream(msg.getBytes());
    HttpURLConnection conn = Mockito.mock(HttpURLConnection.class);
    Mockito.when(conn.getErrorStream()).thenReturn(is);
    Mockito.when(conn.getResponseMessage()).thenReturn("msg");
    Mockito.when(conn.getResponseCode()).thenReturn(
        HttpURLConnection.HTTP_BAD_REQUEST);
    try {
      HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_CREATED);
      Assert.fail();
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains("EX"));
      Assert.assertTrue(ex.getMessage().contains("foo.FooException"));
    }
  }

}

@@ -243,6 +243,7 @@
            <goal>war</goal>
          </goals>
          <configuration>
            <archiveClasses>true</archiveClasses>
            <warName>kms</warName>
            <webappDirectory>${project.build.directory}/kms
            </webappDirectory>

@@ -21,22 +21,19 @@ import org.apache.hadoop.classification.InterfaceAudience;

import com.sun.jersey.api.container.ContainerException;

import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider;

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Jersey provider that converts KMS exceptions into detailed HTTP errors.

@@ -50,12 +47,7 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
  private static final String ENTER = System.getProperty("line.separator");

  protected Response createResponse(Response.Status status, Throwable ex) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put(KMSRESTConstants.ERROR_EXCEPTION_JSON, ex.getClass().getName());
    json.put(KMSRESTConstants.ERROR_MESSAGE_JSON, getOneLineMessage(ex));
    log(status, ex);
    return Response.status(status).type(MediaType.APPLICATION_JSON).
        entity(json).build();
    return HttpExceptionUtils.createJerseyExceptionResponse(status, ex);
  }

  protected String getOneLineMessage(Throwable exception) {

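KMSExceptionsProvider now builds its error entity through the shared helper instead of assembling the JSON map itself. A stripped-down sketch of a Jersey ExceptionMapper following the same pattern; the class name is illustrative:

    import javax.ws.rs.core.Response;
    import javax.ws.rs.ext.ExceptionMapper;
    import javax.ws.rs.ext.Provider;
    import org.apache.hadoop.util.HttpExceptionUtils;

    @Provider
    public class GenericExceptionMapper implements ExceptionMapper<Exception> {
      @Override
      public Response toResponse(Exception exception) {
        // Maps everything to 500 for brevity; a real mapper would pick the
        // status per exception type, as KMSExceptionsProvider does.
        return HttpExceptionUtils.createJerseyExceptionResponse(
            Response.Status.INTERNAL_SERVER_ERROR, exception);
      }
    }
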
@@ -40,13 +40,12 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.lib.wsrs.EnumSetParam;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;

@@ -179,11 +178,6 @@ public class HttpFSFileSystem extends FileSystem
  public static final String ACL_ENTRIES_JSON = "entries";
  public static final String ACL_BIT_JSON = "aclBit";

  public static final String ERROR_JSON = "RemoteException";
  public static final String ERROR_EXCEPTION_JSON = "exception";
  public static final String ERROR_CLASSNAME_JSON = "javaClassName";
  public static final String ERROR_MESSAGE_JSON = "message";

  public static final int HTTP_TEMPORARY_REDIRECT = 307;

  private static final String HTTP_GET = "GET";

@@ -223,7 +217,6 @@ public class HttpFSFileSystem extends FileSystem
  private URI uri;
  private Path workingDir;
  private UserGroupInformation realUser;
  private String doAs;



@@ -336,7 +329,6 @@ public class HttpFSFileSystem extends FileSystem
    if (realUser == null) {
      realUser = UserGroupInformation.getLoginUser();
    }
    doAs = ugi.getShortUserName();
    super.initialize(name, conf);
    try {
      uri = new URI(name.getScheme() + "://" + name.getAuthority());

@@ -435,7 +427,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.OPEN.toString());
    HttpURLConnection conn = getConnection(Operation.OPEN.getMethod(), params,
        f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    return new FSDataInputStream(
        new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
  }

@@ -462,7 +454,7 @@ public class HttpFSFileSystem extends FileSystem
      try {
        super.close();
      } finally {
        HttpFSUtils.validateResponse(conn, closeStatus);
        HttpExceptionUtils.validateResponse(conn, closeStatus);
      }
    }

@@ -498,11 +490,11 @@ public class HttpFSFileSystem extends FileSystem
          OutputStream os = new BufferedOutputStream(conn.getOutputStream(), bufferSize);
          return new HttpFSDataOutputStream(conn, os, expectedStatus, statistics);
        } catch (IOException ex) {
          HttpFSUtils.validateResponse(conn, expectedStatus);
          HttpExceptionUtils.validateResponse(conn, expectedStatus);
          throw ex;
        }
      } else {
        HttpFSUtils.validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
        HttpExceptionUtils.validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
        throw new IOException("Missing HTTP 'Location' header for [" + conn.getURL() + "]");
      }
    } else {

@@ -514,7 +506,7 @@ public class HttpFSFileSystem extends FileSystem
      if (exceptionAlreadyHandled) {
        throw ex;
      } else {
        HttpFSUtils.validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
        HttpExceptionUtils.validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
        throw ex;
      }
    }

@@ -595,7 +587,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(SOURCES_PARAM, srcs);
    HttpURLConnection conn = getConnection(Operation.CONCAT.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  /**

@@ -609,7 +601,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(DESTINATION_PARAM, dst.toString());
    HttpURLConnection conn = getConnection(Operation.RENAME.getMethod(),
        params, src, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    return (Boolean) json.get(RENAME_JSON);
  }

@@ -644,7 +636,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(RECURSIVE_PARAM, Boolean.toString(recursive));
    HttpURLConnection conn = getConnection(Operation.DELETE.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    return (Boolean) json.get(DELETE_JSON);
  }

@@ -665,7 +657,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.LISTSTATUS.toString());
    HttpURLConnection conn = getConnection(Operation.LISTSTATUS.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    json = (JSONObject) json.get(FILE_STATUSES_JSON);
    JSONArray jsonArray = (JSONArray) json.get(FILE_STATUS_JSON);

@@ -713,7 +705,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(PERMISSION_PARAM, permissionToString(permission));
    HttpURLConnection conn = getConnection(Operation.MKDIRS.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    return (Boolean) json.get(MKDIRS_JSON);
  }

@@ -734,7 +726,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.GETFILESTATUS.toString());
    HttpURLConnection conn = getConnection(Operation.GETFILESTATUS.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    json = (JSONObject) json.get(FILE_STATUS_JSON);
    f = makeQualified(f);

@@ -753,7 +745,7 @@ public class HttpFSFileSystem extends FileSystem
      HttpURLConnection conn =
          getConnection(Operation.GETHOMEDIRECTORY.getMethod(), params,
              new Path(getUri().toString(), "/"), false);
      HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
      HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
      JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
      return new Path((String) json.get(HOME_DIR_JSON));
    } catch (IOException ex) {

@@ -778,7 +770,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(GROUP_PARAM, groupname);
    HttpURLConnection conn = getConnection(Operation.SETOWNER.getMethod(),
        params, p, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  /**

@@ -793,7 +785,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.SETPERMISSION.toString());
    params.put(PERMISSION_PARAM, permissionToString(permission));
    HttpURLConnection conn = getConnection(Operation.SETPERMISSION.getMethod(), params, p, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  /**

@@ -815,7 +807,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(ACCESS_TIME_PARAM, Long.toString(atime));
    HttpURLConnection conn = getConnection(Operation.SETTIMES.getMethod(),
        params, p, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  /**

@@ -837,7 +829,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(REPLICATION_PARAM, Short.toString(replication));
    HttpURLConnection conn =
        getConnection(Operation.SETREPLICATION.getMethod(), params, src, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    return (Boolean) json.get(SET_REPLICATION_JSON);
  }

@@ -857,7 +849,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
    HttpURLConnection conn = getConnection(
        Operation.MODIFYACLENTRIES.getMethod(), params, path, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  /**

@@ -874,7 +866,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
    HttpURLConnection conn = getConnection(
        Operation.REMOVEACLENTRIES.getMethod(), params, path, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  /**

@@ -888,7 +880,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.REMOVEDEFAULTACL.toString());
    HttpURLConnection conn = getConnection(
        Operation.REMOVEDEFAULTACL.getMethod(), params, path, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  /**

@@ -902,7 +894,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.REMOVEACL.toString());
    HttpURLConnection conn = getConnection(Operation.REMOVEACL.getMethod(),
        params, path, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  /**

@@ -920,7 +912,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
    HttpURLConnection conn = getConnection(Operation.SETACL.getMethod(),
        params, path, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  /**

@@ -935,7 +927,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.GETACLSTATUS.toString());
    HttpURLConnection conn = getConnection(Operation.GETACLSTATUS.getMethod(),
        params, path, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    json = (JSONObject) json.get(ACL_STATUS_JSON);
    return createAclStatus(json);

@@ -996,7 +988,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.GETCONTENTSUMMARY.toString());
    HttpURLConnection conn =
        getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) ((JSONObject)
        HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
    return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),

@@ -1014,7 +1006,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.GETFILECHECKSUM.toString());
    HttpURLConnection conn =
        getConnection(Operation.GETFILECHECKSUM.getMethod(), params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    final JSONObject json = (JSONObject) ((JSONObject)
        HttpFSUtils.jsonParse(conn)).get(FILE_CHECKSUM_JSON);
    return new FileChecksum() {

@@ -1115,7 +1107,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(XATTR_SET_FLAG_PARAM, EnumSetParam.toString(flag));
    HttpURLConnection conn = getConnection(Operation.SETXATTR.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }

  @Override

@@ -1125,7 +1117,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(XATTR_NAME_PARAM, name);
    HttpURLConnection conn = getConnection(Operation.GETXATTRS.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    Map<String, byte[]> xAttrs = createXAttrMap(
        (JSONArray) json.get(XATTRS_JSON));

@@ -1169,7 +1161,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.GETXATTRS.toString());
    HttpURLConnection conn = getConnection(Operation.GETXATTRS.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    return createXAttrMap((JSONArray) json.get(XATTRS_JSON));
  }

@@ -1185,7 +1177,7 @@ public class HttpFSFileSystem extends FileSystem
    multiValuedParams.put(XATTR_NAME_PARAM, names);
    HttpURLConnection conn = getConnection(Operation.GETXATTRS.getMethod(),
        params, multiValuedParams, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    return createXAttrMap((JSONArray) json.get(XATTRS_JSON));
  }

@@ -1196,7 +1188,7 @@ public class HttpFSFileSystem extends FileSystem
    params.put(OP_PARAM, Operation.LISTXATTRS.toString());
    HttpURLConnection conn = getConnection(Operation.LISTXATTRS.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
    return createXAttrNames((String) json.get(XATTRNAMES_JSON));
  }

@@ -1208,6 +1200,6 @@ public class HttpFSFileSystem extends FileSystem
    params.put(XATTR_NAME_PARAM, name);
    HttpURLConnection conn = getConnection(Operation.REMOVEXATTR.getMethod(),
        params, f, true);
    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }
}

@ -19,13 +19,11 @@ package org.apache.hadoop.fs.http.client;
|
|||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.json.simple.JSONObject;
|
||||
import org.json.simple.parser.JSONParser;
|
||||
import org.json.simple.parser.ParseException;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.net.HttpURLConnection;
|
||||
import java.net.URI;
|
||||
import java.net.URL;
|
||||
|
@ -115,54 +113,6 @@ public class HttpFSUtils {
|
|||
return new URL(sb.toString());
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates the status of an <code>HttpURLConnection</code> against an
|
||||
* expected HTTP status code. If the current status code is not the expected
|
||||
* one it throws an exception with a detail message using Server side error
|
||||
* messages if available.
|
||||
*
|
||||
* @param conn the <code>HttpURLConnection</code>.
|
||||
* @param expected the expected HTTP status code.
|
||||
*
|
||||
* @throws IOException thrown if the current status code does not match the
|
||||
* expected one.
|
||||
*/
|
||||
@SuppressWarnings({"unchecked"})
|
||||
static void validateResponse(HttpURLConnection conn, int expected)
|
||||
throws IOException {
|
||||
int status = conn.getResponseCode();
|
||||
if (status != expected) {
|
||||
try {
|
||||
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
|
||||
json = (JSONObject) json.get(HttpFSFileSystem.ERROR_JSON);
|
||||
String message = (String) json.get(HttpFSFileSystem.ERROR_MESSAGE_JSON);
|
||||
String exception = (String)
|
||||
json.get(HttpFSFileSystem.ERROR_EXCEPTION_JSON);
|
||||
String className = (String)
|
||||
json.get(HttpFSFileSystem.ERROR_CLASSNAME_JSON);
|
||||
|
||||
try {
|
||||
ClassLoader cl = HttpFSFileSystem.class.getClassLoader();
|
||||
Class klass = cl.loadClass(className);
|
||||
Constructor constr = klass.getConstructor(String.class);
|
||||
throw (IOException) constr.newInstance(message);
|
||||
} catch (IOException ex) {
|
||||
throw ex;
|
||||
} catch (Exception ex) {
|
||||
throw new IOException(MessageFormat.format("{0} - {1}", exception,
|
||||
message));
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
if (ex.getCause() instanceof IOException) {
|
||||
throw (IOException) ex.getCause();
|
||||
}
|
||||
throw new IOException(
|
||||
MessageFormat.format("HTTP status [{0}], {1}",
|
||||
status, conn.getResponseMessage()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Convenience method that JSON Parses the <code>InputStream</code> of a
|
||||
* <code>HttpURLConnection</code>.
|
||||
|
|
|
@ -19,15 +19,12 @@
|
|||
package org.apache.hadoop.lib.wsrs;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
|
||||
import org.apache.hadoop.util.HttpExceptionUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import javax.ws.rs.core.MediaType;
|
||||
import javax.ws.rs.core.Response;
|
||||
import javax.ws.rs.ext.ExceptionMapper;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.Map;
|
||||
|
||||
@InterfaceAudience.Private
|
||||
public class ExceptionProvider implements ExceptionMapper<Throwable> {
|
||||
|
@ -36,14 +33,7 @@ public class ExceptionProvider implements ExceptionMapper<Throwable> {
|
|||
private static final String ENTER = System.getProperty("line.separator");
|
||||
|
||||
protected Response createResponse(Response.Status status, Throwable throwable) {
|
||||
Map<String, Object> json = new LinkedHashMap<String, Object>();
|
||||
json.put(HttpFSFileSystem.ERROR_MESSAGE_JSON, getOneLineMessage(throwable));
|
||||
json.put(HttpFSFileSystem.ERROR_EXCEPTION_JSON, throwable.getClass().getSimpleName());
|
||||
json.put(HttpFSFileSystem.ERROR_CLASSNAME_JSON, throwable.getClass().getName());
|
||||
Map<String, Object> response = new LinkedHashMap<String, Object>();
|
||||
response.put(HttpFSFileSystem.ERROR_JSON, json);
|
||||
log(status, throwable);
|
||||
return Response.status(status).type(MediaType.APPLICATION_JSON).entity(response).build();
|
||||
return HttpExceptionUtils.createJerseyExceptionResponse(status, throwable);
|
||||
}
|
||||
|
||||
protected String getOneLineMessage(Throwable throwable) {
|
||||
|
|
|
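The two HttpFS changes above are two halves of the same refactor from HADOOP-11015: the server-side ExceptionProvider now delegates JSON error construction to HttpExceptionUtils, and the HttpFS client validates responses with the matching HttpExceptionUtils.validateResponse instead of the removed HttpFSUtils helper. The following is only a minimal client-side sketch of how that helper pair might be exercised; the endpoint URL and expected status are illustrative assumptions, not part of the patch.

import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.util.HttpExceptionUtils;

public class HttpExceptionUtilsClientSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical HttpFS endpoint; any HTTP server returning the JSON error
    // payload produced by HttpExceptionUtils on the server side would do.
    URL url = new URL("http://localhost:14000/webhdfs/v1/tmp?op=GETFILESTATUS");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // Throws an IOException, re-created from the server's JSON error payload
    // where possible, when the status code is not the expected one.
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  }
}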
@@ -183,6 +183,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {

private void testCreate() throws Exception {
Path path = new Path(getProxiedFSTestDir(), "foo.txt");
FileSystem fs = FileSystem.get(getProxiedFSConf());
fs.delete(path, true);
testCreate(path, false);
testCreate(path, true);
try {
@@ -190,7 +192,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
Assert.fail("the create should have failed because the file exists " +
"and override is FALSE");
} catch (IOException ex) {

System.out.println("#");
} catch (Exception ex) {
Assert.fail(ex.toString());
}

@@ -189,9 +189,8 @@ public class TestHttpFSServerNoACLs extends HTestCase {
Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
String res = reader.readLine();
Assert.assertTrue(res.contains("RemoteException"));
Assert.assertTrue(res.contains("ACL"));
Assert.assertTrue(res.contains("rejected"));
Assert.assertTrue(res.contains("AclException"));
Assert.assertTrue(res.contains("Support for ACLs has been disabled"));
}
}

@@ -224,9 +223,8 @@ public class TestHttpFSServerNoACLs extends HTestCase {
BufferedReader reader;
reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
String err = reader.readLine();
Assert.assertTrue(err.contains("RemoteException"));
Assert.assertTrue(err.contains("ACL"));
Assert.assertTrue(err.contains("rejected"));
Assert.assertTrue(err.contains("AclException"));
Assert.assertTrue(err.contains("Support for ACLs has been disabled"));
}
}

@@ -458,6 +458,15 @@ Release 2.6.0 - UNRELEASED
HDFS-4257. The ReplaceDatanodeOnFailure policies could have a forgiving
option (szetszwo via cmccabe)

HDFS-6959. Make the HDFS home directory location customizable. (yzhang via
cmccabe)

HDFS-6886. Use single editlog record for creating file + overwrite. (Yi Liu
via jing9)

HDFS-6376. Distcp data between two HA clusters requires another configuration.
(Dave Marion and Haohui Mai via jing9)

OPTIMIZATIONS

HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -610,6 +619,15 @@ Release 2.6.0 - UNRELEASED

HDFS-6942. Fix typos in log messages. (Ray Chiang via wheat9)

HDFS-6848. Lack of synchronization on access to datanodeUuid in
DataStorage#format(). (Xiaoyu Yao via Arpit Agarwal)

HDFS-6996. SnapshotDiff report can hit IndexOutOfBoundsException when there
are nested renamed directory/file. (jing9)

HDFS-6831. Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'.
(Xiaoyu Yao via Arpit Agarwal)

BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS

HDFS-6387. HDFS CLI admin tool for creating & deleting an
@@ -706,6 +724,11 @@ Release 2.6.0 - UNRELEASED
HDFS-2975. Rename with overwrite flag true can make NameNode to stuck in safemode
on NN (crash + restart). (Yi Liu via umamahesh)

HDFS-6905. fs-encryption merge triggered release audit failures. (clamb via tucu)

HDFS-6714. TestBlocksScheduledCounter#testBlocksScheduledCounter should
shutdown cluster (vinayakumarb)

Release 2.5.1 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -41,6 +41,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
public static final String DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
public static final int DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
public static final String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
public static final String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY = "dfs.client.retry.policy.enabled";
public static final boolean DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = false;
public static final String DFS_CLIENT_RETRY_POLICY_SPEC_KEY = "dfs.client.retry.policy.spec";
@@ -543,6 +545,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {

public static final String DFS_NAMESERVICES = "dfs.nameservices";
public static final String DFS_NAMESERVICE_ID = "dfs.nameservice.id";
public static final String DFS_INTERNAL_NAMESERVICES_KEY = "dfs.internal.nameservices";
public static final String DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY = "dfs.namenode.resource.check.interval";
public static final int DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT = 5000;
public static final String DFS_NAMENODE_DU_RESERVED_KEY = "dfs.namenode.resource.du.reserved";
@@ -60,6 +60,7 @@ import java.util.Set;

import javax.net.SocketFactory;

import com.google.common.collect.Sets;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Option;
@@ -612,7 +613,7 @@ public class DFSUtil {
String keySuffix = concatSuffixes(suffixes);
return addSuffix(key, keySuffix);
}

/**
* Returns the configured address for all NameNodes in the cluster.
* @param conf configuration
@@ -621,14 +622,25 @@ public class DFSUtil {
* @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
*/
private static Map<String, Map<String, InetSocketAddress>>
getAddresses(Configuration conf,
String defaultAddress, String... keys) {
getAddresses(Configuration conf, String defaultAddress, String... keys) {
Collection<String> nameserviceIds = getNameServiceIds(conf);

return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
}

/**
* Returns the configured address for all NameNodes in the cluster.
* @param conf configuration
* @param nsIds
* @param defaultAddress default address to return in case key is not found.
* @param keys Set of keys to look for in the order of preference
* @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
*/
private static Map<String, Map<String, InetSocketAddress>>
getAddressesForNsIds(Configuration conf, Collection<String> nsIds,
String defaultAddress, String... keys) {
// Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
// across all of the configured nameservices and namenodes.
Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
for (String nsId : emptyAsSingletonNull(nameserviceIds)) {
for (String nsId : emptyAsSingletonNull(nsIds)) {
Map<String, InetSocketAddress> isas =
getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
if (!isas.isEmpty()) {
@@ -773,8 +785,7 @@ public class DFSUtil {

/**
* Returns list of InetSocketAddresses corresponding to namenodes from the
* configuration. Note this is to be used by datanodes to get the list of
* namenode addresses to talk to.
* configuration.
*
* Returns namenode address specifically configured for datanodes (using
* service ports), if found. If not, regular RPC address configured for other
@@ -805,7 +816,60 @@ public class DFSUtil {
}
return addressList;
}

/**
* Returns list of InetSocketAddresses corresponding to the namenode
* that manages this cluster. Note this is to be used by datanodes to get
* the list of namenode addresses to talk to.
*
* Returns namenode address specifically configured for datanodes (using
* service ports), if found. If not, regular RPC address configured for other
* clients is returned.
*
* @param conf configuration
* @return list of InetSocketAddress
* @throws IOException on error
*/
public static Map<String, Map<String, InetSocketAddress>>
getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
// Use default address as fall back
String defaultAddress;
try {
defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
} catch (IllegalArgumentException e) {
defaultAddress = null;
}

Collection<String> parentNameServices = conf.getTrimmedStringCollection
(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);

if (parentNameServices.isEmpty()) {
parentNameServices = conf.getTrimmedStringCollection
(DFSConfigKeys.DFS_NAMESERVICES);
} else {
// Ensure that the internal service is indeed in the list of all available
// nameservices.
Set<String> availableNameServices = Sets.newHashSet(conf
.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
for (String nsId : parentNameServices) {
if (!availableNameServices.contains(nsId)) {
throw new IOException("Unknown nameservice: " + nsId);
}
}
}

Map<String, Map<String, InetSocketAddress>> addressList =
getAddressesForNsIds(conf, parentNameServices, defaultAddress,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
if (addressList.isEmpty()) {
throw new IOException("Incorrect configuration: namenode address "
+ DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
+ DFS_NAMENODE_RPC_ADDRESS_KEY
+ " is not configured.");
}
return addressList;
}

/**
* Flatten the given map, as returned by other functions in this class,
* into a flat list of {@link ConfiguredNNAddress} instances.
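The new getNNServiceRpcAddressesForCluster restricts the returned map to the nameservices named in dfs.internal.nameservices, falls back to dfs.nameservices when that key is unset, and rejects entries that are not known nameservices. A minimal configuration sketch of the intended behavior follows; the nameservice and host names are made-up examples, and DFSUtil is a private Hadoop class, so this is only for illustration.

import java.net.InetSocketAddress;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;

public class InternalNameservicesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two nameservices are defined, but this datanode should report only to ns1.
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2");
    conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
    conf.set("dfs.namenode.rpc-address.ns1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.ns2", "nn2.example.com:8020");

    Map<String, Map<String, InetSocketAddress>> addrs =
        DFSUtil.getNNServiceRpcAddressesForCluster(conf);
    // Expected to contain only ns1; setting dfs.internal.nameservices to a
    // nameservice that is not listed in dfs.nameservices raises an IOException.
    System.out.println(addrs.keySet());
  }
}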
@@ -102,6 +102,8 @@ import com.google.common.base.Preconditions;
public class DistributedFileSystem extends FileSystem {
private Path workingDir;
private URI uri;
private String homeDirPrefix =
DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;

DFSClient dfs;
private boolean verifyChecksum = true;
@@ -136,7 +138,10 @@ public class DistributedFileSystem extends FileSystem {
if (host == null) {
throw new IOException("Incomplete HDFS URI, no host: "+ uri);
}

homeDirPrefix = conf.get(
DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);

this.dfs = new DFSClient(uri, conf, statistics);
this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
this.workingDir = getHomeDirectory();
@@ -167,10 +172,10 @@ public class DistributedFileSystem extends FileSystem {
workingDir = fixRelativePart(dir);
}

@Override
public Path getHomeDirectory() {
return makeQualified(new Path("/user/" + dfs.ugi.getShortUserName()));
return makeQualified(new Path(homeDirPrefix + "/"
+ dfs.ugi.getShortUserName()));
}

/**
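With HDFS-6959 the home directory prefix is no longer hard-coded to /user; DistributedFileSystem reads dfs.user.home.dir.prefix when it is initialized. A short sketch of the expected effect is below; the cluster address, prefix value, and user name are assumptions for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HomeDirPrefixSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster; hostname is hypothetical.
    conf.set("fs.defaultFS", "hdfs://nn.example.com:8020");
    // Illustrative prefix; the shipped default remains "/user".
    conf.set("dfs.user.home.dir.prefix", "/home/hdfs-users");

    FileSystem fs = FileSystem.get(conf);
    // For a user "alice" this is expected to resolve to
    // hdfs://nn.example.com:8020/home/hdfs-users/alice after this change.
    Path home = fs.getHomeDirectory();
    System.out.println(home);
  }
}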
@@ -100,6 +100,7 @@ public abstract class Event {
private String groupName;
private FsPermission perms;
private String symlinkTarget;
private boolean overwrite;

public static class Builder {
private INodeType iNodeType;
@@ -110,6 +111,7 @@ public abstract class Event {
private String groupName;
private FsPermission perms;
private String symlinkTarget;
private boolean overwrite;

public Builder iNodeType(INodeType type) {
this.iNodeType = type;
@@ -150,6 +152,11 @@ public abstract class Event {
this.symlinkTarget = symlinkTarget;
return this;
}

public Builder overwrite(boolean overwrite) {
this.overwrite = overwrite;
return this;
}

public CreateEvent build() {
return new CreateEvent(this);
@@ -166,6 +173,7 @@ public abstract class Event {
this.groupName = b.groupName;
this.perms = b.perms;
this.symlinkTarget = b.symlinkTarget;
this.overwrite = b.overwrite;
}

public INodeType getiNodeType() {
@@ -208,6 +216,10 @@ public abstract class Event {
public String getSymlinkTarget() {
return symlinkTarget;
}

public boolean getOverwrite() {
return overwrite;
}
}

/**
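Because create-with-overwrite is now a single edit-log record, inotify consumers can tell a plain create apart from an overwrite directly from the CreateEvent instead of relying on a preceding unlink event. The sketch below mirrors the test changes later in this diff; how the Event is obtained from the stream (for example via DFSInotifyEventInputStream#take() in this era of the API) is an assumption and not shown.

import org.apache.hadoop.hdfs.inotify.Event;

public class CreateOverwriteSketch {
  // Assumes the caller already pulled an Event off the HDFS inotify stream.
  static void handle(Event event) {
    if (event.getEventType() == Event.EventType.CREATE) {
      Event.CreateEvent ce = (Event.CreateEvent) event;
      if (ce.getOverwrite()) {
        // The create replaced an existing file in one edit-log record,
        // so no separate UNLINK event precedes it.
        System.out.println("Overwrote " + ce.getPath());
      } else {
        System.out.println("Created " + ce.getPath());
      }
    }
  }
}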
@@ -2455,7 +2455,8 @@ public class PBHelper {
.perms(convert(create.getPerms()))
.replication(create.getReplication())
.symlinkTarget(create.getSymlinkTarget().isEmpty() ? null :
create.getSymlinkTarget()).build());
create.getSymlinkTarget())
.overwrite(create.getOverwrite()).build());
break;
case EVENT_METADATA:
InotifyProtos.MetadataUpdateEventProto meta =
@@ -2533,7 +2534,8 @@ public class PBHelper {
.setPerms(convert(ce2.getPerms()))
.setReplication(ce2.getReplication())
.setSymlinkTarget(ce2.getSymlinkTarget() == null ?
"" : ce2.getSymlinkTarget()).build().toByteString()
"" : ce2.getSymlinkTarget())
.setOverwrite(ce2.getOverwrite()).build().toByteString()
).build());
break;
case METADATA:

@@ -149,12 +149,12 @@ class BlockPoolManager {

void refreshNamenodes(Configuration conf)
throws IOException {
LOG.info("Refresh request received for nameservices: "
+ conf.get(DFSConfigKeys.DFS_NAMESERVICES));

Map<String, Map<String, InetSocketAddress>> newAddressMap =
DFSUtil.getNNServiceRpcAddresses(conf);

LOG.info("Refresh request received for nameservices: " + conf.get
(DFSConfigKeys.DFS_NAMESERVICES));

Map<String, Map<String, InetSocketAddress>> newAddressMap = DFSUtil
.getNNServiceRpcAddressesForCluster(conf);

synchronized (refreshNamenodesLock) {
doRefreshNamenodes(newAddressMap);
}

@@ -463,7 +463,7 @@ public class DataStorage extends Storage {
this.clusterID = nsInfo.getClusterID();
this.namespaceID = nsInfo.getNamespaceID();
this.cTime = 0;
this.datanodeUuid = datanodeUuid;
setDatanodeUuid(datanodeUuid);

if (sd.getStorageUuid() == null) {
// Assign a new Storage UUID.

@@ -433,7 +433,8 @@ public class FSDirectory implements Closeable {
/**
* @throws SnapshotAccessControlException
* @see #unprotectedRenameTo(String, String, long)
* @deprecated Use {@link #renameTo(String, String, boolean, Rename...)}
* @deprecated Use {@link #renameTo(String, String, long,
* BlocksMapUpdateInfo, Rename...)}
*/
@Deprecated
boolean renameTo(String src, String dst, long mtime)
@@ -484,7 +485,7 @@ public class FSDirectory implements Closeable {
* @throws QuotaExceededException if the operation violates any quota limit
* @throws FileAlreadyExistsException if the src is a symlink that points to dst
* @throws SnapshotAccessControlException if path is in RO snapshot
* @deprecated See {@link #renameTo(String, String, boolean, Rename...)}
* @deprecated See {@link #renameTo(String, String, long, BlocksMapUpdateInfo, Rename...)}
*/
@Deprecated
boolean unprotectedRenameTo(String src, String dst, long timestamp)

@@ -703,7 +703,8 @@ public class FSEditLog implements LogsPurgeable {
* Add open lease record to edit log.
* Records the block locations of the last block.
*/
public void logOpenFile(String path, INodeFile newNode, boolean toLogRpcIds) {
public void logOpenFile(String path, INodeFile newNode, boolean overwrite,
boolean toLogRpcIds) {
Preconditions.checkArgument(newNode.isUnderConstruction());
PermissionStatus permissions = newNode.getPermissionStatus();
AddOp op = AddOp.getInstance(cache.get())
@@ -717,7 +718,8 @@ public class FSEditLog implements LogsPurgeable {
.setPermissionStatus(permissions)
.setClientName(newNode.getFileUnderConstructionFeature().getClientName())
.setClientMachine(
newNode.getFileUnderConstructionFeature().getClientMachine());
newNode.getFileUnderConstructionFeature().getClientMachine())
.setOverwrite(overwrite);

AclFeature f = newNode.getAclFeature();
if (f != null) {

@@ -342,8 +342,12 @@ public class FSEditLogLoader {

// See if the file already exists (persistBlocks call)
final INodesInPath iip = fsDir.getLastINodeInPath(path);
final INodeFile oldFile = INodeFile.valueOf(
iip.getINode(0), path, true);
INodeFile oldFile = INodeFile.valueOf(iip.getINode(0), path, true);
if (oldFile != null && addCloseOp.overwrite) {
// This is OP_ADD with overwrite
fsDir.unprotectedDelete(path, addCloseOp.mtime);
oldFile = null;
}
INodeFile newFile = oldFile;
if (oldFile == null) { // this is OP_ADD on a new file (case 1)
// versions > 0 support per file replication

@@ -409,6 +409,7 @@ public abstract class FSEditLogOp {
List<XAttr> xAttrs;
String clientName;
String clientMachine;
boolean overwrite;

private AddCloseOp(FSEditLogOpCodes opCode) {
super(opCode);
@@ -488,6 +489,11 @@ public abstract class FSEditLogOp {
this.clientMachine = clientMachine;
return (T)this;
}

<T extends AddCloseOp> T setOverwrite(boolean overwrite) {
this.overwrite = overwrite;
return (T)this;
}

@Override
public void writeFields(DataOutputStream out) throws IOException {
@@ -507,6 +513,7 @@ public abstract class FSEditLogOp {
b.build().writeDelimitedTo(out);
FSImageSerialization.writeString(clientName,out);
FSImageSerialization.writeString(clientMachine,out);
FSImageSerialization.writeBoolean(overwrite, out);
// write clientId and callId
writeRpcIds(rpcClientId, rpcCallId, out);
}
@@ -572,6 +579,12 @@ public abstract class FSEditLogOp {
this.xAttrs = readXAttrsFromEditLog(in, logVersion);
this.clientName = FSImageSerialization.readString(in);
this.clientMachine = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
NameNodeLayoutVersion.Feature.CREATE_OVERWRITE, logVersion)) {
this.overwrite = FSImageSerialization.readBoolean(in);
} else {
this.overwrite = false;
}
// read clientId and callId
readRpcIds(in, logVersion);
} else {
@@ -627,6 +640,8 @@ public abstract class FSEditLogOp {
builder.append(clientName);
builder.append(", clientMachine=");
builder.append(clientMachine);
builder.append(", overwrite=");
builder.append(overwrite);
if (this.opCode == OP_ADD) {
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
}
@@ -655,6 +670,8 @@ public abstract class FSEditLogOp {
Long.toString(blockSize));
XMLUtils.addSaxString(contentHandler, "CLIENT_NAME", clientName);
XMLUtils.addSaxString(contentHandler, "CLIENT_MACHINE", clientMachine);
XMLUtils.addSaxString(contentHandler, "OVERWRITE",
Boolean.toString(overwrite));
for (Block b : blocks) {
FSEditLogOp.blockToXml(contentHandler, b);
}
@@ -678,6 +695,7 @@ public abstract class FSEditLogOp {
this.blockSize = Long.parseLong(st.getValue("BLOCKSIZE"));
this.clientName = st.getValue("CLIENT_NAME");
this.clientMachine = st.getValue("CLIENT_MACHINE");
this.overwrite = Boolean.parseBoolean(st.getValueOrNull("OVERWRITE"));
if (st.hasChildren("BLOCK")) {
List<Stanza> blocks = st.getChildren("BLOCK");
this.blocks = new Block[blocks.size()];

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.Ref
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
@@ -88,6 +89,7 @@ public class FSImageSerialization {
final IntWritable U_INT = new IntWritable();
final LongWritable U_LONG = new LongWritable();
final FsPermission FILE_PERM = new FsPermission((short) 0);
final BooleanWritable U_BOOLEAN = new BooleanWritable();
}

private static void writePermissionStatus(INodeAttributes inode,
@@ -366,6 +368,21 @@ public class FSImageSerialization {
uLong.write(out);
}

/** read the boolean value */
static boolean readBoolean(DataInput in) throws IOException {
BooleanWritable uBoolean = TL_DATA.get().U_BOOLEAN;
uBoolean.readFields(in);
return uBoolean.get();
}

/** write the boolean value */
static void writeBoolean(boolean value, DataOutputStream out)
throws IOException {
BooleanWritable uBoolean = TL_DATA.get().U_BOOLEAN;
uBoolean.set(value);
uBoolean.write(out);
}

/** read the int value */
static int readInt(DataInput in) throws IOException {
IntWritable uInt = TL_DATA.get().U_INT;

@@ -2494,6 +2494,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* A special RetryStartFileException is used to indicate that we should
* retry creation of a FileEncryptionInfo.
*/
BlocksMapUpdateInfo toRemoveBlocks = null;
try {
boolean shouldContinue = true;
int iters = 0;
@@ -2542,9 +2543,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot create file" + src);
src = resolvePath(src, pathComponents);
startFileInternal(pc, src, permissions, holder, clientMachine, create,
overwrite, createParent, replication, blockSize, suite, edek,
logRetryCache);
toRemoveBlocks = startFileInternal(pc, src, permissions, holder,
clientMachine, create, overwrite, createParent, replication,
blockSize, suite, edek, logRetryCache);
stat = dir.getFileInfo(src, false,
FSDirectory.isReservedRawName(srcArg), false);
} catch (StandbyException se) {
@@ -2565,6 +2566,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
// They need to be sync'ed even when an exception was thrown.
if (!skipSync) {
getEditLog().logSync();
if (toRemoveBlocks != null) {
removeBlocks(toRemoveBlocks);
toRemoveBlocks.clear();
}
}
}

@@ -2581,11 +2586,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* For description of parameters and exceptions thrown see
* {@link ClientProtocol#create}
*/
private void startFileInternal(FSPermissionChecker pc, String src,
PermissionStatus permissions, String holder, String clientMachine,
boolean create, boolean overwrite, boolean createParent,
short replication, long blockSize, CipherSuite suite,
EncryptedKeyVersion edek, boolean logRetryEntry)
private BlocksMapUpdateInfo startFileInternal(FSPermissionChecker pc,
String src, PermissionStatus permissions, String holder,
String clientMachine, boolean create, boolean overwrite,
boolean createParent, short replication, long blockSize,
CipherSuite suite, EncryptedKeyVersion edek, boolean logRetryEntry)
throws FileAlreadyExistsException, AccessControlException,
UnresolvedLinkException, FileNotFoundException,
ParentNotDirectoryException, RetryStartFileException, IOException {
@@ -2621,9 +2626,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (isPermissionEnabled) {
if (overwrite && myFile != null) {
checkPathAccess(pc, src, FsAction.WRITE);
} else {
checkAncestorAccess(pc, src, FsAction.WRITE);
}
/*
* To overwrite existing file, need to check 'w' permission
* of parent (equals to ancestor in this case)
*/
checkAncestorAccess(pc, src, FsAction.WRITE);
}

if (!createParent) {
@@ -2631,6 +2639,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}

try {
BlocksMapUpdateInfo toRemoveBlocks = null;
if (myFile == null) {
if (!create) {
throw new FileNotFoundException("Can't overwrite non-existent " +
@@ -2638,11 +2647,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
} else {
if (overwrite) {
try {
deleteInt(src, true, false); // File exists - delete if overwrite
} catch (AccessControlException e) {
logAuditEvent(false, "delete", src);
throw e;
toRemoveBlocks = new BlocksMapUpdateInfo();
List<INode> toRemoveINodes = new ChunkedArrayList<INode>();
long ret = dir.delete(src, toRemoveBlocks, toRemoveINodes, now());
if (ret >= 0) {
incrDeletedFileCount(ret);
removePathAndBlocks(src, null, toRemoveINodes, true);
}
} else {
// If lease soft limit time is expired, recover the lease
@@ -2676,11 +2686,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}

// record file record in log, record new generation stamp
getEditLog().logOpenFile(src, newNode, logRetryEntry);
getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
src + " inode " + newNode.getId() + " " + holder);
}
return toRemoveBlocks;
} catch (IOException ie) {
NameNode.stateChangeLog.warn("DIR* NameSystem.startFile: " + src + " " +
ie.getMessage());
@@ -2783,7 +2794,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}

if (writeToEditLog) {
getEditLog().logOpenFile(src, cons, logRetryCache);
getEditLog().logOpenFile(src, cons, false, logRetryCache);
}
return ret;
}

@@ -459,7 +459,8 @@ public abstract class INodeReference extends INode {
end = mid;
}
}
if (withNameList.get(start).lastSnapshotId >= snapshotId) {
if (start < withNameList.size() &&
withNameList.get(start).lastSnapshotId >= snapshotId) {
return withNameList.get(start);
} else {
return this.getParentReference();

@@ -50,6 +50,7 @@ public class InotifyFSEditLogOpTranslator {
.ownerName(addOp.permissions.getUserName())
.groupName(addOp.permissions.getGroupName())
.perms(addOp.permissions.getPermission())
.overwrite(addOp.overwrite)
.iNodeType(Event.CreateEvent.INodeType.FILE).build() };
} else {
return new Event[] { new Event.AppendEvent(addOp.path) };

@@ -66,7 +66,9 @@ public class NameNodeLayoutVersion {
ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false),
EDITLOG_LENGTH(-56, "Add length field to every edit log op"),
XATTRS(-57, "Extended attributes"),
BLOCK_STORAGE_POLICY(-58, "Block Storage policy");
CREATE_OVERWRITE(-58, "Use single editlog record for " +
"creating file with overwrite"),
BLOCK_STORAGE_POLICY(-59, "Block Storage policy");

private final FeatureInfo info;

@@ -61,7 +61,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
/**
* Snapshots of this directory in ascending order of snapshot names.
* Note that snapshots in ascending order of snapshot id are stored in
* {@link INodeDirectoryWithSnapshot}.diffs (a private field).
* {@link DirectoryWithSnapshotFeature}.diffs (a private field).
*/
private final List<Snapshot> snapshotsByNames = new ArrayList<Snapshot>();
/** Number of snapshots allowed. */
@@ -355,6 +355,42 @@ public class DFSAdmin extends FsShell {
}
}

/**
* Common usage summary shared between "hdfs dfsadmin -help" and
* "hdfs dfsadmin"
*/
private static final String commonUsageSummary =
"\t[-report [-live] [-dead] [-decommissioning]]\n" +
"\t[-safemode <enter | leave | get | wait>]\n" +
"\t[-saveNamespace]\n" +
"\t[-rollEdits]\n" +
"\t[-restoreFailedStorage true|false|check]\n" +
"\t[-refreshNodes]\n" +
"\t[" + SetQuotaCommand.USAGE + "]\n" +
"\t[" + ClearQuotaCommand.USAGE +"]\n" +
"\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
"\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
"\t[-finalizeUpgrade]\n" +
"\t[" + RollingUpgradeCommand.USAGE +"]\n" +
"\t[-refreshServiceAcl]\n" +
"\t[-refreshUserToGroupsMappings]\n" +
"\t[-refreshSuperUserGroupsConfiguration]\n" +
"\t[-refreshCallQueue]\n" +
"\t[-refresh <host:ipc_port> <key> [arg1..argn]\n" +
"\t[-printTopology]\n" +
"\t[-refreshNamenodes datanode_host:ipc_port]\n"+
"\t[-deleteBlockPool datanode_host:ipc_port blockpoolId [force]]\n"+
"\t[-setBalancerBandwidth <bandwidth in bytes per second>]\n" +
"\t[-fetchImage <local directory>]\n" +
"\t[-allowSnapshot <snapshotDir>]\n" +
"\t[-disallowSnapshot <snapshotDir>]\n" +
"\t[-shutdownDatanode <datanode_host:ipc_port> [upgrade]]\n" +
"\t[-getDatanodeInfo <datanode_host:ipc_port>]\n" +
"\t[-metasave filename]\n" +
"\t[-setStoragePolicy path policyName\n" +
"\t[-getStoragePolicy path\n" +
"\t[-help [cmd]]\n";

/**
* Construct a DFSAdmin object.
*/
@@ -618,7 +654,7 @@ public class DFSAdmin extends FsShell {

/**
* Command to ask the namenode to save the namespace.
* Usage: java DFSAdmin -saveNamespace
* Usage: hdfs dfsadmin -saveNamespace
* @exception IOException
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
*/
@@ -659,7 +695,7 @@ public class DFSAdmin extends FsShell {

/**
* Command to enable/disable/check restoring of failed storage replicas in the namenode.
* Usage: java DFSAdmin -restoreFailedStorage true|false|check
* Usage: hdfs dfsadmin -restoreFailedStorage true|false|check
* @exception IOException
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
*/
@@ -697,7 +733,7 @@ public class DFSAdmin extends FsShell {
/**
* Command to ask the namenode to reread the hosts and excluded hosts
* file.
* Usage: java DFSAdmin -refreshNodes
* Usage: hdfs dfsadmin -refreshNodes
* @exception IOException
*/
public int refreshNodes() throws IOException {
@@ -730,7 +766,7 @@ public class DFSAdmin extends FsShell {
/**
* Command to ask the namenode to set the balancer bandwidth for all of the
* datanodes.
* Usage: java DFSAdmin -setBalancerBandwidth bandwidth
* Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
* @param argv List of of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException
@@ -743,7 +779,7 @@ public class DFSAdmin extends FsShell {
bandwidth = Long.parseLong(argv[idx]);
} catch (NumberFormatException nfe) {
System.err.println("NumberFormatException: " + nfe.getMessage());
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-setBalancerBandwidth <bandwidth in bytes per second>]");
return exitCode;
}
@@ -806,38 +842,11 @@ public class DFSAdmin extends FsShell {
}

private void printHelp(String cmd) {
String summary = "hadoop dfsadmin performs DFS administrative commands.\n" +
String summary = "hdfs dfsadmin performs DFS administrative commands.\n" +
"Note: Administrative commands can only be run with superuser permission.\n" +
"The full syntax is: \n\n" +
"hadoop dfsadmin\n" +
"\t[-report [-live] [-dead] [-decommissioning]]\n" +
"\t[-safemode <enter | leave | get | wait>]\n" +
"\t[-saveNamespace]\n" +
"\t[-rollEdits]\n" +
"\t[-restoreFailedStorage true|false|check]\n" +
"\t[-refreshNodes]\n" +
"\t[" + SetQuotaCommand.USAGE + "]\n" +
"\t[" + ClearQuotaCommand.USAGE +"]\n" +
"\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
"\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
"\t[-finalizeUpgrade]\n" +
"\t[" + RollingUpgradeCommand.USAGE +"]\n" +
"\t[-refreshServiceAcl]\n" +
"\t[-refreshUserToGroupsMappings]\n" +
"\t[-refreshSuperUserGroupsConfiguration]\n" +
"\t[-refreshCallQueue]\n" +
"\t[-refresh <host:ipc_port> <key> [arg1..argn]\n" +
"\t[-printTopology]\n" +
"\t[-refreshNamenodes datanodehost:port]\n"+
"\t[-deleteBlockPool datanodehost:port blockpoolId [force]]\n"+
"\t[-setBalancerBandwidth <bandwidth>]\n" +
"\t[-fetchImage <local directory>]\n" +
"\t[-allowSnapshot <snapshotDir>]\n" +
"\t[-disallowSnapshot <snapshotDir>]\n" +
"\t[-shutdownDatanode <datanode_host:ipc_port> [upgrade]]\n" +
"\t[-getDatanodeInfo <datanode_host:ipc_port>\n" +
"\t[-setStoragePolicy path policyName\n" +
"\t[-getStoragePolicy path\n" +
"\t[-help [cmd]]\n";
"hdfs dfsadmin\n" +
commonUsageSummary;

String report ="-report [-live] [-dead] [-decommissioning]:\n" +
"\tReports basic filesystem information and statistics.\n" +
@@ -856,15 +865,13 @@ public class DFSAdmin extends FsShell {

String saveNamespace = "-saveNamespace:\t" +
"Save current namespace into storage directories and reset edits log.\n" +
"\t\tRequires superuser permissions and safe mode.\n";
"\t\tRequires safe mode.\n";

String rollEdits = "-rollEdits:\t" +
"Rolls the edit log.\n" +
"\t\tRequires superuser permissions.\n";
"Rolls the edit log.\n";

String restoreFailedStorage = "-restoreFailedStorage:\t" +
"Set/Unset/Check flag to attempt restore of failed storage replicas if they become available.\n" +
"\t\tRequires superuser permissions.\n";
"Set/Unset/Check flag to attempt restore of failed storage replicas if they become available.\n";

String refreshNodes = "-refreshNodes: \tUpdates the namenode with the " +
"set of datanodes allowed to connect to the namenode.\n\n" +
@@ -1064,7 +1071,7 @@ public class DFSAdmin extends FsShell {

/**
* Command to ask the namenode to finalize previously performed upgrade.
* Usage: java DFSAdmin -finalizeUpgrade
* Usage: hdfs dfsadmin -finalizeUpgrade
* @exception IOException
*/
public int finalizeUpgrade() throws IOException {
@@ -1101,7 +1108,7 @@ public class DFSAdmin extends FsShell {

/**
* Dumps DFS data structures into specified file.
* Usage: java DFSAdmin -metasave filename
* Usage: hdfs dfsadmin -metasave filename
* @param argv List of of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException if an error occurred while accessing
@@ -1409,10 +1416,10 @@ public class DFSAdmin extends FsShell {
*/
private static void printUsage(String cmd) {
if ("-report".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-report] [-live] [-dead] [-decommissioning]");
} else if ("-safemode".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-safemode enter | leave | get | wait]");
} else if ("-setStoragePolicy".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
@@ -1421,114 +1428,84 @@ public class DFSAdmin extends FsShell {
System.err.println("Usage: java DFSAdmin"
+ " [-getStoragePolicy path]");
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-allowSnapshot <snapshotDir>]");
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-disallowSnapshot <snapshotDir>]");
} else if ("-saveNamespace".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-saveNamespace]");
} else if ("-rollEdits".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-rollEdits]");
} else if ("-restoreFailedStorage".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-restoreFailedStorage true|false|check ]");
} else if ("-refreshNodes".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshNodes]");
} else if ("-finalizeUpgrade".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-finalizeUpgrade]");
} else if (RollingUpgradeCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [" + RollingUpgradeCommand.USAGE+"]");
} else if ("-metasave".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-metasave filename]");
} else if (SetQuotaCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [" + SetQuotaCommand.USAGE+"]");
} else if (ClearQuotaCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " ["+ClearQuotaCommand.USAGE+"]");
} else if (SetSpaceQuotaCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [" + SetSpaceQuotaCommand.USAGE+"]");
} else if (ClearSpaceQuotaCommand.matches(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " ["+ClearSpaceQuotaCommand.USAGE+"]");
} else if ("-refreshServiceAcl".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshServiceAcl]");
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshUserToGroupsMappings]");
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshSuperUserGroupsConfiguration]");
} else if ("-refreshCallQueue".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshCallQueue]");
} else if ("-refresh".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refresh <hostname:port> <resource_identifier> [arg1..argn]");
} else if ("-printTopology".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-printTopology]");
} else if ("-refreshNamenodes".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshNamenodes datanode-host:port]");
} else if ("-deleteBlockPool".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-deleteBlockPool datanode-host:port blockpoolId [force]]");
} else if ("-setBalancerBandwidth".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-setBalancerBandwidth <bandwidth in bytes per second>]");
} else if ("-fetchImage".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-fetchImage <local directory>]");
} else if ("-shutdownDatanode".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]");
} else if ("-getDatanodeInfo".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
System.err.println("Usage: hdfs dfsadmin"
+ " [-getDatanodeInfo <datanode_host:ipc_port>]");
} else {
System.err.println("Usage: java DFSAdmin");
System.err.println("Usage: hdfs dfsadmin");
System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");
System.err.println(" [-report]");
System.err.println(" [-safemode enter | leave | get | wait]");
System.err.println(" [-allowSnapshot <snapshotDir>]");
System.err.println(" [-disallowSnapshot <snapshotDir>]");
System.err.println(" [-saveNamespace]");
System.err.println(" [-rollEdits]");
System.err.println(" [-restoreFailedStorage true|false|check]");
System.err.println(" [-refreshNodes]");
System.err.println(" [-finalizeUpgrade]");
System.err.println(" ["+RollingUpgradeCommand.USAGE+"]");
System.err.println(" [-metasave filename]");
System.err.println(" [-refreshServiceAcl]");
System.err.println(" [-refreshUserToGroupsMappings]");
System.err.println(" [-refreshSuperUserGroupsConfiguration]");
System.err.println(" [-refreshCallQueue]");
System.err.println(" [-refresh]");
System.err.println(" [-printTopology]");
System.err.println(" [-refreshNamenodes datanodehost:port]");
System.err.println(" [-deleteBlockPool datanode-host:port blockpoolId [force]]");
System.err.println(" ["+SetQuotaCommand.USAGE+"]");
System.err.println(" ["+ClearQuotaCommand.USAGE+"]");
System.err.println(" ["+SetSpaceQuotaCommand.USAGE+"]");
System.err.println(" ["+ClearSpaceQuotaCommand.USAGE+"]");
System.err.println(" [-setBalancerBandwidth <bandwidth in bytes per second>]");
System.err.println(" [-fetchImage <local directory>]");
System.err.println(" [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]");
System.err.println(" [-getDatanodeInfo <datanode_host:ipc_port>]");
System.err.println(" [-setStoragePolicy path policyName]");
System.err.println(" [-getStoragePolicy path]");
System.err.println(" [-help [cmd]]");
System.err.println();
System.err.println(commonUsageSummary);
ToolRunner.printGenericCommandUsage(System.err);
}
}
@@ -186,7 +186,7 @@ public class GetConf extends Configured implements Tool {
static class NameNodesCommandHandler extends CommandHandler {
@Override
int doWorkInternal(GetConf tool, String []args) throws IOException {
tool.printMap(DFSUtil.getNNServiceRpcAddresses(tool.getConf()));
tool.printMap(DFSUtil.getNNServiceRpcAddressesForCluster(tool.getConf()));
return 0;
}
}
@@ -223,7 +223,7 @@ public class GetConf extends Configured implements Tool {
public int doWorkInternal(GetConf tool, String []args) throws IOException {
Configuration config = tool.getConf();
List<ConfiguredNNAddress> cnnlist = DFSUtil.flattenAddressMap(
DFSUtil.getNNServiceRpcAddresses(config));
DFSUtil.getNNServiceRpcAddressesForCluster(config));
if (!cnnlist.isEmpty()) {
for (ConfiguredNNAddress cnn : cnnlist) {
InetSocketAddress rpc = cnn.getAddress();

@@ -72,6 +72,7 @@ message CreateEventProto {
required FsPermissionProto perms = 6;
optional int32 replication = 7;
optional string symlinkTarget = 8;
optional bool overwrite = 9;
}

message CloseEventProto {
@@ -1116,6 +1116,16 @@
</description>
</property>

<property>
  <name>dfs.internal.nameservices</name>
  <value></value>
  <description>
    Comma-separated list of nameservices that belong to this cluster.
    Datanode will report to all the nameservices in this list. By default
    this is set to the value of dfs.nameservices.
  </description>
</property>

<property>
  <name>dfs.ha.namenodes.EXAMPLENAMESERVICE</name>
  <value></value>
@@ -2099,4 +2109,12 @@
</description>
</property>

<property>
  <name>dfs.user.home.dir.prefix</name>
  <value>/user</value>
  <description>The directory to prepend to user name to get the user's
    home directory.
  </description>
</property>

</configuration>
@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileSystem;
|
|||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
|
||||
import org.junit.After;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
|
@ -35,14 +36,25 @@ import org.junit.Test;
|
|||
* scheduled to a datanode.
|
||||
*/
|
||||
public class TestBlocksScheduledCounter {
|
||||
MiniDFSCluster cluster = null;
|
||||
FileSystem fs = null;
|
||||
|
||||
@After
|
||||
public void tearDown() throws IOException {
|
||||
if (fs != null) {
|
||||
fs.close();
|
||||
}
|
||||
if(cluster!=null){
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testBlocksScheduledCounter() throws IOException {
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
|
||||
.build();
|
||||
cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
|
||||
|
||||
cluster.waitActive();
|
||||
FileSystem fs = cluster.getFileSystem();
|
||||
fs = cluster.getFileSystem();
|
||||
|
||||
//open a file an write a few bytes:
|
||||
FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
|
||||
|
|
|
@ -147,12 +147,7 @@ public class TestDFSInotifyEventInputStream {
|
|||
Assert.assertTrue(re2.getSrcPath().equals("/file4"));
|
||||
Assert.assertTrue(re.getTimestamp() > 0);
|
||||
|
||||
// DeleteOp
|
||||
next = waitForNextEvent(eis);
|
||||
Assert.assertTrue(next.getEventType() == Event.EventType.UNLINK);
|
||||
Assert.assertTrue(((Event.UnlinkEvent) next).getPath().equals("/file2"));
|
||||
|
||||
// AddOp
|
||||
// AddOp with overwrite
|
||||
next = waitForNextEvent(eis);
|
||||
Assert.assertTrue(next.getEventType() == Event.EventType.CREATE);
|
||||
Event.CreateEvent ce = (Event.CreateEvent) next;
|
||||
|
@ -161,6 +156,7 @@ public class TestDFSInotifyEventInputStream {
|
|||
Assert.assertTrue(ce.getCtime() > 0);
|
||||
Assert.assertTrue(ce.getReplication() > 0);
|
||||
Assert.assertTrue(ce.getSymlinkTarget() == null);
|
||||
Assert.assertTrue(ce.getOverwrite());
|
||||
|
||||
// CloseOp
|
||||
next = waitForNextEvent(eis);
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
|
|||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
|
||||
|
@ -865,4 +866,29 @@ public class TestDFSUtil {
|
|||
// let's make sure that a password that doesn't exist returns null
|
||||
Assert.assertEquals(null, DFSUtil.getPassword(conf,"invalid-alias"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetNNServiceRpcAddressesForNsIds() throws IOException {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.set(DFS_NAMESERVICES, "nn1,nn2");
|
||||
conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn1");
|
||||
// Test - configured list of namenodes are returned
|
||||
final String NN1_ADDRESS = "localhost:9000";
|
||||
final String NN2_ADDRESS = "localhost:9001";
|
||||
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
|
||||
NN1_ADDRESS);
|
||||
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
|
||||
NN2_ADDRESS);
|
||||
|
||||
Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil
|
||||
.getNNServiceRpcAddressesForCluster(conf);
|
||||
assertEquals(1, nnMap.size());
|
||||
assertTrue(nnMap.containsKey("nn1"));
|
||||
conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn3");
|
||||
try {
|
||||
DFSUtil.getNNServiceRpcAddressesForCluster(conf);
|
||||
fail("Should fail for misconfiguration");
|
||||
} catch (IOException ignored) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -38,6 +38,7 @@ import static org.junit.Assert.fail;
 import static org.junit.Assume.assumeTrue;
 
 import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileReader;
@ -70,6 +71,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@ -78,6 +80,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@ -86,6 +90,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
 
 /**
@ -1210,4 +1215,118 @@ public class TestFileCreation {
     }
   }
 
+  /**
+   * 1. Check the blocks of old file are cleaned after creating with overwrite
+   * 2. Restart NN, check the file
+   * 3. Save new checkpoint and restart NN, check the file
+   */
+  @Test(timeout = 120000)
+  public void testFileCreationWithOverwrite() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.blocksize", blockSize);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(3).build();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    try {
+      dfs.mkdirs(new Path("/foo/dir"));
+      String file = "/foo/dir/file";
+      Path filePath = new Path(file);
+
+      // Case 1: Create file with overwrite, check the blocks of old file
+      // are cleaned after creating with overwrite
+      NameNode nn = cluster.getNameNode();
+      FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
+      BlockManager bm = fsn.getBlockManager();
+
+      FSDataOutputStream out = dfs.create(filePath);
+      byte[] oldData = AppendTestUtil.randomBytes(seed, fileSize);
+      try {
+        out.write(oldData);
+      } finally {
+        out.close();
+      }
+
+      LocatedBlocks oldBlocks = NameNodeAdapter.getBlockLocations(
+          nn, file, 0, fileSize);
+      assertBlocks(bm, oldBlocks, true);
+
+      out = dfs.create(filePath, true);
+      byte[] newData = AppendTestUtil.randomBytes(seed, fileSize);
+      try {
+        out.write(newData);
+      } finally {
+        out.close();
+      }
+      dfs.deleteOnExit(filePath);
+
+      LocatedBlocks newBlocks = NameNodeAdapter.getBlockLocations(
+          nn, file, 0, fileSize);
+      assertBlocks(bm, newBlocks, true);
+      assertBlocks(bm, oldBlocks, false);
+
+      FSDataInputStream in = dfs.open(filePath);
+      byte[] result = null;
+      try {
+        result = readAll(in);
+      } finally {
+        in.close();
+      }
+      Assert.assertArrayEquals(newData, result);
+
+      // Case 2: Restart NN, check the file
+      cluster.restartNameNode();
+      nn = cluster.getNameNode();
+      in = dfs.open(filePath);
+      try {
+        result = readAll(in);
+      } finally {
+        in.close();
+      }
+      Assert.assertArrayEquals(newData, result);
+
+      // Case 3: Save new checkpoint and restart NN, check the file
+      NameNodeAdapter.enterSafeMode(nn, false);
+      NameNodeAdapter.saveNamespace(nn);
+      cluster.restartNameNode();
+      nn = cluster.getNameNode();
+
+      in = dfs.open(filePath);
+      try {
+        result = readAll(in);
+      } finally {
+        in.close();
+      }
+      Assert.assertArrayEquals(newData, result);
+    } finally {
+      if (dfs != null) {
+        dfs.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  private void assertBlocks(BlockManager bm, LocatedBlocks lbs,
+      boolean exist) {
+    for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
+      if (exist) {
+        assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
+            getLocalBlock()) != null);
+      } else {
+        assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
+            getLocalBlock()) == null);
+      }
+    }
+  }
+
+  private byte[] readAll(FSDataInputStream in) throws IOException {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    byte[] buffer = new byte[1024];
+    int n = 0;
+    while((n = in.read(buffer)) > -1) {
+      out.write(buffer, 0, n);
+    }
+    return out.toByteArray();
+  }
 }
@ -90,7 +90,9 @@ public class TestLocalDFS {
 
       // test home directory
       Path home =
-        fileSys.makeQualified(new Path("/user/" + getUserName(fileSys)));
+        fileSys.makeQualified(
+            new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
+                + "/" + getUserName(fileSys)));
       Path fsHome = fileSys.getHomeDirectory();
       assertEquals(home, fsHome);
 
@ -99,4 +101,29 @@ public class TestLocalDFS {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Tests get/set working directory in DFS.
+   */
+  @Test(timeout=30000)
+  public void testHomeDirectory() throws IOException {
+    final String[] homeBases = new String[] {"/home", "/home/user"};
+    Configuration conf = new HdfsConfiguration();
+    for (final String homeBase : homeBases) {
+      conf.set(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, homeBase);
+      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+      FileSystem fileSys = cluster.getFileSystem();
+      try {
+        // test home directory
+        Path home =
+            fileSys.makeQualified(
+                new Path(homeBase + "/" + getUserName(fileSys)));
+        Path fsHome = fileSys.getHomeDirectory();
+        assertEquals(home, fsHome);
+      } finally {
+        fileSys.close();
+        cluster.shutdown();
+      }
+    }
+  }
 }
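The hunks above make the home-directory prefix configurable instead of hard-coding "/user". A minimal client-side sketch of the same behaviour, assuming the property name dfs.user.home.dir.prefix matches the DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY constant used in the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class HomeDirPrefixExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed property name; the test sets the same value through
        // DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY.
        conf.set("dfs.user.home.dir.prefix", "/home");

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          // With the prefix above, the home directory becomes /home/<username>
          // instead of the former hard-coded /user/<username>.
          Path home = fs.getHomeDirectory();
          System.out.println(home);
        } finally {
          cluster.shutdown();
        }
      }
    }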
@ -23,15 +23,18 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@ -130,6 +133,25 @@ public class TestBlockPoolManager {
         "refresh #2\n", log.toString());
   }
 
+  @Test
+  public void testInternalNameService() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2,ns3");
+    addNN(conf, "ns1", "mock1:8020");
+    addNN(conf, "ns2", "mock1:8020");
+    addNN(conf, "ns3", "mock1:8020");
+    conf.set(DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
+    bpm.refreshNamenodes(conf);
+    assertEquals("create #1\n", log.toString());
+    @SuppressWarnings("unchecked")
+    Map<String, BPOfferService> map = (Map<String, BPOfferService>) Whitebox
+        .getInternalState(bpm, "bpByNameserviceId");
+    Assert.assertFalse(map.containsKey("ns2"));
+    Assert.assertFalse(map.containsKey("ns3"));
+    Assert.assertTrue(map.containsKey("ns1"));
+    log.setLength(0);
+  }
+
   private static void addNN(Configuration conf, String ns, String addr) {
     String key = DFSUtil.addKeySuffixes(
         DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, ns);
@ -99,7 +99,7 @@ public class CreateEditsLog {
       INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
           p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0);
       fileUc.toUnderConstruction("", "");
-      editLog.logOpenFile(filePath, fileUc, false);
+      editLog.logOpenFile(filePath, fileUc, false, false);
       editLog.logCloseFile(filePath, inode);
 
       if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks
@ -197,7 +197,7 @@ public class TestEditLog {
           p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, (byte)0);
       inode.toUnderConstruction("", "");
 
-      editLog.logOpenFile("/filename" + (startIndex + i), inode, false);
+      editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
       editLog.logCloseFile("/filename" + (startIndex + i), inode);
       editLog.logSync();
     }
@ -492,4 +492,40 @@ public class TestSnapshotDiffReport {
         new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo"),
             DFSUtil.string2Bytes("bar")));
   }
+
+  /**
+   * Nested renamed dir/file and the withNameList in the WithCount node of the
+   * parental directory is empty due to snapshot deletion. See HDFS-6996 for
+   * details.
+   */
+  @Test
+  public void testDiffReportWithRenameAndSnapshotDeletion() throws Exception {
+    final Path root = new Path("/");
+    final Path foo = new Path(root, "foo");
+    final Path bar = new Path(foo, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, seed);
+
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+    // rename /foo to /foo2
+    final Path foo2 = new Path(root, "foo2");
+    hdfs.rename(foo, foo2);
+    // now /foo/bar becomes /foo2/bar
+    final Path bar2 = new Path(foo2, "bar");
+
+    // delete snapshot s0 so that the withNameList inside of the WithCount node
+    // of foo becomes empty
+    hdfs.deleteSnapshot(root, "s0");
+
+    // create snapshot s1 and rename bar again
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+    final Path bar3 = new Path(foo2, "bar-new");
+    hdfs.rename(bar2, bar3);
+
+    // we always put modification on the file before rename
+    verifyDiffReport(root, "s1", "",
+        new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+        new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo2")),
+        new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo2/bar"),
+            DFSUtil.string2Bytes("foo2/bar-new")));
+  }
 }
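The HDFS-6996 regression test above goes through the test helper verifyDiffReport. For reference, a hedged sketch of requesting the same kind of report directly through the public DistributedFileSystem API (paths and snapshot names are illustrative):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

    public class SnapshotDiffExample {
      // Prints every entry recorded between snapshot s1 and the current state ("").
      static void printDiff(DistributedFileSystem dfs, Path snapshotRoot) throws Exception {
        SnapshotDiffReport report = dfs.getSnapshotDiffReport(snapshotRoot, "s1", "");
        for (SnapshotDiffReport.DiffReportEntry entry : report.getDiffList()) {
          // Each entry carries the change type (CREATE, DELETE, MODIFY, RENAME)
          // plus the source path and, for renames, the target path.
          System.out.println(entry);
        }
      }
    }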
@ -18,11 +18,13 @@
 package org.apache.hadoop.hdfs.tools;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@ -121,13 +123,13 @@ public class TestGetConf {
       TestType type, HdfsConfiguration conf) throws IOException {
     switch (type) {
     case NAMENODE:
-      return DFSUtil.getNNServiceRpcAddresses(conf);
+      return DFSUtil.getNNServiceRpcAddressesForCluster(conf);
     case BACKUP:
       return DFSUtil.getBackupNodeAddresses(conf);
     case SECONDARY:
       return DFSUtil.getSecondaryNameNodeAddresses(conf);
     case NNRPCADDRESSES:
-      return DFSUtil.getNNServiceRpcAddresses(conf);
+      return DFSUtil.getNNServiceRpcAddressesForCluster(conf);
     }
     return null;
   }
@ -226,7 +228,7 @@ public class TestGetConf {
     String[] actual = toStringArray(list);
     Arrays.sort(actual);
     Arrays.sort(expected);
-    assertTrue(Arrays.equals(expected, actual));
+    assertArrayEquals(expected, actual);
 
     // Test GetConf returned addresses
     getAddressListFromTool(type, conf, checkPort, list);
@ -425,7 +427,23 @@ public class TestGetConf {
     assertEquals(hostsFile.toUri().getPath(),ret.trim());
     cleanupFile(localFileSys, excludeFile.getParent());
   }
 
+  @Test
+  public void testIncludeInternalNameServices() throws Exception {
+    final int nsCount = 10;
+    final int remoteNsCount = 4;
+    HdfsConfiguration conf = new HdfsConfiguration();
+    setupNameServices(conf, nsCount);
+    setupAddress(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsCount, 1000);
+    setupAddress(conf, DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1500);
+    conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
+    setupStaticHostResolution(nsCount);
+
+    String[] includedNN = new String[] {"nn1:1001"};
+    verifyAddresses(conf, TestType.NAMENODE, false, includedNN);
+    verifyAddresses(conf, TestType.NNRPCADDRESSES, true, includedNN);
+  }
+
   private void writeConfigFile(Path name, ArrayList<String> nodes)
       throws IOException {
     // delete if it already exists
@ -90,7 +90,7 @@ public class TestTools {
       fail("testDFSAdminHelp error" + e);
     }
 
-    String pattern = "Usage: java DFSAdmin";
+    String pattern = "Usage: hdfs dfsadmin";
     checkOutput(new String[] { "-cancel", "-renew" }, pattern, System.err,
         DFSAdmin.class);
   }

Binary file not shown.

File diff suppressed because it is too large.
@ -267,6 +267,12 @@ Release 2.6.0 - UNRELEASED
     MAPREDUCE-5931. Validate SleepJob command line parameters (Gera Shegalov
     via jlowe)
 
+    MAPREDUCE-6063. Correct spill size calculation for spills wrapping the
+    circular buffer. (zhihai xu via cdouglas)
+
+    MAPREDUCE-6071. JobImpl#makeUberDecision doesn't log that Uber mode is
+    disabled because of too much CPUs (Tsuyoshi OZAWA via jlowe)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@ -1285,6 +1285,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
       msg.append(" too many reduces;");
     if (!smallInput)
       msg.append(" too much input;");
+    if (!smallCpu)
+      msg.append(" too much CPU;");
     if (!smallMemory)
       msg.append(" too much RAM;");
     if (!notChainJob)
@ -1575,9 +1575,7 @@ public class MapTask extends Task {
         InterruptedException {
       //approximate the length of the output file to be the length of the
       //buffer + header lengths for the partitions
-      final long size = (bufend >= bufstart
-          ? bufend - bufstart
-          : (bufvoid - bufend) + bufstart) +
+      final long size = distanceTo(bufstart, bufend, bufvoid) +
           partitions * APPROX_HEADER_LENGTH;
       FSDataOutputStream out = null;
       try {
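MAPREDUCE-6063 swaps the open-coded estimate for distanceTo(bufstart, bufend, bufvoid) so that a spill wrapping around the end of the circular collection buffer is sized correctly (the removed expression added the wrong segments when bufend < bufstart). A standalone sketch of that distance calculation; the helper below is illustrative rather than MapTask's actual implementation:

    public final class CircularBufferMath {
      private CircularBufferMath() {}

      /**
       * Bytes between start and end in a circular buffer of capacity voidSize.
       * When end has wrapped past the end of the buffer (end < start), the span
       * is the tail after start plus the head before end.
       */
      static long distance(long start, long end, long voidSize) {
        return end >= start ? end - start : (voidSize - start) + end;
      }

      public static void main(String[] args) {
        // Non-wrapping spill: bytes 100..900 of a 1000-byte buffer.
        System.out.println(distance(100, 900, 1000));   // 800
        // Wrapping spill: data runs from 900 to the end, then 0..100.
        System.out.println(distance(900, 100, 1000));   // 200
      }
    }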
@ -175,6 +175,12 @@ Release 2.6.0 - UNRELEASED
     YARN-1506. Changed RMNode/SchedulerNode to update resource with event
     notification. (Junping Du via jianhe)
 
+    YARN-2509. Enable Cross Origin Filter for timeline server only and not all
+    Yarn servers (Mit Desai via jeagles)
+
+    YARN-2511. Allowed all origins by default when CrossOriginFilter is
+    enabled. (Jonathan Eagles via zjshen)
+
   OPTIMIZATIONS
 
   BUG FIXES
@ -275,6 +281,9 @@ Release 2.6.0 - UNRELEASED
     YARN-2462. TestNodeManagerResync#testBlockNewContainerRequestsOnStartAndResync
     should have a test timeout (Eric Payne via jlowe)
 
+    YARN-2431. NM restart: cgroup is not removed for reacquired containers
+    (jlowe)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@ -1239,6 +1239,14 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_KEYTAB =
       TIMELINE_SERVICE_PREFIX + "keytab";
 
+  /** Enables cross origin support for timeline server.*/
+  public static final String TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED =
+      TIMELINE_SERVICE_PREFIX + "http-cross-origin.enabled";
+
+  /** Default value for cross origin support for timeline server.*/
+  public static final boolean
+      TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT = false;
+
   ////////////////////////////////
   // Other Configs
   ////////////////////////////////
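These constants back the YARN-2509 opt-in switch that ApplicationHistoryServer checks below. A hedged sketch of enabling it programmatically; it assumes TIMELINE_SERVICE_PREFIX resolves the key to yarn.timeline-service.http-cross-origin.enabled:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class EnableTimelineCors {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Off by default (TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT = false);
        // the timeline server only prepends CrossOriginFilterInitializer when this is true.
        conf.setBoolean(
            YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED, true);
        // Assumed full property name derived from TIMELINE_SERVICE_PREFIX.
        System.out.println(conf.get("yarn.timeline-service.http-cross-origin.enabled"));
      }
    }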
@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineDelegationTokenSecretManagerService;
+import org.apache.hadoop.yarn.server.timeline.webapp.CrossOriginFilterInitializer;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebApps;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@ -197,17 +198,27 @@ public class ApplicationHistoryServer extends CompositeService {
     // the customized filter will be loaded by the timeline server to do Kerberos
     // + DT authentication.
     String initializers = conf.get("hadoop.http.filter.initializers");
-    boolean modifiedInitialiers = false;
+    boolean modifiedInitializers = false;
 
     initializers =
         initializers == null || initializers.length() == 0 ? "" : initializers;
 
+    if (!initializers.contains(CrossOriginFilterInitializer.class.getName())) {
+      if(conf.getBoolean(YarnConfiguration
+          .TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED, YarnConfiguration
+          .TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT)) {
+        initializers = CrossOriginFilterInitializer.class.getName() + ","
+            + initializers;
+        modifiedInitializers = true;
+      }
+    }
+
     if (!initializers.contains(TimelineAuthenticationFilterInitializer.class
         .getName())) {
       initializers =
           TimelineAuthenticationFilterInitializer.class.getName() + ","
              + initializers;
-      modifiedInitialiers = true;
+      modifiedInitializers = true;
     }
 
     String[] parts = initializers.split(",");
@ -216,14 +227,14 @@ public class ApplicationHistoryServer extends CompositeService {
       filterInitializer = filterInitializer.trim();
       if (filterInitializer.equals(AuthenticationFilterInitializer.class
           .getName())) {
-        modifiedInitialiers = true;
+        modifiedInitializers = true;
         continue;
       }
       target.add(filterInitializer);
     }
     String actualInitializers =
         org.apache.commons.lang.StringUtils.join(target, ",");
-    if (modifiedInitialiers) {
+    if (modifiedInitializers) {
       conf.set("hadoop.http.filter.initializers", actualInitializers);
     }
     String bindAddress = WebAppUtils.getWebAppBindURL(conf,
@ -76,6 +76,7 @@ public class CrossOriginFilter implements Filter {
   private List<String> allowedMethods = new ArrayList<String>();
   private List<String> allowedHeaders = new ArrayList<String>();
   private List<String> allowedOrigins = new ArrayList<String>();
+  private boolean allowAllOrigins = true;
   private String maxAge;
 
   @Override
@ -171,7 +172,9 @@ public class CrossOriginFilter implements Filter {
     }
     allowedOrigins =
         Arrays.asList(allowedOriginsConfig.trim().split("\\s*,\\s*"));
+    allowAllOrigins = allowedOrigins.contains("*");
     LOG.info("Allowed Origins: " + StringUtils.join(allowedOrigins, ','));
+    LOG.info("Allow All Origins: " + allowAllOrigins);
   }
 
   private void initializeMaxAge(FilterConfig filterConfig) {
@ -199,8 +202,9 @@ public class CrossOriginFilter implements Filter {
     return origin != null;
   }
 
-  private boolean isOriginAllowed(String origin) {
-    return allowedOrigins.contains(origin);
+  @VisibleForTesting
+  boolean isOriginAllowed(String origin) {
+    return allowAllOrigins || allowedOrigins.contains(origin);
   }
 
   private boolean areHeadersAllowed(String accessControlRequestHeaders) {
@ -213,7 +217,7 @@ public class CrossOriginFilter implements Filter {
 
   private boolean isMethodAllowed(String accessControlRequestMethod) {
     if (accessControlRequestMethod == null) {
-      return false;
+      return true;
     }
     return allowedMethods.contains(accessControlRequestMethod);
   }
@ -31,6 +31,7 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.junit.Assert;
 import org.junit.Test;
 
 import static org.mockito.Mockito.when;
@ -65,6 +66,20 @@ public class TestCrossOriginFilter {
     verify(mockChain).doFilter(mockReq, mockRes);
   }
 
+  @Test
+  public void testAllowAllOrigins() throws ServletException, IOException {
+
+    // Setup the configuration settings of the server
+    Map<String, String> conf = new HashMap<String, String>();
+    conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "*");
+    FilterConfig filterConfig = new FilterConfigTest(conf);
+
+    // Object under test
+    CrossOriginFilter filter = new CrossOriginFilter();
+    filter.init(filterConfig);
+    Assert.assertTrue(filter.isOriginAllowed("example.org"));
+  }
+
   @Test
   public void testDisallowedOrigin() throws ServletException, IOException {
 
@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import com.google.common.base.Optional;
 
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
@ -341,6 +342,16 @@ public class LinuxContainerExecutor extends ContainerExecutor {
     return 0;
   }
 
+  @Override
+  public int reacquireContainer(String user, ContainerId containerId)
+      throws IOException {
+    try {
+      return super.reacquireContainer(user, containerId);
+    } finally {
+      resourcesHandler.postExecute(containerId);
+    }
+  }
+
   @Override
   public boolean signalContainer(String user, String pid, Signal signal)
       throws IOException {
@ -30,6 +30,8 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
 
 import org.junit.Assert;
 import org.apache.commons.logging.Log;
@ -42,11 +44,15 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@ -353,4 +359,58 @@ public class TestLinuxContainerExecutor {
     }
   }
 
+  @Test(timeout=10000)
+  public void testPostExecuteAfterReacquisition() throws Exception {
+    // make up some bogus container ID
+    ApplicationId appId = ApplicationId.newInstance(12345, 67890);
+    ApplicationAttemptId attemptId =
+        ApplicationAttemptId.newInstance(appId, 54321);
+    ContainerId cid = ContainerId.newInstance(attemptId, 9876);
+
+    Configuration conf = new YarnConfiguration();
+    conf.setClass(YarnConfiguration.NM_LINUX_CONTAINER_RESOURCES_HANDLER,
+        TestResourceHandler.class, LCEResourcesHandler.class);
+    LinuxContainerExecutor lce = new LinuxContainerExecutor();
+    lce.setConf(conf);
+    try {
+      lce.init();
+    } catch (IOException e) {
+      // expected if LCE isn't setup right, but not necessary for this test
+    }
+    lce.reacquireContainer("foouser", cid);
+    Assert.assertTrue("postExec not called after reacquisition",
+        TestResourceHandler.postExecContainers.contains(cid));
+  }
+
+  private static class TestResourceHandler implements LCEResourcesHandler {
+    static Set<ContainerId> postExecContainers = new HashSet<ContainerId>();
+
+    @Override
+    public void setConf(Configuration conf) {
+    }
+
+    @Override
+    public Configuration getConf() {
+      return null;
+    }
+
+    @Override
+    public void init(LinuxContainerExecutor lce) throws IOException {
+    }
+
+    @Override
+    public void preExecute(ContainerId containerId, Resource containerResource)
+        throws IOException {
+    }
+
+    @Override
+    public void postExecute(ContainerId containerId) {
+      postExecContainers.add(containerId);
+    }
+
+    @Override
+    public String getResourcesOption(ContainerId containerId) {
+      return null;
+    }
+  }
 }