Merge from trunk to branch
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1610853 13f79535-47bb-0310-9956-ffa450edef68
@@ -177,6 +177,11 @@ Trunk (Unreleased)
HADOOP-10769. Create KeyProvider extension to handle delegation tokens.
(Arun Suresh via atm)

HADOOP-10812. Delegate KeyProviderExtension#toString to underlying
KeyProvider. (wang)

HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)

BUG FIXES

HADOOP-9451. Fault single-layer config if node group topology is enabled.

@@ -374,6 +379,10 @@ Trunk (Unreleased)
NativeAzureFileSystem#NativeAzureFsInputStream#close().
(Chen He via cnauroth)

HADOOP-10831. UserProvider is not thread safe. (Benoy Antony via umamahesh)

HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)

OPTIMIZATIONS

HADOOP-7761. Improve the performance of raw comparisons. (todd)

@@ -388,10 +397,25 @@ Release 2.6.0 - UNRELEASED

IMPROVEMENTS

HADOOP-10808. Remove unused native code for munlock. (cnauroth)

HADOOP-10815. Implement Windows equivalent of mlock. (cnauroth)

OPTIMIZATIONS

BUG FIXES

HADOOP-10781. Unportable getgrouplist() usage breaks FreeBSD (Dmitry
Sivachenko via Colin Patrick McCabe)

HADOOP-10507. FsShell setfacl can throw ArrayIndexOutOfBoundsException when
no perm is specified. (Stephen Chu and Sathish Gurram via cnauroth)

HADOOP-10780. hadoop_user_info_alloc fails on FreeBSD due to incorrect
sysconf use (Dmitry Sivachenko via Colin Patrick McCabe)

HADOOP-10810. Clean up native code compilation warnings. (cnauroth)

Release 2.5.0 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -679,6 +703,8 @@ Release 2.5.0 - UNRELEASED
HADOOP-10419 BufferedFSInputStream NPEs on getPos() on a closed stream
(stevel)

HADOOP-10801 dead link in site.xml (Akira AJISAKA via stevel)

BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS

HADOOP-10520. Extended attributes definition and FileSystem APIs for

@@ -23,9 +23,7 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.URI;
import java.security.NoSuchAlgorithmException;
import java.text.MessageFormat;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
@@ -37,7 +35,6 @@ import com.google.gson.stream.JsonWriter;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

import javax.crypto.KeyGenerator;

@@ -137,9 +134,26 @@ public abstract class KeyProvider {
}

public String toString() {
return MessageFormat.format(
"cipher: {0}, length: {1} description: {2} created: {3} version: {4}",
cipher, bitLength, description, created, versions);
final StringBuilder metaSB = new StringBuilder();
metaSB.append("cipher: ").append(cipher).append(", ");
metaSB.append("length: ").append(bitLength).append(", ");
metaSB.append("description: ").append(description).append(", ");
metaSB.append("created: ").append(created).append(", ");
metaSB.append("version: ").append(versions).append(", ");
metaSB.append("attributes: ");
if ((attributes != null) && !attributes.isEmpty()) {
for (Map.Entry<String, String> attribute : attributes.entrySet()) {
metaSB.append("[");
metaSB.append(attribute.getKey());
metaSB.append("=");
metaSB.append(attribute.getValue());
metaSB.append("], ");
}
metaSB.deleteCharAt(metaSB.length() - 2); // remove last ', '
} else {
metaSB.append("null");
}
return metaSB.toString();
}

public String getDescription() {

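Note on the hunk above (HADOOP-10736): the MessageFormat-based Metadata#toString is replaced with a StringBuilder so that key attributes are included in the output. A minimal standalone sketch of the same formatting logic, using a hypothetical renderMetadata helper rather than the real KeyProvider.Metadata class:

import java.util.LinkedHashMap;
import java.util.Map;

public class MetadataToStringSketch {
  // Hypothetical helper mirroring the StringBuilder logic in the hunk above.
  static String renderMetadata(String cipher, int bitLength, String description,
      java.util.Date created, int versions, Map<String, String> attributes) {
    final StringBuilder sb = new StringBuilder();
    sb.append("cipher: ").append(cipher).append(", ");
    sb.append("length: ").append(bitLength).append(", ");
    sb.append("description: ").append(description).append(", ");
    sb.append("created: ").append(created).append(", ");
    sb.append("version: ").append(versions).append(", ");
    sb.append("attributes: ");
    if (attributes != null && !attributes.isEmpty()) {
      for (Map.Entry<String, String> e : attributes.entrySet()) {
        sb.append("[").append(e.getKey()).append("=").append(e.getValue()).append("], ");
      }
      sb.deleteCharAt(sb.length() - 2); // drop the trailing comma
    } else {
      sb.append("null");
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    Map<String, String> attrs = new LinkedHashMap<String, String>();
    attrs.put("foo", "bar");
    // Prints something like: cipher: AES/CTR/NoPadding, length: 256, ... attributes: [foo=bar]
    System.out.println(renderMetadata("AES/CTR/NoPadding", 256, "demo key",
        new java.util.Date(0), 1, attrs));
  }
}
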
@@ -120,4 +120,9 @@ public abstract class KeyProviderExtension
public void flush() throws IOException {
keyProvider.flush();
}

@Override
public String toString() {
return getClass().getSimpleName() + ": " + keyProvider.toString();
}
}

@@ -22,7 +22,9 @@ import java.io.IOException;
import java.io.PrintStream;
import java.security.InvalidParameterException;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -90,6 +92,7 @@ public class KeyShell extends Configured implements Tool {
*/
private int init(String[] args) throws IOException {
final Options options = KeyProvider.options(getConf());
final Map<String, String> attributes = new HashMap<String, String>();

for (int i = 0; i < args.length; i++) { // parse command line
boolean moreTokens = (i < args.length - 1);
@@ -134,6 +137,23 @@ public class KeyShell extends Configured implements Tool {
options.setCipher(args[++i]);
} else if ("--description".equals(args[i]) && moreTokens) {
options.setDescription(args[++i]);
} else if ("--attr".equals(args[i]) && moreTokens) {
final String attrval[] = args[++i].split("=", 2);
final String attr = attrval[0].trim();
final String val = attrval[1].trim();
if (attr.isEmpty() || val.isEmpty()) {
out.println("\nAttributes must be in attribute=value form, " +
"or quoted\nlike \"attribute = value\"\n");
printKeyShellUsage();
return -1;
}
if (attributes.containsKey(attr)) {
out.println("\nEach attribute must correspond to only one value:\n" +
"atttribute \"" + attr + "\" was repeated\n" );
printKeyShellUsage();
return -1;
}
attributes.put(attr, val);
} else if ("--provider".equals(args[i]) && moreTokens) {
userSuppliedProvider = true;
getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
@@ -156,6 +176,10 @@ public class KeyShell extends Configured implements Tool {
return -1;
}

if (!attributes.isEmpty()) {
options.setAttributes(attributes);
}

return 0;
}

@@ -404,6 +428,7 @@ public class KeyShell extends Configured implements Tool {
public static final String USAGE =
"create <keyname> [--cipher <cipher>] [--size <size>]\n" +
" [--description <description>]\n" +
" [--attr <attribute=value>]\n" +
" [--provider <provider>] [--help]";
public static final String DESC =
"The create subcommand creates a new key for the name specified\n" +
@@ -411,7 +436,9 @@ public class KeyShell extends Configured implements Tool {
"--provider argument. You may specify a cipher with the --cipher\n" +
"argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" +
"The default keysize is 256. You may specify the requested key\n" +
"length using the --size argument.\n";
"length using the --size argument. Arbitrary attribute=value\n" +
"style attributes may be specified using the --attr argument.\n" +
"--attr may be specified multiple times, once per attribute.\n";

final String keyName;
final Options options;

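For reference, a small self-contained sketch of the --attr parsing rule introduced above (split on the first '=' only, trim both sides, and reject empty or repeated attribute names). The KeyShellAttrSketch class name and the explicit length check are illustration only, not the shipped KeyShell code:

import java.util.HashMap;
import java.util.Map;

public class KeyShellAttrSketch {
  // Returns null if the argument is not a usable attribute=value pair.
  static String[] parseAttr(String arg, Map<String, String> attributes) {
    final String[] attrval = arg.split("=", 2);   // "a=b=c" -> ["a", "b=c"]
    if (attrval.length != 2) {
      return null;                                // no '=' at all, e.g. "foo"
    }
    final String attr = attrval[0].trim();
    final String val = attrval[1].trim();
    if (attr.isEmpty() || val.isEmpty()) {
      return null;                                // "=bar", "foo=", or "="
    }
    if (attributes.containsKey(attr)) {
      return null;                                // repeated attribute name
    }
    attributes.put(attr, val);
    return new String[] { attr, val };
  }

  public static void main(String[] args) {
    Map<String, String> attrs = new HashMap<String, String>();
    System.out.println(parseAttr("foo = bar", attrs) != null); // true, stored as foo=bar
    System.out.println(parseAttr("a=b=c", attrs) != null);     // true, value is "b=c"
    System.out.println(parseAttr("=bar", attrs) != null);      // false
    System.out.println(parseAttr("foo", attrs) != null);       // false
  }
}
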
@@ -278,7 +278,7 @@ public class AclEntry {
}

if (includePermission) {
if (split.length < index) {
if (split.length <= index) {
throw new HadoopIllegalArgumentException("Invalid <aclSpec> : "
+ aclStr);
}

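The one-character change above (HADOOP-10507) tightens a bounds check: with a trailing-colon spec such as "user:user1:", String.split drops the empty last field, so split.length can equal index and split[index] would throw ArrayIndexOutOfBoundsException. A tiny hedged illustration of just that effect:

public class AclSplitSketch {
  public static void main(String[] args) {
    String[] split = "user:user1:".split(":"); // trailing empty field is dropped
    int index = 2;                             // position where the permission would sit
    System.out.println(split.length);          // prints 2
    // The old guard (split.length < index) let split[2] through and threw AIOOBE;
    // the corrected guard rejects the spec instead.
    if (split.length <= index) {
      System.out.println("Invalid <aclSpec>: missing permission");
    } else {
      System.out.println("perm = " + split[index]);
    }
  }
}
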
@@ -292,8 +292,6 @@ public class NativeIO {

static native void mlock_native(
ByteBuffer buffer, long len) throws NativeIOException;
static native void munlock_native(
ByteBuffer buffer, long len) throws NativeIOException;

/**
* Locks the provided direct ByteBuffer into memory, preventing it from
@@ -312,23 +310,6 @@
}
mlock_native(buffer, len);
}

/**
* Unlocks a locked direct ByteBuffer, allowing it to swap out of memory.
* This is a no-op if the ByteBuffer was not previously locked.
*
* See the munlock(2) man page for more information.
*
* @throws NativeIOException
*/
public static void munlock(ByteBuffer buffer, long len)
throws IOException {
assertCodeLoaded();
if (!buffer.isDirect()) {
throw new IOException("Cannot munlock a non-direct ByteBuffer");
}
munlock_native(buffer, len);
}

/**
* Unmaps the block from memory. See munmap(2).
@@ -570,6 +551,19 @@
return access0(path, desiredAccess.accessRight());
}

/**
* Extends both the minimum and maximum working set size of the current
* process. This method gets the current minimum and maximum working set
* size, adds the requested amount to each and then sets the minimum and
* maximum working set size to the new values. Controlling the working set
* size of the process also controls the amount of memory it can lock.
*
* @param delta amount to increment minimum and maximum working set size
* @throws IOException for any error
* @see POSIX#mlock(ByteBuffer, long)
*/
public static native void extendWorkingSetSize(long delta) throws IOException;

static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {

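A hedged usage sketch for the working-set/mlock pairing described in the Javadoc above: on Windows the process working set must be grown before locking roughly the same number of bytes, while on POSIX mlock can be called directly. It assumes native code is loaded and uses only the NativeIO methods visible in this diff; success also depends on OS limits such as RLIMIT_MEMLOCK, so treat it as a sketch rather than guaranteed-to-run everywhere:

import java.nio.ByteBuffer;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Shell;

public class MlockSketch {
  public static void main(String[] args) throws Exception {
    if (!NativeIO.isAvailable()) {
      return; // native code not loaded; nothing to demonstrate
    }
    final int len = 4096;
    ByteBuffer buf = ByteBuffer.allocateDirect(len); // mlock requires a direct buffer
    if (Shell.WINDOWS) {
      // Windows can only lock what fits in the working set, so grow it
      // by at least the amount we intend to lock (per the Javadoc above).
      NativeIO.Windows.extendWorkingSetSize(len);
    }
    NativeIO.POSIX.mlock(buf, len); // pin the pages in physical memory
    // ... use the buffer; unmapping or process exit releases the lock ...
  }
}
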
@@ -85,7 +85,7 @@ class MetricsConfig extends SubsetConfiguration {
private ClassLoader pluginLoader;

MetricsConfig(Configuration c, String prefix) {
super(c, prefix, ".");
super(c, prefix.toLowerCase(Locale.US), ".");
}

static MetricsConfig create(String prefix) {

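The one-line change above lower-cases the metrics prefix, so configuration keys such as "Test.sink.ganglia..." and "test.sink.ganglia..." resolve to the same subset (the test changes further down in this commit rely on that). Locale.US avoids surprises under locales with unusual case rules; a minimal hedged illustration:

import java.util.Locale;

public class PrefixCaseSketch {
  public static void main(String[] args) {
    String prefix = "Test";
    // Keys are stored lower-cased, so look-ups become case-insensitive.
    System.out.println(prefix.toLowerCase(Locale.US)); // test
    // Why Locale.US matters: under a Turkish locale, 'I' lower-cases to a
    // dotless 'ı', which would no longer match a plain "title" key.
    System.out.println("TITLE".toLowerCase(new Locale("tr", "TR"))); // tıtle
    System.out.println("TITLE".toLowerCase(Locale.US));              // title
  }
}
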
@@ -29,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceStability;
* abstraction to separate credential storage from users of them. It
* is intended to support getting or storing passwords in a variety of ways,
* including third party bindings.
*
* <code>CredentialProvider</code> implementations must be thread safe.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable

@@ -264,7 +264,7 @@ public class CredentialShell extends Configured implements Tool {
alias + " from CredentialProvider " + provider.toString() +
". Continue?:");
if (!cont) {
out.println("Nothing has been be deleted.");
out.println("Nothing has been deleted.");
}
return cont;
} catch (IOException e) {

@@ -55,7 +55,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public CredentialEntry getCredentialEntry(String alias) {
public synchronized CredentialEntry getCredentialEntry(String alias) {
byte[] bytes = credentials.getSecretKey(new Text(alias));
if (bytes == null) {
return null;
@@ -64,7 +64,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public CredentialEntry createCredentialEntry(String name, char[] credential)
public synchronized CredentialEntry createCredentialEntry(String name, char[] credential)
throws IOException {
Text nameT = new Text(name);
if (credentials.getSecretKey(nameT) != null) {
@@ -77,7 +77,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public void deleteCredentialEntry(String name) throws IOException {
public synchronized void deleteCredentialEntry(String name) throws IOException {
byte[] cred = credentials.getSecretKey(new Text(name));
if (cred != null) {
credentials.removeSecretKey(new Text(name));
@@ -95,7 +95,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public void flush() {
public synchronized void flush() {
user.addCredentials(credentials);
}

@@ -112,7 +112,7 @@ public class UserProvider extends CredentialProvider {
}

@Override
public List<String> getAliases() throws IOException {
public synchronized List<String> getAliases() throws IOException {
List<String> list = new ArrayList<String>();
List<Text> aliases = credentials.getAllSecretKeys();
for (Text key : aliases) {

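The UserProvider hunks above (HADOOP-10831) make every method that touches the shared Credentials object synchronized. A stripped-down sketch of the same pattern, with a hypothetical in-memory store standing in for the real Credentials class:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SynchronizedProviderSketch {
  // All access to this shared map funnels through synchronized methods,
  // so concurrent callers cannot observe it mid-update.
  private final Map<String, byte[]> store = new HashMap<String, byte[]>();

  public synchronized byte[] getCredentialEntry(String alias) {
    return store.get(alias);
  }

  public synchronized void createCredentialEntry(String name, char[] credential) {
    store.put(name, new String(credential).getBytes());
  }

  public synchronized void deleteCredentialEntry(String name) {
    store.remove(name);
  }

  public synchronized List<String> getAliases() {
    return new ArrayList<String>(store.keySet());
  }
}
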
@@ -379,6 +379,7 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_getLibraryName(JNIEnv *en
return (*env)->NewStringUTF(env, dl_info.dli_fname);
}
}
return (*env)->NewStringUTF(env, "Unavailable");
#endif

#ifdef WINDOWS

@@ -388,10 +388,10 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mlock_1native(
JNIEnv *env, jclass clazz,
jobject buffer, jlong len)
{
#ifdef UNIX
void* buf = (void*)(*env)->GetDirectBufferAddress(env, buffer);
PASS_EXCEPTIONS(env);

#ifdef UNIX
if (mlock(buf, len)) {
CHECK_DIRECT_BUFFER_ADDRESS(buf);
throw_ioe(env, errno);
@@ -399,37 +399,11 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mlock_1native(
#endif

#ifdef WINDOWS
THROW(env, "java/io/IOException",
"The function POSIX.mlock_native() is not supported on Windows");
#endif
}

/**
* public static native void munlock_native(
* ByteBuffer buffer, long offset);
*
* The "00024" in the function name is an artifact of how JNI encodes
* special characters. U+0024 is '$'.
*/
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munlock_1native(
JNIEnv *env, jclass clazz,
jobject buffer, jlong len)
{
#ifdef UNIX
void* buf = (void*)(*env)->GetDirectBufferAddress(env, buffer);
PASS_EXCEPTIONS(env);

if (munlock(buf, len)) {
if (!VirtualLock(buf, len)) {
CHECK_DIRECT_BUFFER_ADDRESS(buf);
throw_ioe(env, errno);
throw_ioe(env, GetLastError());
}
#endif

#ifdef WINDOWS
THROW(env, "java/io/IOException",
"The function POSIX.munlock_native() is not supported on Windows");
#endif
}

#ifdef __FreeBSD__

@@ -606,6 +580,8 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getUserName(
JNIEnv *env, jclass clazz, jint uid)
{
#ifdef UNIX
jstring jstr_username = NULL;
char *pw_buf = NULL;
int pw_lock_locked = 0;
if (pw_lock_object != NULL) {
if ((*env)->MonitorEnter(env, pw_lock_object) != JNI_OK) {
@@ -614,7 +590,6 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getUserName(
pw_lock_locked = 1;
}

char *pw_buf = NULL;
int rc;
size_t pw_buflen = get_pw_buflen();
if ((pw_buf = malloc(pw_buflen)) == NULL) {
@@ -649,7 +624,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getUserName(
goto cleanup;
}

jstring jstr_username = (*env)->NewStringUTF(env, pwd.pw_name);
jstr_username = (*env)->NewStringUTF(env, pwd.pw_name);

cleanup:
if (pw_lock_locked) {
@@ -690,7 +665,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mmap(
#ifdef WINDOWS
THROW(env, "java/io/IOException",
"The function POSIX.mmap() is not supported on Windows");
return NULL;
return (jlong)(intptr_t)NULL;
#endif
}

@@ -710,7 +685,6 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munmap(
#ifdef WINDOWS
THROW(env, "java/io/IOException",
"The function POSIX.munmap() is not supported on Windows");
return NULL;
#endif
}

@@ -726,6 +700,8 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getGroupName(
JNIEnv *env, jclass clazz, jint gid)
{
#ifdef UNIX
jstring jstr_groupname = NULL;
char *pw_buf = NULL;
int pw_lock_locked = 0;

if (pw_lock_object != NULL) {
@@ -735,7 +711,6 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getGroupName(
pw_lock_locked = 1;
}

char *pw_buf = NULL;
int rc;
size_t pw_buflen = get_pw_buflen();
if ((pw_buf = malloc(pw_buflen)) == NULL) {
@@ -770,7 +745,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getGroupName(
goto cleanup;
}

jstring jstr_groupname = (*env)->NewStringUTF(env, grp.gr_name);
jstr_groupname = (*env)->NewStringUTF(env, grp.gr_name);
PASS_EXCEPTIONS_GOTO(env, cleanup);

cleanup:
@@ -948,7 +923,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_setFilePointer
#ifdef UNIX
THROW(env, "java/io/IOException",
"The function setFilePointer(FileDescriptor) is not supported on Unix");
return NULL;
return (jlong)(intptr_t)NULL;
#endif

#ifdef WINDOWS

@@ -983,7 +958,7 @@ JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024Wind
#ifdef UNIX
THROW(env, "java/io/IOException",
"The function access0(path, access) is not supported on Unix");
return NULL;
return (jlong)(intptr_t)NULL;
#endif

#ifdef WINDOWS

@@ -1008,6 +983,40 @@ cleanup:
#endif
}

/*
* Class: org_apache_hadoop_io_nativeio_NativeIO_Windows
* Method: extendWorkingSetSize
* Signature: (J)V
*
* The "00024" in the function name is an artifact of how JNI encodes
* special characters. U+0024 is '$'.
*/
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_extendWorkingSetSize(
JNIEnv *env, jclass clazz, jlong delta)
{
#ifdef UNIX
THROW(env, "java/io/IOException",
"The function extendWorkingSetSize(delta) is not supported on Unix");
#endif

#ifdef WINDOWS
SIZE_T min, max;
HANDLE hProcess = GetCurrentProcess();
if (!GetProcessWorkingSetSize(hProcess, &min, &max)) {
throw_ioe(env, GetLastError());
return;
}
if (!SetProcessWorkingSetSizeEx(hProcess, min + delta, max + delta,
QUOTA_LIMITS_HARDWS_MIN_DISABLE | QUOTA_LIMITS_HARDWS_MAX_DISABLE)) {
throw_ioe(env, GetLastError());
return;
}
// There is no need to call CloseHandle on the pseudo-handle returned from
// GetCurrentProcess.
#endif
}

JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_renameTo0(JNIEnv *env,
jclass clazz, jstring jsrc, jstring jdst)

@@ -120,17 +120,19 @@ Java_org_apache_hadoop_net_unix_DomainSocketWatcher_00024FdSet_remove(
JNIEnv *env, jobject obj, jint fd)
{
struct fd_set_data *sd;
struct pollfd *pollfd, *last_pollfd;
struct pollfd *pollfd = NULL, *last_pollfd;
int used_size, i;

sd = (struct fd_set_data*)(intptr_t)(*env)->
GetLongField(env, obj, fd_set_data_fid);
used_size = sd->used_size;
for (i = 0; i < used_size; i++) {
pollfd = sd->pollfd + i;
if (pollfd->fd == fd) break;
if (sd->pollfd[i].fd == fd) {
pollfd = sd->pollfd + i;
break;
}
}
if (i == used_size) {
if (pollfd == NULL) {
(*env)->Throw(env, newRuntimeException(env, "failed to remove fd %d "
"from the FdSet because it was never present.", fd));
return;

@@ -45,7 +45,7 @@ static void throw_ioexception(JNIEnv* env, DWORD errnum)
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
NULL, *(DWORD*) (&errnum), // reinterpret cast
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPSTR*)&buffer, 0, NULL);
buffer, 0, NULL);

if (len > 0)
{

@@ -36,7 +36,7 @@
struct hadoop_user_info *hadoop_user_info_alloc(void)
{
struct hadoop_user_info *uinfo;
size_t buf_sz;
long buf_sz;
char *buf;

uinfo = calloc(1, sizeof(struct hadoop_user_info));
@@ -193,7 +193,7 @@ int hadoop_user_info_getgroups(struct hadoop_user_info *uinfo)
ngroups = uinfo->gids_size;
ret = getgrouplist(uinfo->pwd.pw_name, uinfo->pwd.pw_gid,
uinfo->gids, &ngroups);
if (ret > 0) {
if (ret >= 0) {
uinfo->num_gids = ngroups;
ret = put_primary_gid_first(uinfo);
if (ret) {

@@ -17,35 +17,41 @@
*/
package org.apache.hadoop.crypto.key;

import static org.junit.Assert.*;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

public class TestKeyShell {
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();

private static File tmpDir;

private PrintStream initialStdOut;
private PrintStream initialStdErr;

/* The default JCEKS provider - for testing purposes */
private String jceksProvider;

@Before
public void setup() throws Exception {
outContent.reset();
errContent.reset();
tmpDir = new File(System.getProperty("test.build.data", "target"),
final File tmpDir = new File(System.getProperty("test.build.data", "target"),
UUID.randomUUID().toString());
tmpDir.mkdirs();
if (!tmpDir.mkdirs()) {
throw new IOException("Unable to create " + tmpDir);
}
jceksProvider = "jceks://file" + tmpDir + "/keystore.jceks";
initialStdOut = System.out;
initialStdErr = System.err;
System.setOut(new PrintStream(outContent));
@@ -58,65 +64,80 @@ public class TestKeyShell {
System.setErr(initialStdErr);
}

/**
* Delete a key from the default jceksProvider
* @param ks The KeyShell instance
* @param keyName The key to delete
* @throws Exception
*/
private void deleteKey(KeyShell ks, String keyName) throws Exception {
int rc;
outContent.reset();
final String[] delArgs = {"delete", keyName, "--provider", jceksProvider};
rc = ks.run(delArgs);
assertEquals(0, rc);
assertTrue(outContent.toString().contains(keyName + " has been " +
"successfully deleted."));
}

/**
* Lists the keys in the jceksProvider
* @param ks The KeyShell instance
* @param wantMetadata True if you want metadata returned with the keys
* @return The output from the "list" call
* @throws Exception
*/
private String listKeys(KeyShell ks, boolean wantMetadata) throws Exception {
int rc;
outContent.reset();
final String[] listArgs = {"list", "--provider", jceksProvider };
final String[] listArgsM = {"list", "--metadata", "--provider", jceksProvider };
rc = ks.run(wantMetadata ? listArgsM : listArgs);
assertEquals(0, rc);
return outContent.toString();
}

@Test
public void testKeySuccessfulKeyLifecycle() throws Exception {
outContent.reset();
String[] args1 = {"create", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
int rc = 0;
String keyName = "key1";

KeyShell ks = new KeyShell();
ks.setConf(new Configuration());

outContent.reset();
final String[] args1 = {"create", keyName, "--provider", jceksProvider};
rc = ks.run(args1);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1 has been successfully " +
"created."));
assertTrue(outContent.toString().contains(keyName + " has been " +
"successfully created."));

String listOut = listKeys(ks, false);
assertTrue(listOut.contains(keyName));

listOut = listKeys(ks, true);
assertTrue(listOut.contains(keyName));
assertTrue(listOut.contains("description"));
assertTrue(listOut.contains("created"));

outContent.reset();
String[] args2 = {"list", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
final String[] args2 = {"roll", keyName, "--provider", jceksProvider};
rc = ks.run(args2);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1"));

outContent.reset();
String[] args2a = {"list", "--metadata", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args2a);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1"));
assertTrue(outContent.toString().contains("description"));
assertTrue(outContent.toString().contains("created"));

outContent.reset();
String[] args3 = {"roll", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args3);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1 has been successfully " +
"rolled."));

outContent.reset();
String[] args4 = {"delete", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args4);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1 has been successfully " +
"deleted."));
deleteKey(ks, keyName);

outContent.reset();
String[] args5 = {"list", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args5);
assertEquals(0, rc);
assertFalse(outContent.toString(), outContent.toString().contains("key1"));
listOut = listKeys(ks, false);
assertFalse(listOut, listOut.contains(keyName));
}

/* HADOOP-10586 KeyShell didn't allow -description. */
@Test
public void testKeySuccessfulCreationWithDescription() throws Exception {
outContent.reset();
String[] args1 = {"create", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks",
final String[] args1 = {"create", "key1", "--provider", jceksProvider,
"--description", "someDescription"};
int rc = 0;
KeyShell ks = new KeyShell();
@@ -126,20 +147,16 @@ public class TestKeyShell {
assertTrue(outContent.toString().contains("key1 has been successfully " +
"created."));

outContent.reset();
String[] args2a = {"list", "--metadata", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
rc = ks.run(args2a);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("description"));
assertTrue(outContent.toString().contains("someDescription"));
String listOut = listKeys(ks, true);
assertTrue(listOut.contains("description"));
assertTrue(listOut.contains("someDescription"));
}

@Test
public void testInvalidKeySize() throws Exception {
String[] args1 = {"create", "key1", "--size", "56", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};

final String[] args1 = {"create", "key1", "--size", "56", "--provider",
jceksProvider};

int rc = 0;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
@@ -150,9 +167,9 @@ public class TestKeyShell {

@Test
public void testInvalidCipher() throws Exception {
String[] args1 = {"create", "key1", "--cipher", "LJM", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};

final String[] args1 = {"create", "key1", "--cipher", "LJM", "--provider",
jceksProvider};

int rc = 0;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
@@ -163,7 +180,7 @@ public class TestKeyShell {

@Test
public void testInvalidProvider() throws Exception {
String[] args1 = {"create", "key1", "--cipher", "AES", "--provider",
final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider",
"sdff://file/tmp/keystore.jceks"};

int rc = 0;
@@ -177,7 +194,7 @@ public class TestKeyShell {

@Test
public void testTransientProviderWarning() throws Exception {
String[] args1 = {"create", "key1", "--cipher", "AES", "--provider",
final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider",
"user:///"};

int rc = 0;
@@ -191,7 +208,7 @@ public class TestKeyShell {

@Test
public void testTransientProviderOnlyConfig() throws Exception {
String[] args1 = {"create", "key1"};
final String[] args1 = {"create", "key1"};

int rc = 0;
KeyShell ks = new KeyShell();
@@ -206,23 +223,96 @@ public class TestKeyShell {

@Test
public void testFullCipher() throws Exception {
String[] args1 = {"create", "key1", "--cipher", "AES/CBC/pkcs5Padding",
"--provider", "jceks://file" + tmpDir + "/keystore.jceks"};
final String keyName = "key1";
final String[] args1 = {"create", keyName, "--cipher", "AES/CBC/pkcs5Padding",
"--provider", jceksProvider};

int rc = 0;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1 has been successfully " +
"created."));
assertTrue(outContent.toString().contains(keyName + " has been " +
"successfully " + "created."));

deleteKey(ks, keyName);
}

@Test
public void testAttributes() throws Exception {
int rc;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());

/* Simple creation test */
final String[] args1 = {"create", "keyattr1", "--provider", jceksProvider,
"--attr", "foo=bar"};
rc = ks.run(args1);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("keyattr1 has been " +
"successfully " + "created."));

/* ...and list to see that we have the attr */
String listOut = listKeys(ks, true);
assertTrue(listOut.contains("keyattr1"));
assertTrue(listOut.contains("attributes: [foo=bar]"));

/* Negative tests: no attribute */
outContent.reset();
String[] args2 = {"delete", "key1", "--provider",
"jceks://file" + tmpDir + "/keystore.jceks"};
final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider,
"--attr", "=bar"};
rc = ks.run(args2);
assertEquals(-1, rc);

/* Not in attribute = value form */
outContent.reset();
args2[5] = "foo";
rc = ks.run(args2);
assertEquals(-1, rc);

/* No attribute or value */
outContent.reset();
args2[5] = "=";
rc = ks.run(args2);
assertEquals(-1, rc);

/* Legal: attribute is a, value is b=c */
outContent.reset();
args2[5] = "a=b=c";
rc = ks.run(args2);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1 has been successfully " +
"deleted."));

listOut = listKeys(ks, true);
assertTrue(listOut.contains("keyattr2"));
assertTrue(listOut.contains("attributes: [a=b=c]"));

/* Test several attrs together... */
outContent.reset();
final String[] args3 = {"create", "keyattr3", "--provider", jceksProvider,
"--attr", "foo = bar",
"--attr", " glarch =baz ",
"--attr", "abc=def"};
rc = ks.run(args3);
assertEquals(0, rc);

/* ...and list to ensure they're there. */
listOut = listKeys(ks, true);
assertTrue(listOut.contains("keyattr3"));
assertTrue(listOut.contains("[foo=bar]"));
assertTrue(listOut.contains("[glarch=baz]"));
assertTrue(listOut.contains("[abc=def]"));

/* Negative test - repeated attributes should fail */
outContent.reset();
final String[] args4 = {"create", "keyattr4", "--provider", jceksProvider,
"--attr", "foo=bar",
"--attr", "foo=glarch"};
rc = ks.run(args4);
assertEquals(-1, rc);

/* Clean up to be a good citizen */
deleteKey(ks, "keyattr1");
deleteKey(ks, "keyattr2");
deleteKey(ks, "keyattr3");
}
}

@@ -83,6 +83,19 @@ public class TestAclCommands {
"", "/path" }));
}

@Test
public void testSetfaclValidationsWithoutPermissions() throws Exception {
List<AclEntry> parsedList = new ArrayList<AclEntry>();
try {
parsedList = AclEntry.parseAclSpec("user:user1:", true);
} catch (IllegalArgumentException e) {
}
assertTrue(parsedList.size() == 0);
assertFalse("setfacl should fail with less arguments",
0 == runCommand(new String[] { "-setfacl", "-m", "user:user1:",
"/path" }));
}

@Test
public void testMultipleAclSpecParsing() throws Exception {
List<AclEntry> parsedList = AclEntry.parseAclSpec(

@@ -49,7 +49,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Time;

public class TestNativeIO {
@@ -572,7 +571,6 @@ public class TestNativeIO {
@Test(timeout=10000)
public void testMlock() throws Exception {
assumeTrue(NativeIO.isAvailable());
assumeTrue(Shell.LINUX);
final File TEST_FILE = new File(new File(
System.getProperty("test.build.data","build/test/data")),
"testMlockFile");
@@ -607,8 +605,8 @@ public class TestNativeIO {
sum += mapbuf.get(i);
}
assertEquals("Expected sums to be equal", bufSum, sum);
// munlock the buffer
NativeIO.POSIX.munlock(mapbuf, fileSize);
// munmap the buffer, which also implicitly unlocks it
NativeIO.POSIX.munmap(mapbuf);
} finally {
if (channel != null) {
channel.close();

@@ -60,12 +60,12 @@ public class TestGangliaMetrics {
@Test
public void testTagsForPrefix() throws Exception {
ConfigBuilder cb = new ConfigBuilder()
.add("Test.sink.ganglia.tagsForPrefix.all", "*")
.add("Test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
.add("test.sink.ganglia.tagsForPrefix.all", "*")
.add("test.sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " +
"NumActiveSources")
.add("Test.sink.ganglia.tagsForPrefix.none", "");
.add("test.sink.ganglia.tagsForPrefix.none", "");
GangliaSink30 sink = new GangliaSink30();
sink.init(cb.subset("Test.sink.ganglia"));
sink.init(cb.subset("test.sink.ganglia"));

List<MetricsTag> tags = new ArrayList<MetricsTag>();
tags.add(new MetricsTag(MsInfo.Context, "all"));
@@ -98,8 +98,8 @@ public class TestGangliaMetrics {

@Test public void testGangliaMetrics2() throws Exception {
ConfigBuilder cb = new ConfigBuilder().add("default.period", 10)
.add("Test.sink.gsink30.context", "test") // filter out only "test"
.add("Test.sink.gsink31.context", "test") // filter out only "test"
.add("test.sink.gsink30.context", "test") // filter out only "test"
.add("test.sink.gsink31.context", "test") // filter out only "test"
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));

MetricsSystemImpl ms = new MetricsSystemImpl("Test");

@@ -88,11 +88,11 @@ public class TestMetricsSystemImpl {
DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("Test.sink.test.class", TestSink.class.getName())
.add("Test.*.source.filter.exclude", "s0")
.add("Test.source.s1.metric.filter.exclude", "X*")
.add("Test.sink.sink1.metric.filter.exclude", "Y*")
.add("Test.sink.sink2.metric.filter.exclude", "Y*")
.add("test.sink.test.class", TestSink.class.getName())
.add("test.*.source.filter.exclude", "s0")
.add("test.source.s1.metric.filter.exclude", "X*")
.add("test.sink.sink1.metric.filter.exclude", "Y*")
.add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
@@ -130,11 +130,11 @@ public class TestMetricsSystemImpl {
DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("Test.sink.test.class", TestSink.class.getName())
.add("Test.*.source.filter.exclude", "s0")
.add("Test.source.s1.metric.filter.exclude", "X*")
.add("Test.sink.sink1.metric.filter.exclude", "Y*")
.add("Test.sink.sink2.metric.filter.exclude", "Y*")
.add("test.sink.test.class", TestSink.class.getName())
.add("test.*.source.filter.exclude", "s0")
.add("test.source.s1.metric.filter.exclude", "X*")
.add("test.sink.sink1.metric.filter.exclude", "Y*")
.add("test.sink.sink2.metric.filter.exclude", "Y*")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();
@@ -169,13 +169,14 @@ public class TestMetricsSystemImpl {
@Test public void testMultiThreadedPublish() throws Exception {
final int numThreads = 10;
new ConfigBuilder().add("*.period", 80)
.add("Test.sink.Collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
.add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY,
numThreads)
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();

final CollectingSink sink = new CollectingSink(numThreads);
ms.registerSink("Collector",
ms.registerSink("collector",
"Collector of values from all threads.", sink);
final TestSource[] sources = new TestSource[numThreads];
final Thread[] threads = new Thread[numThreads];
@@ -280,10 +281,10 @@ public class TestMetricsSystemImpl {

@Test public void testHangingSink() {
new ConfigBuilder().add("*.period", 8)
.add("Test.sink.test.class", TestSink.class.getName())
.add("Test.sink.hanging.retry.delay", "1")
.add("Test.sink.hanging.retry.backoff", "1.01")
.add("Test.sink.hanging.retry.count", "0")
.add("test.sink.test.class", TestSink.class.getName())
.add("test.sink.hanging.retry.delay", "1")
.add("test.sink.hanging.retry.backoff", "1.01")
.add("test.sink.hanging.retry.count", "0")
.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms = new MetricsSystemImpl("Test");
ms.start();

@@ -19,12 +19,16 @@ package org.apache.hadoop.mount;
import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.oncrpc.SimpleUdpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;

import static org.apache.hadoop.util.ExitUtil.terminate;

/**
* Main class for starting mountd daemon. This daemon implements the NFS
* mount protocol. When receiving a MOUNT request from an NFS client, it checks
@@ -33,6 +37,7 @@ import org.apache.hadoop.util.ShutdownHookManager;
* handle for requested directory and returns it to the client.
*/
abstract public class MountdBase {
public static final Log LOG = LogFactory.getLog(MountdBase.class);
private final RpcProgram rpcProgram;
private int udpBoundPort; // Will set after server starts
private int tcpBoundPort; // Will set after server starts
@@ -40,11 +45,11 @@ abstract public class MountdBase {
public RpcProgram getRpcProgram() {
return rpcProgram;
}


/**
* Constructor
* @param program
* @throws IOException
* @throws IOException
*/
public MountdBase(RpcProgram program) throws IOException {
rpcProgram = program;
@@ -74,11 +79,16 @@ abstract public class MountdBase {
if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY);
rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
try {
rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
} catch (Throwable e) {
LOG.fatal("Failed to start the server. Cause:", e);
terminate(1, e);
}
}
}


/**
* Priority of the mountd shutdown hook.
*/
@@ -91,5 +101,5 @@ abstract public class MountdBase {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
}
}


}

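The MountdBase change above wraps portmap registration in a try/catch and terminates the process on any Throwable instead of leaving a half-started daemon behind. A hedged sketch of that fail-fast pattern, with a made-up register() call standing in for rpcProgram.register(...):

public class FailFastStartSketch {
  // Stand-in for rpcProgram.register(...); may throw at runtime.
  static void register(String transport, int port) {
    throw new RuntimeException("portmap unreachable");
  }

  public static void main(String[] args) {
    try {
      register("udp", 4242);
      register("tcp", 4242);
    } catch (Throwable t) {
      // Log and exit immediately rather than keep running unregistered.
      System.err.println("Failed to start the server. Cause: " + t);
      System.exit(1);
    }
  }
}
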
@@ -71,7 +71,16 @@ public class NfsExports {

private static final Pattern CIDR_FORMAT_LONG =
Pattern.compile(SLASH_FORMAT_LONG);


// Hostnames are composed of series of 'labels' concatenated with dots.
// Labels can be between 1-63 characters long, and can only take
// letters, digits & hyphens. They cannot start and end with hyphens. For
// more details, refer RFC-1123 & http://en.wikipedia.org/wiki/Hostname
private static final String LABEL_FORMAT =
"[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?";
private static final Pattern HOSTNAME_FORMAT =
Pattern.compile("^(" + LABEL_FORMAT + "\.)*" + LABEL_FORMAT + "$");

static class AccessCacheEntry implements LightWeightCache.Entry{
private final String hostAddr;
private AccessPrivilege access;
@@ -381,10 +390,14 @@ public class NfsExports {
LOG.debug("Using Regex match for '" + host + "' and " + privilege);
}
return new RegexMatch(privilege, host);
} else if (HOSTNAME_FORMAT.matcher(host).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using exact match for '" + host + "' and " + privilege);
}
return new ExactMatch(privilege, host);
} else {
throw new IllegalArgumentException("Invalid hostname provided '" + host
+ "'");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Using exact match for '" + host + "' and " + privilege);
}
return new ExactMatch(privilege, host);
}
}
}

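The new HOSTNAME_FORMAT pattern above encodes the RFC-1123 label rules (1-63 characters, letters, digits and hyphens, no leading or trailing hyphen). A quick standalone check of the same regex:

import java.util.regex.Pattern;

public class HostnameFormatSketch {
  private static final String LABEL_FORMAT =
      "[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?";
  private static final Pattern HOSTNAME_FORMAT =
      Pattern.compile("^(" + LABEL_FORMAT + "\\.)*" + LABEL_FORMAT + "$");

  public static void main(String[] args) {
    System.out.println(HOSTNAME_FORMAT.matcher("foo.example.com").matches()); // true
    System.out.println(HOSTNAME_FORMAT.matcher("foo#bar").matches());         // false ('#' not allowed)
    System.out.println(HOSTNAME_FORMAT.matcher("-bad-.example").matches());   // false (leading hyphen)
  }
}
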
@@ -25,6 +25,8 @@ import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;

import static org.apache.hadoop.util.ExitUtil.terminate;

/**
* Nfs server. Supports NFS v3 using {@link RpcProgram}.
* Currently Mountd program is also started inside this class.
@@ -34,7 +36,7 @@ public abstract class Nfs3Base {
public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
private final RpcProgram rpcProgram;
private int nfsBoundPort; // Will set after server starts


public RpcProgram getRpcProgram() {
return rpcProgram;
}
@@ -46,11 +48,16 @@ public abstract class Nfs3Base {

public void start(boolean register) {
startTCPServer(); // Start TCP server


if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
try {
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
} catch (Throwable e) {
LOG.fatal("Failed to start the server. Cause:", e);
terminate(1, e);
}
}
}

@@ -61,7 +68,7 @@ public abstract class Nfs3Base {
tcpServer.run();
nfsBoundPort = tcpServer.getBoundPort();
}


/**
* Priority of the nfsd shutdown hook.
*/

@@ -131,7 +131,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
} catch (IOException e) {
String request = set ? "Registration" : "Unregistration";
LOG.error(request + " failure with " + host + ":" + port
+ ", portmap entry: " + mapEntry, e);
+ ", portmap entry: " + mapEntry);
throw new RuntimeException(request + " failure", e);
}
}

@@ -60,6 +60,7 @@ public class SimpleUdpClient {
DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length,
IPAddress, port);
socket.send(sendPacket);
socket.setSoTimeout(500);
DatagramPacket receivePacket = new DatagramPacket(receiveData,
receiveData.length);
socket.receive(receivePacket);

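The single added line above (part of HDFS-6378) sets a 500 ms SO_TIMEOUT before the blocking receive, so portmap registration fails with a SocketTimeoutException instead of hanging when portmap/rpcbind is not available. A minimal hedged sketch of the same idea, sending to a local port that is assumed to have no listener:

import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketTimeoutException;

public class UdpTimeoutSketch {
  public static void main(String[] args) throws Exception {
    DatagramSocket socket = new DatagramSocket();
    try {
      byte[] payload = "ping".getBytes();
      // Port 9 (discard) on localhost is assumed unanswered here.
      socket.send(new DatagramPacket(payload, payload.length,
          InetAddress.getByName("127.0.0.1"), 9));
      socket.setSoTimeout(500);               // fail the receive after 500 ms
      byte[] reply = new byte[512];
      socket.receive(new DatagramPacket(reply, reply.length));
      System.out.println("got a reply");
    } catch (SocketTimeoutException e) {
      System.out.println("no response within 500 ms, giving up");
    } finally {
      socket.close();
    }
  }
}
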
@@ -194,4 +194,16 @@ public class TestNfsExports {
} while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
Assert.assertEquals(AccessPrivilege.NONE, ap);
}

@Test(expected=IllegalArgumentException.class)
public void testInvalidHost() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"foo#bar");
}

@Test(expected=IllegalArgumentException.class)
public void testInvalidSeparator() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"foo ro : bar rw");
}
}

@@ -53,8 +53,6 @@ Trunk (Unreleased)
HDFS-3030. Remove getProtocolVersion and getProtocolSignature from translators.
(jitendra)

HDFS-2976. Remove unnecessary method (tokenRefetchNeeded) in DFSClient.

HDFS-3111. Missing license headers in trunk. (umamahesh)

HDFS-3091. Update the usage limitations of ReplaceDatanodeOnFailure policy in

@@ -95,8 +93,6 @@ Trunk (Unreleased)
HDFS-3768. Exception in TestJettyHelper is incorrect.
(Eli Reisman via jghoman)

HDFS-3851. DFSOutputStream class code cleanup. (Jing Zhao via suresh)

HDFS-2580. NameNode#main(...) can make use of GenericOptionsParser. (harsh)

HDFS-2127. Add a test that ensure AccessControlExceptions contain

@@ -129,6 +125,9 @@ Trunk (Unreleased)

HDFS-6252. Phase out the old web UI in HDFS. (wheat9)

HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable
directory. (Jing Zhao via wheat9)

OPTIMIZATIONS

BUG FIXES
@@ -197,9 +196,6 @@ Trunk (Unreleased)
HDFS-3834. Remove unused static fields NAME, DESCRIPTION and Usage from
Command. (Jing Zhao via suresh)

HADOOP-8158. Interrupting hadoop fs -put from the command line
causes a LeaseExpiredException. (daryn via harsh)

HDFS-2434. TestNameNodeMetrics.testCorruptBlock fails intermittently.
(Jing Zhao via suresh)

@@ -266,6 +262,31 @@ Release 2.6.0 - UNRELEASED

HDFS-6511. BlockManager#computeInvalidateWork() could do nothing. (Juan Yu via wang)

HDFS-6638. Shorten test run time with a smaller retry timeout setting.
(Liang Xie via cnauroth)

HDFS-6627. Rename DataNode#checkWriteAccess to checkReadAccess.
(Liang Xie via cnauroth)

HDFS-6645. Add test for successive Snapshots between XAttr modifications.
(Stephen Chu via jing9)

HDFS-6643. Refactor INodeWithAdditionalFields.PermissionStatusFormat and
INodeFile.HeaderFormat. (szetszwo)

HDFS-6640. Syntax for MKDIRS, CREATESYMLINK, and SETXATTR are given wrongly
in WebHdfs document (missed webhdfs/v1). (Stephen Chu via jing9)

HDFS-5202. Support Centralized Cache Management on Windows. (cnauroth)

HDFS-2976. Remove unnecessary method (tokenRefetchNeeded) in DFSClient.
(Uma Maheswara Rao G)

HDFS-3851. DFSOutputStream class code cleanup. (Jing Zhao via suresh)

HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc.
(cnauroth)

OPTIMIZATIONS

BUG FIXES
@@ -273,6 +294,24 @@ Release 2.6.0 - UNRELEASED
HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
due to a long edit log sync op. (Liang Xie via cnauroth)

HDFS-6646. [ HDFS Rolling Upgrade - Shell ] shutdownDatanode and getDatanodeInfo
usage is missed ( Brahma Reddy Battula via vinayakumarb)

HDFS-6630. Unable to fetch the block information by Browsing the file system on
Namenode UI through IE9 ( Haohui Mai via vinayakumarb)

HADOOP-8158. Interrupting hadoop fs -put from the command line
causes a LeaseExpiredException. (daryn via harsh)

HDFS-6678. MiniDFSCluster may still be partially running after initialization
fails. (cnauroth)

HDFS-5809. BlockPoolSliceScanner and high speed hdfs appending make
datanode to drop into infinite loop (cmccabe)

HDFS-6456. NFS should throw error for invalid entry in
dfs.nfs.exports.allowed.hosts (Abhiraj Butala via brandonli)

Release 2.5.0 - UNRELEASED

INCOMPATIBLE CHANGES

@@ -781,6 +820,23 @@ Release 2.5.0 - UNRELEASED
HDFS-6604. The short-circuit cache doesn't correctly time out replicas that
haven't been used in a while (cmccabe)

HDFS-4286. Changes from BOOKKEEPER-203 broken capability of including
bookkeeper-server jar in hidden package of BKJM (Rakesh R via umamahesh)

HDFS-4221. Remove the format limitation point from BKJM documentation as HDFS-3810
closed. (Rakesh R via umamahesh)

HDFS-5411. Update Bookkeeper dependency to 4.2.3. (Rakesh R via umamahesh)

HDFS-6631. TestPread#testHedgedReadLoopTooManyTimes fails intermittently.
(Liang Xie via cnauroth)

HDFS-6647. Edit log corruption when pipeline recovery occurs for deleted
file present in snapshot (kihwal)

HDFS-6378. NFS registration should timeout instead of hanging when
portmap/rpcbind is not available (Abhiraj Butala via brandonli)

BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS

HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

@@ -853,6 +909,12 @@ Release 2.5.0 - UNRELEASED
HDFS-6312. WebHdfs HA failover is broken on secure clusters.
(daryn via tucu)

HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
from the tree and deleting them from the inode map (kihwal via cmccabe)

HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
via cmccabe)

Release 2.4.1 - 2014-06-23

INCOMPATIBLE CHANGES

@@ -141,6 +141,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minikdc</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>

@@ -163,38 +163,24 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>1.5</version>
<artifactId>maven-dependency-plugin</artifactId>
<version>2.8</version>
<executions>
<execution>
<id>dist</id>
<phase>package</phase>
<goals>
<goal>shade</goal>
<goal>copy</goal>
</goals>
<configuration>
<createDependencyReducedPom>false</createDependencyReducedPom>
<artifactSet>
<includes>
<include>org.apache.bookkeeper:bookkeeper-server</include>
<include>org.apache.zookeeper:zookeeper</include>
<include>org.jboss.netty:netty</include>
</includes>
</artifactSet>
<relocations>
<relocation>
<pattern>org.apache.bookkeeper</pattern>
<shadedPattern>hidden.bkjournal.org.apache.bookkeeper</shadedPattern>
</relocation>
<relocation>
<pattern>org.apache.zookeeper</pattern>
<shadedPattern>hidden.bkjournal.org.apache.zookeeper</shadedPattern>
</relocation>
<relocation>
<pattern>org.jboss.netty</pattern>
<shadedPattern>hidden.bkjournal.org.jboss.netty</shadedPattern>
</relocation>
</relocations>
<artifactItems>
<artifactItem>
<groupId>org.apache.bookkeeper</groupId>
<artifactId>bookkeeper-server</artifactId>
<type>jar</type>
</artifactItem>
</artifactItems>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
</configuration>
</execution>
</executions>

@@ -237,7 +237,7 @@ public class BookKeeperJournalManager implements JournalManager {
zkPathLatch.countDown();
}
};
ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
ZkUtils.asyncCreateFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);

try {

@@ -149,13 +149,16 @@ class BKJMUtil {
int checkBookiesUp(int count, int timeout) throws Exception {
ZooKeeper zkc = connectZooKeeper();
try {
boolean up = false;
int mostRecentSize = 0;
for (int i = 0; i < timeout; i++) {
try {
List<String> children = zkc.getChildren("/ledgers/available",
false);
mostRecentSize = children.size();
// Skip 'readonly znode' which is used for keeping R-O bookie details
if (children.contains("readonly")) {
mostRecentSize = children.size() - 1;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Found " + mostRecentSize + " bookies up, "
+ "waiting for " + count);
@@ -166,7 +169,6 @@ class BKJMUtil {
}
}
if (mostRecentSize == count) {
up = true;
break;
}
} catch (KeeperException e) {

@@ -47,7 +47,7 @@ if "%1" == "--config" (
goto print_usage
)

set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir
set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin
for %%i in ( %hdfscommands% ) do (
if %hdfs-command% == %%i set hdfscommand=true
)

@@ -146,6 +146,10 @@ goto :eof
set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
goto :eof

:cacheadmin
set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
goto :eof

@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
:make_command_arguments
if "%1" == "--config" (

@@ -193,6 +197,7 @@ goto :eof
@echo current directory contents with a snapshot
@echo lsSnapshottableDir list all snapshottable dirs owned by the current user
@echo Use -help to see options
@echo cacheadmin configure the HDFS cache
@echo.
@echo Most commands print help when invoked w/o parameters.
@@ -744,7 +744,8 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
}
}
try {
Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress);
Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress, token,
datanode);
if (LOG.isTraceEnabled()) {
LOG.trace("nextTcpPeer: created newConnectedPeer " + peer);
}
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs;

import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;

@@ -140,6 +142,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolIterator;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -158,16 +161,19 @@ import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -214,7 +220,8 @@ import com.google.common.net.InetAddresses;
*
********************************************************/
@InterfaceAudience.Private
public class DFSClient implements java.io.Closeable, RemotePeerFactory {
public class DFSClient implements java.io.Closeable, RemotePeerFactory,
DataEncryptionKeyFactory {
public static final Log LOG = LogFactory.getLog(DFSClient.class);
public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB

@@ -238,7 +245,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
private final Random r = new Random();
private SocketAddress[] localInterfaceAddrs;
private DataEncryptionKey encryptionKey;
final TrustedChannelResolver trustedChannelResolver;
final SaslDataTransferClient saslClient;
private final CachingStrategy defaultReadCachingStrategy;
private final CachingStrategy defaultWriteCachingStrategy;
private final ClientContext clientContext;
@@ -646,7 +653,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
if (numThreads > 0) {
this.initThreadsNumForHedgedReads(numThreads);
}
this.trustedChannelResolver = TrustedChannelResolver.getInstance(getConfiguration());
this.saslClient = new SaslDataTransferClient(
DataTransferSaslUtil.getSaslPropertiesResolver(conf),
TrustedChannelResolver.getInstance(conf),
conf.getBoolean(
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
}

/**
@@ -1864,23 +1876,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
UnresolvedPathException.class);
}
}

/**
* Get the checksum of the whole file of a range of the file. Note that the
* range always starts from the beginning of the file.
* @param src The file path
* @param length The length of the range
* @return The checksum
* @see DistributedFileSystem#getFileChecksum(Path)
*/
public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
throws IOException {
checkOpen();
Preconditions.checkArgument(length >= 0);
return getFileChecksum(src, length, clientName, namenode,
socketFactory, dfsClientConf.socketTimeout, getDataEncryptionKey(),
dfsClientConf.connectToDnViaHostname);
}

@InterfaceAudience.Private
public void clearDataEncryptionKey() {
@@ -1900,11 +1895,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
return d == null ? false : d.getEncryptDataTransfer();
}

@InterfaceAudience.Private
public DataEncryptionKey getDataEncryptionKey()
throws IOException {
if (shouldEncryptData() &&
!this.trustedChannelResolver.isTrusted()) {
@Override
public DataEncryptionKey newDataEncryptionKey() throws IOException {
if (shouldEncryptData()) {
synchronized (this) {
if (encryptionKey == null ||
encryptionKey.expiryDate < Time.now()) {
@@ -1919,22 +1912,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
}

/**
* Get the checksum of the whole file or a range of the file.
* Get the checksum of the whole file of a range of the file. Note that the
* range always starts from the beginning of the file.
* @param src The file path
* @param length the length of the range, i.e., the range is [0, length]
* @param clientName the name of the client requesting the checksum.
* @param namenode the RPC proxy for the namenode
* @param socketFactory to create sockets to connect to DNs
* @param socketTimeout timeout to use when connecting and waiting for a response
* @param encryptionKey the key needed to communicate with DNs in this cluster
* @param connectToDnViaHostname whether the client should use hostnames instead of IPs
* @return The checksum
* @see DistributedFileSystem#getFileChecksum(Path)
*/
private static MD5MD5CRC32FileChecksum getFileChecksum(String src,
long length, String clientName, ClientProtocol namenode,
SocketFactory socketFactory, int socketTimeout,
DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
throws IOException {
checkOpen();
Preconditions.checkArgument(length >= 0);
//get block locations for the file range
LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0,
length);
@@ -1969,7 +1957,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
final DatanodeInfo[] datanodes = lb.getLocations();

//try each datanode location of the block
final int timeout = 3000 * datanodes.length + socketTimeout;
final int timeout = 3000 * datanodes.length + dfsClientConf.socketTimeout;
boolean done = false;
for(int j = 0; !done && j < datanodes.length; j++) {
DataOutputStream out = null;

@@ -1977,8 +1965,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {

try {
//connect to a datanode
IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
encryptionKey, datanodes[j], timeout);
IOStreamPair pair = connectToDN(datanodes[j], timeout, lb);
out = new DataOutputStream(new BufferedOutputStream(pair.out,
HdfsConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(pair.in);

@@ -2034,9 +2021,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
} else {
LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
"inferring checksum by reading first byte");
ct = inferChecksumTypeByReading(
clientName, socketFactory, socketTimeout, lb, datanodes[j],
encryptionKey, connectToDnViaHostname);
ct = inferChecksumTypeByReading(lb, datanodes[j]);
}

if (i == 0) { // first block
@@ -2110,16 +2095,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
* Connect to the given datanode's datantrasfer port, and return
* the resulting IOStreamPair. This includes encryption wrapping, etc.
*/
private static IOStreamPair connectToDN(
SocketFactory socketFactory, boolean connectToDnViaHostname,
DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
throws IOException
{
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
LocatedBlock lb) throws IOException {
boolean success = false;
Socket sock = null;
try {
sock = socketFactory.createSocket();
String dnAddr = dn.getXferAddr(connectToDnViaHostname);
String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr);
}

@@ -2128,13 +2110,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {

OutputStream unbufOut = NetUtils.getOutputStream(sock);
InputStream unbufIn = NetUtils.getInputStream(sock);
IOStreamPair ret;
if (encryptionKey != null) {
ret = DataTransferEncryptor.getEncryptedStreams(
unbufOut, unbufIn, encryptionKey);
} else {
ret = new IOStreamPair(unbufIn, unbufOut);
}
IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
lb.getBlockToken(), dn);
success = true;
return ret;
} finally {
@@ -2150,21 +2127,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
* with older HDFS versions which did not include the checksum type in
* OpBlockChecksumResponseProto.
*
* @param in input stream from datanode
* @param out output stream to datanode
* @param lb the located block
* @param clientName the name of the DFSClient requesting the checksum
* @param dn the connected datanode
* @return the inferred checksum type
* @throws IOException if an error occurs
*/
private static Type inferChecksumTypeByReading(
String clientName, SocketFactory socketFactory, int socketTimeout,
LocatedBlock lb, DatanodeInfo dn,
DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
throws IOException {
IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
encryptionKey, dn, socketTimeout);
IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

try {
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
@@ -2938,7 +2908,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
}

@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr) throws IOException {
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException {
Peer peer = null;
boolean success = false;
Socket sock = null;

@@ -2947,8 +2919,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
NetUtils.connect(sock, addr,
getRandomLocalInterfaceAddr(),
dfsClientConf.socketTimeout);
peer = TcpPeerServer.peerFromSocketAndKey(sock,
getDataEncryptionKey());
peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
blockToken, datanodeId);
success = true;
return peer;
} finally {
@@ -52,4 +52,6 @@ public class DFSClientFaultInjector {
public void startFetchFromDatanode() {}

public void fetchFromDatanodeException() {}

public void readFromDatanodeDelay() {}
}

@@ -561,6 +561,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
public static final String DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_KEY = "dfs.namenode.key.version.refresh.interval.ms";
public static final int DFS_NAMENODE_KEY_VERSION_REFRESH_INTERVAL_MS_DEFAULT = 5*60*1000;
@@ -1048,6 +1048,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
throw new IOException("truncated return from reader.read(): " +
"excpected " + len + ", got " + nread);
}
DFSClientFaultInjector.get().readFromDatanodeDelay();
return;
} catch (ChecksumException e) {
String msg = "fetchBlockByteRange(). Got a checksum exception for "

@@ -63,7 +63,6 @@ import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
@@ -1050,14 +1049,10 @@ public class DFSOutputStream extends FSOutputSummer

OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(sock);
if (dfsClient.shouldEncryptData() &&
!dfsClient.trustedChannelResolver.isTrusted(sock.getInetAddress())) {
IOStreamPair encryptedStreams =
DataTransferEncryptor.getEncryptedStreams(
unbufOut, unbufIn, dfsClient.getDataEncryptionKey());
unbufOut = encryptedStreams.out;
unbufIn = encryptedStreams.in;
}
IOStreamPair saslStreams = dfsClient.saslClient.socketSend(sock,
unbufOut, unbufIn, dfsClient, blockToken, src);
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
HdfsConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(unbufIn);

@@ -1328,14 +1323,10 @@ public class DFSOutputStream extends FSOutputSummer

OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(s);
if (dfsClient.shouldEncryptData() &&
!dfsClient.trustedChannelResolver.isTrusted(s.getInetAddress())) {
IOStreamPair encryptedStreams =
DataTransferEncryptor.getEncryptedStreams(unbufOut,
unbufIn, dfsClient.getDataEncryptionKey());
unbufOut = encryptedStreams.out;
unbufIn = encryptedStreams.in;
}
IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s,
unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
HdfsConstants.SMALL_BUFFER_SIZE));
blockReplyStream = new DataInputStream(unbufIn);
@@ -21,15 +21,21 @@ import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public interface RemotePeerFactory {
/**
* @param addr The address to connect to.
*
* @param blockToken Token used during optional SASL negotiation
* @param datanodeId ID of destination DataNode
* @return A new Peer connected to the address.
*
* @throws IOException If there was an error connecting or creating
* the remote socket, encrypted stream, etc.
*/
Peer newConnectedPeer(InetSocketAddress addr) throws IOException;
Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException;
}
@@ -19,9 +19,7 @@ package org.apache.hadoop.hdfs.net;

import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.net.unix.DomainSocket;

import java.io.InputStream;

@@ -51,11 +49,8 @@ public class EncryptedPeer implements Peer {
*/
private final ReadableByteChannel channel;

public EncryptedPeer(Peer enclosedPeer, DataEncryptionKey key)
throws IOException {
public EncryptedPeer(Peer enclosedPeer, IOStreamPair ios) {
this.enclosedPeer = enclosedPeer;
IOStreamPair ios = DataTransferEncryptor.getEncryptedStreams(
enclosedPeer.getOutputStream(), enclosedPeer.getInputStream(), key);
this.in = ios.in;
this.out = ios.out;
this.channel = ios.in instanceof ReadableByteChannel ?
@@ -28,10 +28,14 @@ import java.nio.channels.SocketChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.token.Token;

@InterfaceAudience.Private
public class TcpPeerServer implements PeerServer {

@@ -74,15 +78,16 @@ public class TcpPeerServer implements PeerServer {
}
}

public static Peer peerFromSocketAndKey(Socket s,
DataEncryptionKey key) throws IOException {
public static Peer peerFromSocketAndKey(
SaslDataTransferClient saslClient, Socket s,
DataEncryptionKeyFactory keyFactory,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException {
Peer peer = null;
boolean success = false;
try {
peer = peerFromSocket(s);
if (key != null) {
peer = new EncryptedPeer(peer, key);
}
peer = peerFromSocket(s);
peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
success = true;
return peer;
} finally {
@@ -1,506 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol.datatransfer;

import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Map;
import java.util.TreeMap;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.RealmCallback;
import javax.security.sasl.RealmChoiceCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;

import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;
import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslOutputStream;

import com.google.common.base.Charsets;
import com.google.common.collect.Maps;
import com.google.protobuf.ByteString;

/**
* A class which, given connected input/output streams, will perform a
* handshake using those streams based on SASL to produce new Input/Output
* streams which will encrypt/decrypt all data written/read from said streams.
* Much of this is inspired by or borrowed from the TSaslTransport in Apache
* Thrift, but with some HDFS-specific tweaks.
*/
@InterfaceAudience.Private
public class DataTransferEncryptor {

public static final Log LOG = LogFactory.getLog(DataTransferEncryptor.class);

/**
* Sent by clients and validated by servers. We use a number that's unlikely
* to ever be sent as the value of the DATA_TRANSFER_VERSION.
*/
private static final int ENCRYPTED_TRANSFER_MAGIC_NUMBER = 0xDEADBEEF;

/**
* Delimiter for the three-part SASL username string.
*/
private static final String NAME_DELIMITER = " ";

// This has to be set as part of the SASL spec, but it don't matter for
// our purposes, but may not be empty. It's sent over the wire, so use
// a short string.
private static final String SERVER_NAME = "0";

private static final String PROTOCOL = "hdfs";
private static final String MECHANISM = "DIGEST-MD5";
private static final Map<String, String> SASL_PROPS = new TreeMap<String, String>();

static {
SASL_PROPS.put(Sasl.QOP, "auth-conf");
SASL_PROPS.put(Sasl.SERVER_AUTH, "true");
}

/**
* Factory method for DNs, where the nonce, keyId, and encryption key are not
* yet known. The nonce and keyId will be sent by the client, and the DN
* will then use those pieces of info and the secret key shared with the NN
* to determine the encryptionKey used for the SASL handshake/encryption.
*
* Establishes a secure connection assuming that the party on the other end
* has the same shared secret. This does a SASL connection handshake, but not
* a general-purpose one. It's specific to the MD5-DIGEST SASL mechanism with
* auth-conf enabled. In particular, it doesn't support an arbitrary number of
* challenge/response rounds, and we know that the client will never have an
* initial response, so we don't check for one.
*
* @param underlyingOut output stream to write to the other party
* @param underlyingIn input stream to read from the other party
* @param blockPoolTokenSecretManager secret manager capable of constructing
* encryption key based on keyId, blockPoolId, and nonce
* @return a pair of streams which wrap the given streams and encrypt/decrypt
* all data read/written
* @throws IOException in the event of error
*/
public static IOStreamPair getEncryptedStreams(
OutputStream underlyingOut, InputStream underlyingIn,
BlockPoolTokenSecretManager blockPoolTokenSecretManager,
String encryptionAlgorithm) throws IOException {

DataInputStream in = new DataInputStream(underlyingIn);
DataOutputStream out = new DataOutputStream(underlyingOut);

Map<String, String> saslProps = Maps.newHashMap(SASL_PROPS);
saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);

if (LOG.isDebugEnabled()) {
LOG.debug("Server using encryption algorithm " + encryptionAlgorithm);
}

SaslParticipant sasl = new SaslParticipant(Sasl.createSaslServer(MECHANISM,
PROTOCOL, SERVER_NAME, saslProps,
new SaslServerCallbackHandler(blockPoolTokenSecretManager)));

int magicNumber = in.readInt();
if (magicNumber != ENCRYPTED_TRANSFER_MAGIC_NUMBER) {
throw new InvalidMagicNumberException(magicNumber);
}
try {
// step 1
performSaslStep1(out, in, sasl);

// step 2 (server-side only)
byte[] remoteResponse = readSaslMessage(in);
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
sendSaslMessage(out, localResponse);

// SASL handshake is complete
checkSaslComplete(sasl);

return sasl.createEncryptedStreamPair(out, in);
} catch (IOException ioe) {
if (ioe instanceof SaslException &&
ioe.getCause() != null &&
ioe.getCause() instanceof InvalidEncryptionKeyException) {
// This could just be because the client is long-lived and hasn't gotten
// a new encryption key from the NN in a while. Upon receiving this
// error, the client will get a new encryption key from the NN and retry
// connecting to this DN.
sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage());
} else {
sendGenericSaslErrorMessage(out, ioe.getMessage());
}
throw ioe;
}
}

/**
* Factory method for clients, where the encryption token is already created.
*
* Establishes a secure connection assuming that the party on the other end
* has the same shared secret. This does a SASL connection handshake, but not
* a general-purpose one. It's specific to the MD5-DIGEST SASL mechanism with
* auth-conf enabled. In particular, it doesn't support an arbitrary number of
* challenge/response rounds, and we know that the client will never have an
* initial response, so we don't check for one.
*
* @param underlyingOut output stream to write to the other party
* @param underlyingIn input stream to read from the other party
* @param encryptionKey all info required to establish an encrypted stream
* @return a pair of streams which wrap the given streams and encrypt/decrypt
* all data read/written
* @throws IOException in the event of error
*/
public static IOStreamPair getEncryptedStreams(
OutputStream underlyingOut, InputStream underlyingIn,
DataEncryptionKey encryptionKey)
throws IOException {

Map<String, String> saslProps = Maps.newHashMap(SASL_PROPS);
saslProps.put("com.sun.security.sasl.digest.cipher",
encryptionKey.encryptionAlgorithm);

if (LOG.isDebugEnabled()) {
LOG.debug("Client using encryption algorithm " +
encryptionKey.encryptionAlgorithm);
}

DataOutputStream out = new DataOutputStream(underlyingOut);
DataInputStream in = new DataInputStream(underlyingIn);

String userName = getUserNameFromEncryptionKey(encryptionKey);
SaslParticipant sasl = new SaslParticipant(Sasl.createSaslClient(
new String[] { MECHANISM }, userName, PROTOCOL, SERVER_NAME, saslProps,
new SaslClientCallbackHandler(encryptionKey.encryptionKey, userName)));

out.writeInt(ENCRYPTED_TRANSFER_MAGIC_NUMBER);
out.flush();

try {
// Start of handshake - "initial response" in SASL terminology.
sendSaslMessage(out, new byte[0]);

// step 1
performSaslStep1(out, in, sasl);

// step 2 (client-side only)
byte[] remoteResponse = readSaslMessage(in);
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
assert localResponse == null;

// SASL handshake is complete
checkSaslComplete(sasl);

return sasl.createEncryptedStreamPair(out, in);
} catch (IOException ioe) {
sendGenericSaslErrorMessage(out, ioe.getMessage());
throw ioe;
}
}

private static void performSaslStep1(DataOutputStream out, DataInputStream in,
SaslParticipant sasl) throws IOException {
byte[] remoteResponse = readSaslMessage(in);
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
sendSaslMessage(out, localResponse);
}

private static void checkSaslComplete(SaslParticipant sasl) throws IOException {
if (!sasl.isComplete()) {
throw new IOException("Failed to complete SASL handshake");
}

if (!sasl.supportsConfidentiality()) {
throw new IOException("SASL handshake completed, but channel does not " +
"support encryption");
}
}

private static void sendSaslMessage(DataOutputStream out, byte[] payload)
throws IOException {
sendSaslMessage(out, DataTransferEncryptorStatus.SUCCESS, payload, null);
}

private static void sendInvalidKeySaslErrorMessage(DataOutputStream out,
String message) throws IOException {
sendSaslMessage(out, DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY, null,
message);
}

private static void sendGenericSaslErrorMessage(DataOutputStream out,
String message) throws IOException {
sendSaslMessage(out, DataTransferEncryptorStatus.ERROR, null, message);
}

private static void sendSaslMessage(OutputStream out,
DataTransferEncryptorStatus status, byte[] payload, String message)
throws IOException {
DataTransferEncryptorMessageProto.Builder builder =
DataTransferEncryptorMessageProto.newBuilder();

builder.setStatus(status);
if (payload != null) {
builder.setPayload(ByteString.copyFrom(payload));
}
if (message != null) {
builder.setMessage(message);
}

DataTransferEncryptorMessageProto proto = builder.build();
proto.writeDelimitedTo(out);
out.flush();
}

private static byte[] readSaslMessage(DataInputStream in) throws IOException {
DataTransferEncryptorMessageProto proto =
DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in));
if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
throw new InvalidEncryptionKeyException(proto.getMessage());
} else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
throw new IOException(proto.getMessage());
} else {
return proto.getPayload().toByteArray();
}
}

/**
* Set the encryption key when asked by the server-side SASL object.
*/
private static class SaslServerCallbackHandler implements CallbackHandler {

private final BlockPoolTokenSecretManager blockPoolTokenSecretManager;

public SaslServerCallbackHandler(BlockPoolTokenSecretManager
blockPoolTokenSecretManager) {
this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
}

@Override
public void handle(Callback[] callbacks) throws IOException,
UnsupportedCallbackException {
NameCallback nc = null;
PasswordCallback pc = null;
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof AuthorizeCallback) {
ac = (AuthorizeCallback) callback;
} else if (callback instanceof PasswordCallback) {
pc = (PasswordCallback) callback;
} else if (callback instanceof NameCallback) {
nc = (NameCallback) callback;
} else if (callback instanceof RealmCallback) {
continue; // realm is ignored
} else {
throw new UnsupportedCallbackException(callback,
"Unrecognized SASL DIGEST-MD5 Callback: " + callback);
}
}

if (pc != null) {
byte[] encryptionKey = getEncryptionKeyFromUserName(
blockPoolTokenSecretManager, nc.getDefaultName());
pc.setPassword(encryptionKeyToPassword(encryptionKey));
}

if (ac != null) {
ac.setAuthorized(true);
ac.setAuthorizedID(ac.getAuthorizationID());
}

}

}

/**
* Set the encryption key when asked by the client-side SASL object.
*/
private static class SaslClientCallbackHandler implements CallbackHandler {

private final byte[] encryptionKey;
private final String userName;

public SaslClientCallbackHandler(byte[] encryptionKey, String userName) {
this.encryptionKey = encryptionKey;
this.userName = userName;
}

@Override
public void handle(Callback[] callbacks) throws IOException,
UnsupportedCallbackException {
NameCallback nc = null;
PasswordCallback pc = null;
RealmCallback rc = null;
for (Callback callback : callbacks) {
if (callback instanceof RealmChoiceCallback) {
continue;
} else if (callback instanceof NameCallback) {
nc = (NameCallback) callback;
} else if (callback instanceof PasswordCallback) {
pc = (PasswordCallback) callback;
} else if (callback instanceof RealmCallback) {
rc = (RealmCallback) callback;
} else {
throw new UnsupportedCallbackException(callback,
"Unrecognized SASL client callback");
}
}
if (nc != null) {
nc.setName(userName);
}
if (pc != null) {
pc.setPassword(encryptionKeyToPassword(encryptionKey));
}
if (rc != null) {
rc.setText(rc.getDefaultText());
}
}

}

/**
* The SASL username consists of the keyId, blockPoolId, and nonce with the
* first two encoded as Strings, and the third encoded using Base64. The
* fields are each separated by a single space.
*
* @param encryptionKey the encryption key to encode as a SASL username.
* @return encoded username containing keyId, blockPoolId, and nonce
*/
private static String getUserNameFromEncryptionKey(
DataEncryptionKey encryptionKey) {
return encryptionKey.keyId + NAME_DELIMITER +
encryptionKey.blockPoolId + NAME_DELIMITER +
new String(Base64.encodeBase64(encryptionKey.nonce, false), Charsets.UTF_8);
}

/**
* Given a secret manager and a username encoded as described above, determine
* the encryption key.
*
* @param blockPoolTokenSecretManager to determine the encryption key.
* @param userName containing the keyId, blockPoolId, and nonce.
* @return secret encryption key.
* @throws IOException
*/
private static byte[] getEncryptionKeyFromUserName(
BlockPoolTokenSecretManager blockPoolTokenSecretManager, String userName)
throws IOException {
String[] nameComponents = userName.split(NAME_DELIMITER);
if (nameComponents.length != 3) {
throw new IOException("Provided name '" + userName + "' has " +
nameComponents.length + " components instead of the expected 3.");
}
int keyId = Integer.parseInt(nameComponents[0]);
String blockPoolId = nameComponents[1];
byte[] nonce = Base64.decodeBase64(nameComponents[2]);
return blockPoolTokenSecretManager.retrieveDataEncryptionKey(keyId,
blockPoolId, nonce);
}

private static char[] encryptionKeyToPassword(byte[] encryptionKey) {
return new String(Base64.encodeBase64(encryptionKey, false), Charsets.UTF_8).toCharArray();
}

/**
* Strongly inspired by Thrift's TSaslTransport class.
*
* Used to abstract over the <code>SaslServer</code> and
* <code>SaslClient</code> classes, which share a lot of their interface, but
* unfortunately don't share a common superclass.
*/
private static class SaslParticipant {
// One of these will always be null.
public SaslServer saslServer;
public SaslClient saslClient;

public SaslParticipant(SaslServer saslServer) {
this.saslServer = saslServer;
}

public SaslParticipant(SaslClient saslClient) {
this.saslClient = saslClient;
}

public byte[] evaluateChallengeOrResponse(byte[] challengeOrResponse) throws SaslException {
if (saslClient != null) {
return saslClient.evaluateChallenge(challengeOrResponse);
} else {
return saslServer.evaluateResponse(challengeOrResponse);
}
}

public boolean isComplete() {
if (saslClient != null)
return saslClient.isComplete();
else
return saslServer.isComplete();
}

public boolean supportsConfidentiality() {
String qop = null;
if (saslClient != null) {
qop = (String) saslClient.getNegotiatedProperty(Sasl.QOP);
} else {
qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
}
return qop != null && qop.equals("auth-conf");
}

// Return some input/output streams that will henceforth have their
// communication encrypted.
private IOStreamPair createEncryptedStreamPair(
DataOutputStream out, DataInputStream in) {
if (saslClient != null) {
return new IOStreamPair(
new SaslInputStream(in, saslClient),
new SaslOutputStream(out, saslClient));
} else {
return new IOStreamPair(
new SaslInputStream(in, saslServer),
new SaslOutputStream(out, saslServer));
}
}
}

@InterfaceAudience.Private
public static class InvalidMagicNumberException extends IOException {

private static final long serialVersionUID = 1L;

public InvalidMagicNumberException(int magicNumber) {
super(String.format("Received %x instead of %x from client.",
magicNumber, ENCRYPTED_TRANSFER_MAGIC_NUMBER));
}
}

}
@@ -0,0 +1,38 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;

/**
* Creates a new {@link DataEncryptionKey} on demand.
*/
@InterfaceAudience.Private
public interface DataEncryptionKeyFactory {

/**
* Creates a new DataEncryptionKey.
*
* @return DataEncryptionKey newly created
* @throws IOException for any error
*/
DataEncryptionKey newDataEncryptionKey() throws IOException;
}
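A note on the new interface: DFSClient satisfies it by fetching keys from the NameNode (see its newDataEncryptionKey above), but any source of keys will do. The sketch below is purely illustrative and not part of this patch; the class name FixedKeyFactory and the idea of returning a pre-constructed key are assumptions, only DataEncryptionKeyFactory and DataEncryptionKey come from the change.

    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
    import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;

    // Hypothetical stub factory, e.g. for a test: always hands back one key
    // obtained elsewhere instead of minting a fresh key per call.
    class FixedKeyFactory implements DataEncryptionKeyFactory {
      private final DataEncryptionKey key;   // key acquired up front by the caller

      FixedKeyFactory(DataEncryptionKey key) {
        this.key = key;
      }

      @Override
      public DataEncryptionKey newDataEncryptionKey() {
        return key;  // real factories would create a new key on demand
      }
    }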
@@ -0,0 +1,267 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetAddress;
import java.util.Arrays;
import java.util.Map;
import java.util.Set;
import javax.security.sasl.Sasl;

import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;
import org.apache.hadoop.security.SaslPropertiesResolver;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import com.google.common.net.InetAddresses;
import com.google.protobuf.ByteString;

/**
* Utility methods implementing SASL negotiation for DataTransferProtocol.
*/
@InterfaceAudience.Private
public final class DataTransferSaslUtil {

private static final Logger LOG = LoggerFactory.getLogger(
DataTransferSaslUtil.class);

/**
* Delimiter for the three-part SASL username string.
*/
public static final String NAME_DELIMITER = " ";

/**
* Sent by clients and validated by servers. We use a number that's unlikely
* to ever be sent as the value of the DATA_TRANSFER_VERSION.
*/
public static final int SASL_TRANSFER_MAGIC_NUMBER = 0xDEADBEEF;

/**
* Checks that SASL negotiation has completed for the given participant, and
* the negotiated quality of protection is included in the given SASL
* properties and therefore acceptable.
*
* @param sasl participant to check
* @param saslProps properties of SASL negotiation
* @throws IOException for any error
*/
public static void checkSaslComplete(SaslParticipant sasl,
Map<String, String> saslProps) throws IOException {
if (!sasl.isComplete()) {
throw new IOException("Failed to complete SASL handshake");
}
Set<String> requestedQop = ImmutableSet.copyOf(Arrays.asList(
saslProps.get(Sasl.QOP).split(",")));
String negotiatedQop = sasl.getNegotiatedQop();
LOG.debug("Verifying QOP, requested QOP = {}, negotiated QOP = {}",
requestedQop, negotiatedQop);
if (!requestedQop.contains(negotiatedQop)) {
throw new IOException(String.format("SASL handshake completed, but " +
"channel does not have acceptable quality of protection, " +
"requested = %s, negotiated = %s", requestedQop, negotiatedQop));
}
}

/**
* Creates SASL properties required for an encrypted SASL negotiation.
*
* @param encryptionAlgorithm to use for SASL negotation
* @return properties of encrypted SASL negotiation
*/
public static Map<String, String> createSaslPropertiesForEncryption(
String encryptionAlgorithm) {
Map<String, String> saslProps = Maps.newHashMapWithExpectedSize(3);
saslProps.put(Sasl.QOP, QualityOfProtection.PRIVACY.getSaslQop());
saslProps.put(Sasl.SERVER_AUTH, "true");
saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm);
return saslProps;
}

/**
* For an encrypted SASL negotiation, encodes an encryption key to a SASL
* password.
*
* @param encryptionKey to encode
* @return key encoded as SASL password
*/
public static char[] encryptionKeyToPassword(byte[] encryptionKey) {
return new String(Base64.encodeBase64(encryptionKey, false), Charsets.UTF_8)
.toCharArray();
}

/**
* Returns InetAddress from peer. The getRemoteAddressString has the form
* [host][/ip-address]:port. The host may be missing. The IP address (and
* preceding '/') may be missing. The port preceded by ':' is always present.
*
* @param peer
* @return InetAddress from peer
*/
public static InetAddress getPeerAddress(Peer peer) {
String remoteAddr = peer.getRemoteAddressString().split(":")[0];
int slashIdx = remoteAddr.indexOf('/');
return InetAddresses.forString(slashIdx != -1 ?
remoteAddr.substring(slashIdx + 1, remoteAddr.length()) :
remoteAddr);
}

/**
* Creates a SaslPropertiesResolver from the given configuration. This method
* works by cloning the configuration, translating configuration properties
* specific to DataTransferProtocol to what SaslPropertiesResolver expects,
* and then delegating to SaslPropertiesResolver for initialization. This
* method returns null if SASL protection has not been configured for
* DataTransferProtocol.
*
* @param conf configuration to read
* @return SaslPropertiesResolver for DataTransferProtocol, or null if not
* configured
*/
public static SaslPropertiesResolver getSaslPropertiesResolver(
Configuration conf) {
String qops = conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY);
if (qops == null || qops.isEmpty()) {
LOG.debug("DataTransferProtocol not using SaslPropertiesResolver, no " +
"QOP found in configuration for {}", DFS_DATA_TRANSFER_PROTECTION_KEY);
return null;
}
Configuration saslPropsResolverConf = new Configuration(conf);
saslPropsResolverConf.set(HADOOP_RPC_PROTECTION, qops);
Class<? extends SaslPropertiesResolver> resolverClass = conf.getClass(
DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY,
SaslPropertiesResolver.class, SaslPropertiesResolver.class);
saslPropsResolverConf.setClass(HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
resolverClass, SaslPropertiesResolver.class);
SaslPropertiesResolver resolver = SaslPropertiesResolver.getInstance(
saslPropsResolverConf);
LOG.debug("DataTransferProtocol using SaslPropertiesResolver, configured " +
"QOP {} = {}, configured class {} = {}", DFS_DATA_TRANSFER_PROTECTION_KEY, qops,
DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY, resolverClass);
return resolver;
}

/**
* Performs the first step of SASL negotiation.
*
* @param out connection output stream
* @param in connection input stream
* @param sasl participant
*/
public static void performSaslStep1(OutputStream out, InputStream in,
SaslParticipant sasl) throws IOException {
byte[] remoteResponse = readSaslMessage(in);
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
sendSaslMessage(out, localResponse);
}

/**
* Reads a SASL negotiation message.
*
* @param in stream to read
* @return bytes of SASL negotiation messsage
* @throws IOException for any error
*/
public static byte[] readSaslMessage(InputStream in) throws IOException {
DataTransferEncryptorMessageProto proto =
DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in));
if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
throw new InvalidEncryptionKeyException(proto.getMessage());
} else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
throw new IOException(proto.getMessage());
} else {
return proto.getPayload().toByteArray();
}
}

/**
* Sends a SASL negotiation message indicating an error.
*
* @param out stream to receive message
* @param message to send
* @throws IOException for any error
*/
public static void sendGenericSaslErrorMessage(OutputStream out,
String message) throws IOException {
sendSaslMessage(out, DataTransferEncryptorStatus.ERROR, null, message);
}

/**
* Sends a SASL negotiation message.
*
* @param out stream to receive message
* @param payload to send
* @throws IOException for any error
*/
public static void sendSaslMessage(OutputStream out, byte[] payload)
throws IOException {
sendSaslMessage(out, DataTransferEncryptorStatus.SUCCESS, payload, null);
}

/**
* Sends a SASL negotiation message.
*
* @param out stream to receive message
* @param status negotiation status
* @param payload to send
* @param message to send
* @throws IOException for any error
*/
public static void sendSaslMessage(OutputStream out,
DataTransferEncryptorStatus status, byte[] payload, String message)
throws IOException {
DataTransferEncryptorMessageProto.Builder builder =
DataTransferEncryptorMessageProto.newBuilder();

builder.setStatus(status);
if (payload != null) {
builder.setPayload(ByteString.copyFrom(payload));
}
if (message != null) {
builder.setMessage(message);
}

DataTransferEncryptorMessageProto proto = builder.build();
proto.writeDelimitedTo(out);
out.flush();
}

/**
* There is no reason to instantiate this class.
*/
private DataTransferSaslUtil() {
}
}
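The resolver above is driven purely by configuration. A minimal sketch of how a client or test might enable SASL protection for DataTransferProtocol follows; only the key names come from this patch, the property value shown is an illustrative assumption (comma-separated QOP values, as accepted by SaslPropertiesResolver).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
    import org.apache.hadoop.security.SaslPropertiesResolver;

    // Request authentication, integrity and privacy for data transfer.
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY,
        "authentication,integrity,privacy");

    // Returns null when dfs.data.transfer.protection is unset or empty,
    // in which case the client falls back to the legacy encryption path.
    SaslPropertiesResolver resolver =
        DataTransferSaslUtil.getSaslPropertiesResolver(conf);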
@@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;

import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.SASL_TRANSFER_MAGIC_NUMBER;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;

/**
* Indicates that SASL protocol negotiation expected to read a pre-defined magic
* number, but the expected value was not seen.
*/
@InterfaceAudience.Private
public class InvalidMagicNumberException extends IOException {

private static final long serialVersionUID = 1L;

/**
* Creates a new InvalidMagicNumberException.
*
* @param magicNumber expected value
*/
public InvalidMagicNumberException(int magicNumber) {
super(String.format("Received %x instead of %x from client.",
magicNumber, SASL_TRANSFER_MAGIC_NUMBER));
}
}
@@ -0,0 +1,439 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
|
||||
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
|
||||
import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
|
||||
|
||||
import java.io.DataInputStream;
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.net.Socket;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.security.auth.callback.Callback;
|
||||
import javax.security.auth.callback.CallbackHandler;
|
||||
import javax.security.auth.callback.NameCallback;
|
||||
import javax.security.auth.callback.PasswordCallback;
|
||||
import javax.security.auth.callback.UnsupportedCallbackException;
|
||||
import javax.security.sasl.RealmCallback;
|
||||
import javax.security.sasl.RealmChoiceCallback;
|
||||
|
||||
import org.apache.commons.codec.binary.Base64;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hdfs.net.EncryptedPeer;
|
||||
import org.apache.hadoop.hdfs.net.Peer;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
|
||||
import org.apache.hadoop.security.SaslPropertiesResolver;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.google.common.base.Charsets;
|
||||
|
||||
/**
|
||||
* Negotiates SASL for DataTransferProtocol on behalf of a client. There are
|
||||
* two possible supported variants of SASL negotiation: either a general-purpose
|
||||
* negotiation supporting any quality of protection, or a specialized
|
||||
* negotiation that enforces privacy as the quality of protection using a
|
||||
* cryptographically strong encryption key.
|
||||
*
|
||||
* This class is used in both the HDFS client and the DataNode. The DataNode
|
||||
* needs it, because it acts as a client to other DataNodes during write
|
||||
* pipelines and block transfers.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class SaslDataTransferClient {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(
|
||||
SaslDataTransferClient.class);
|
||||
|
||||
private final boolean fallbackToSimpleAuthAllowed;
|
||||
private final SaslPropertiesResolver saslPropsResolver;
|
||||
private final TrustedChannelResolver trustedChannelResolver;
|
||||
|
||||
/**
|
||||
* Creates a new SaslDataTransferClient.
|
||||
*
|
||||
* @param saslPropsResolver for determining properties of SASL negotiation
|
||||
* @param trustedChannelResolver for identifying trusted connections that do
|
||||
* not require SASL negotiation
|
||||
*/
|
||||
public SaslDataTransferClient(SaslPropertiesResolver saslPropsResolver,
|
||||
TrustedChannelResolver trustedChannelResolver,
|
||||
boolean fallbackToSimpleAuthAllowed) {
|
||||
this.fallbackToSimpleAuthAllowed = fallbackToSimpleAuthAllowed;
|
||||
this.saslPropsResolver = saslPropsResolver;
|
||||
this.trustedChannelResolver = trustedChannelResolver;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends client SASL negotiation for a newly allocated socket if required.
|
||||
*
|
||||
* @param socket connection socket
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param encryptionKeyFactory for creation of an encryption key
|
||||
* @param accessToken connection block access token
|
||||
* @param datanodeId ID of destination DataNode
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
public IOStreamPair newSocketSend(Socket socket, OutputStream underlyingOut,
|
||||
InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
|
||||
Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
|
||||
throws IOException {
|
||||
// The encryption key factory only returns a key if encryption is enabled.
|
||||
DataEncryptionKey encryptionKey = !trustedChannelResolver.isTrusted() ?
|
||||
encryptionKeyFactory.newDataEncryptionKey() : null;
|
||||
IOStreamPair ios = send(socket.getInetAddress(), underlyingOut,
|
||||
underlyingIn, encryptionKey, accessToken, datanodeId);
|
||||
return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends client SASL negotiation for a peer if required.
|
||||
*
|
||||
* @param peer connection peer
|
||||
* @param encryptionKeyFactory for creation of an encryption key
|
||||
* @param accessToken connection block access token
|
||||
* @param datanodeId ID of destination DataNode
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
public Peer peerSend(Peer peer, DataEncryptionKeyFactory encryptionKeyFactory,
|
||||
Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
|
||||
throws IOException {
|
||||
IOStreamPair ios = checkTrustAndSend(getPeerAddress(peer),
|
||||
peer.getOutputStream(), peer.getInputStream(), encryptionKeyFactory,
|
||||
accessToken, datanodeId);
|
||||
// TODO: Consider renaming EncryptedPeer to SaslPeer.
|
||||
return ios != null ? new EncryptedPeer(peer, ios) : peer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends client SASL negotiation for a socket if required.
|
||||
*
|
||||
* @param socket connection socket
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param encryptionKeyFactory for creation of an encryption key
|
||||
* @param accessToken connection block access token
|
||||
* @param datanodeId ID of destination DataNode
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
public IOStreamPair socketSend(Socket socket, OutputStream underlyingOut,
|
||||
InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory,
|
||||
Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
|
||||
throws IOException {
|
||||
IOStreamPair ios = checkTrustAndSend(socket.getInetAddress(), underlyingOut,
|
||||
underlyingIn, encryptionKeyFactory, accessToken, datanodeId);
|
||||
return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if an address is already trusted and then sends client SASL
|
||||
* negotiation if required.
|
||||
*
|
||||
* @param addr connection address
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param encryptionKeyFactory for creation of an encryption key
|
||||
* @param accessToken connection block access token
|
||||
* @param datanodeId ID of destination DataNode
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private IOStreamPair checkTrustAndSend(InetAddress addr,
|
||||
OutputStream underlyingOut, InputStream underlyingIn,
|
||||
DataEncryptionKeyFactory encryptionKeyFactory,
|
||||
Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
|
||||
throws IOException {
|
||||
if (!trustedChannelResolver.isTrusted() &&
|
||||
!trustedChannelResolver.isTrusted(addr)) {
|
||||
// The encryption key factory only returns a key if encryption is enabled.
|
||||
DataEncryptionKey encryptionKey =
|
||||
encryptionKeyFactory.newDataEncryptionKey();
|
||||
return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken,
|
||||
datanodeId);
|
||||
} else {
|
||||
LOG.debug(
|
||||
"SASL client skipping handshake on trusted connection for addr = {}, "
|
||||
+ "datanodeId = {}", addr, datanodeId);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends client SASL negotiation if required. Determines the correct type of
|
||||
* SASL handshake based on configuration.
|
||||
*
|
||||
* @param addr connection address
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param encryptionKey for an encrypted SASL handshake
|
||||
* @param accessToken connection block access token
|
||||
* @param datanodeId ID of destination DataNode
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private IOStreamPair send(InetAddress addr, OutputStream underlyingOut,
|
||||
InputStream underlyingIn, DataEncryptionKey encryptionKey,
|
||||
Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
|
||||
throws IOException {
|
||||
if (encryptionKey != null) {
|
||||
LOG.debug(
|
||||
"SASL client doing encrypted handshake for addr = {}, datanodeId = {}",
|
||||
addr, datanodeId);
|
||||
return getEncryptedStreams(underlyingOut, underlyingIn,
|
||||
encryptionKey);
|
||||
} else if (!UserGroupInformation.isSecurityEnabled()) {
|
||||
LOG.debug(
|
||||
"SASL client skipping handshake in unsecured configuration for "
|
||||
+ "addr = {}, datanodeId = {}", addr, datanodeId);
|
||||
return null;
|
||||
} else if (datanodeId.getXferPort() < 1024) {
|
||||
LOG.debug(
|
||||
"SASL client skipping handshake in secured configuration with "
|
||||
+ "privileged port for addr = {}, datanodeId = {}", addr, datanodeId);
|
||||
return null;
|
||||
} else if (accessToken.getIdentifier().length == 0) {
|
||||
if (!fallbackToSimpleAuthAllowed) {
|
||||
throw new IOException(
|
||||
"No block access token was provided (insecure cluster), but this " +
|
||||
"client is configured to allow only secure connections.");
|
||||
}
|
||||
LOG.debug(
|
||||
"SASL client skipping handshake in secured configuration with "
|
||||
+ "unsecured cluster for addr = {}, datanodeId = {}", addr, datanodeId);
|
||||
return null;
|
||||
} else {
|
||||
LOG.debug(
|
||||
"SASL client doing general handshake for addr = {}, datanodeId = {}",
|
||||
addr, datanodeId);
|
||||
return getSaslStreams(addr, underlyingOut, underlyingIn, accessToken,
|
||||
datanodeId);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends client SASL negotiation for specialized encrypted handshake.
|
||||
*
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param encryptionKey for an encrypted SASL handshake
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private IOStreamPair getEncryptedStreams(OutputStream underlyingOut,
|
||||
InputStream underlyingIn, DataEncryptionKey encryptionKey)
|
||||
throws IOException {
|
||||
Map<String, String> saslProps = createSaslPropertiesForEncryption(
|
||||
encryptionKey.encryptionAlgorithm);
|
||||
|
||||
LOG.debug("Client using encryption algorithm {}",
|
||||
encryptionKey.encryptionAlgorithm);
|
||||
|
||||
String userName = getUserNameFromEncryptionKey(encryptionKey);
|
||||
char[] password = encryptionKeyToPassword(encryptionKey.encryptionKey);
|
||||
CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
|
||||
password);
|
||||
return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
|
||||
callbackHandler);
|
||||
}
|
||||
|
||||
/**
|
||||
* The SASL username for an encrypted handshake consists of the keyId,
|
||||
* blockPoolId, and nonce with the first two encoded as Strings, and the third
|
||||
* encoded using Base64. The fields are each separated by a single space.
|
||||
*
|
||||
* @param encryptionKey the encryption key to encode as a SASL username.
|
||||
* @return encoded username containing keyId, blockPoolId, and nonce
|
||||
*/
|
||||
private static String getUserNameFromEncryptionKey(
|
||||
DataEncryptionKey encryptionKey) {
|
||||
return encryptionKey.keyId + NAME_DELIMITER +
|
||||
encryptionKey.blockPoolId + NAME_DELIMITER +
|
||||
new String(Base64.encodeBase64(encryptionKey.nonce, false), Charsets.UTF_8);
|
||||
}
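// Illustration only (hypothetical values): a key with keyId 42, blockPoolId
// "BP-1-127.0.0.1-1400000000000" and nonce bytes {0xAB, 0xCD, 0xEF} would yield the
// SASL user name "42 BP-1-127.0.0.1-1400000000000 q83v", i.e.
// keyId + NAME_DELIMITER + blockPoolId + NAME_DELIMITER + base64(nonce),
// with a single space as the delimiter.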
|
||||
|
||||
/**
|
||||
* Sets user name and password when asked by the client-side SASL object.
|
||||
*/
|
||||
private static final class SaslClientCallbackHandler
|
||||
implements CallbackHandler {
|
||||
|
||||
private final char[] password;
|
||||
private final String userName;
|
||||
|
||||
/**
|
||||
* Creates a new SaslClientCallbackHandler.
|
||||
*
|
||||
* @param userName SASL user name
|
||||
* @param password SASL password
|
||||
*/
|
||||
public SaslClientCallbackHandler(String userName, char[] password) {
|
||||
this.password = password;
|
||||
this.userName = userName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handle(Callback[] callbacks) throws IOException,
|
||||
UnsupportedCallbackException {
|
||||
NameCallback nc = null;
|
||||
PasswordCallback pc = null;
|
||||
RealmCallback rc = null;
|
||||
for (Callback callback : callbacks) {
|
||||
if (callback instanceof RealmChoiceCallback) {
|
||||
continue;
|
||||
} else if (callback instanceof NameCallback) {
|
||||
nc = (NameCallback) callback;
|
||||
} else if (callback instanceof PasswordCallback) {
|
||||
pc = (PasswordCallback) callback;
|
||||
} else if (callback instanceof RealmCallback) {
|
||||
rc = (RealmCallback) callback;
|
||||
} else {
|
||||
throw new UnsupportedCallbackException(callback,
|
||||
"Unrecognized SASL client callback");
|
||||
}
|
||||
}
|
||||
if (nc != null) {
|
||||
nc.setName(userName);
|
||||
}
|
||||
if (pc != null) {
|
||||
pc.setPassword(password);
|
||||
}
|
||||
if (rc != null) {
|
||||
rc.setText(rc.getDefaultText());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends client SASL negotiation for general-purpose handshake.
|
||||
*
|
||||
* @param addr connection address
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param accessToken connection block access token
|
||||
* @param datanodeId ID of destination DataNode
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private IOStreamPair getSaslStreams(InetAddress addr,
|
||||
OutputStream underlyingOut, InputStream underlyingIn,
|
||||
Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
|
||||
throws IOException {
|
||||
if (saslPropsResolver == null) {
|
||||
throw new IOException(String.format("Cannot create a secured " +
|
||||
"connection if DataNode listens on unprivileged port (%d) and no " +
|
||||
"protection is defined in configuration property %s.",
|
||||
datanodeId.getXferPort(), DFS_DATA_TRANSFER_PROTECTION_KEY));
|
||||
}
|
||||
Map<String, String> saslProps = saslPropsResolver.getClientProperties(addr);
|
||||
|
||||
String userName = buildUserName(accessToken);
|
||||
char[] password = buildClientPassword(accessToken);
|
||||
CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
|
||||
password);
|
||||
return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
|
||||
callbackHandler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds the client's user name for the general-purpose handshake, consisting
|
||||
* of the base64-encoded serialized block access token identifier. Note that
|
||||
* this includes only the token identifier, not the token itself, which would
|
||||
* include the password. The password is a shared secret, and we must not
|
||||
* write it on the network during the SASL authentication exchange.
|
||||
*
|
||||
* @param blockToken for block access
|
||||
* @return SASL user name
|
||||
*/
|
||||
private static String buildUserName(Token<BlockTokenIdentifier> blockToken) {
|
||||
return new String(Base64.encodeBase64(blockToken.getIdentifier(), false),
|
||||
Charsets.UTF_8);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates the password on the client side for the general-purpose
|
||||
* handshake. The password consists of the block access token's password.
|
||||
*
|
||||
* @param blockToken for block access
|
||||
* @return SASL password
|
||||
*/
|
||||
private char[] buildClientPassword(Token<BlockTokenIdentifier> blockToken) {
|
||||
return new String(Base64.encodeBase64(blockToken.getPassword(), false),
|
||||
Charsets.UTF_8).toCharArray();
|
||||
}
|
||||
|
||||
/**
|
||||
* This method actually executes the client-side SASL handshake.
|
||||
*
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param userName SASL user name
|
||||
* @param saslProps properties of SASL negotiation
|
||||
* @param callbackHandler for responding to SASL callbacks
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private IOStreamPair doSaslHandshake(OutputStream underlyingOut,
|
||||
InputStream underlyingIn, String userName, Map<String, String> saslProps,
|
||||
CallbackHandler callbackHandler) throws IOException {
|
||||
|
||||
DataOutputStream out = new DataOutputStream(underlyingOut);
|
||||
DataInputStream in = new DataInputStream(underlyingIn);
|
||||
|
||||
SaslParticipant sasl = SaslParticipant.createClientSaslParticipant(userName,
|
||||
saslProps, callbackHandler);
|
||||
|
||||
out.writeInt(SASL_TRANSFER_MAGIC_NUMBER);
|
||||
out.flush();
|
||||
|
||||
try {
|
||||
// Start of handshake - "initial response" in SASL terminology.
|
||||
sendSaslMessage(out, new byte[0]);
|
||||
|
||||
// step 1
|
||||
performSaslStep1(out, in, sasl);
|
||||
|
||||
// step 2 (client-side only)
|
||||
byte[] remoteResponse = readSaslMessage(in);
|
||||
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
|
||||
assert localResponse == null;
|
||||
|
||||
// SASL handshake is complete
|
||||
checkSaslComplete(sasl, saslProps);
|
||||
|
||||
return sasl.createStreamPair(out, in);
|
||||
} catch (IOException ioe) {
|
||||
sendGenericSaslErrorMessage(out, ioe.getMessage());
|
||||
throw ioe;
|
||||
}
|
||||
}
|
||||
}
|
|
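A rough client-side wiring sketch (not part of this patch) mirroring the Balancer and DataNode changes further below; conf, sock, the key factory, access token and datanodeId are assumed:

  SaslDataTransferClient saslClient = new SaslDataTransferClient(
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf),
      conf.getBoolean(IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
          IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
  IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut, unbufIn,
      encryptionKeyFactory, accessToken, datanodeId);
  // subsequent traffic uses saslStreams.out / saslStreams.in instead of the raw streams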
@@ -0,0 +1,381 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
|
||||
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
|
||||
import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.security.auth.callback.Callback;
|
||||
import javax.security.auth.callback.CallbackHandler;
|
||||
import javax.security.auth.callback.NameCallback;
|
||||
import javax.security.auth.callback.PasswordCallback;
|
||||
import javax.security.auth.callback.UnsupportedCallbackException;
|
||||
import javax.security.sasl.AuthorizeCallback;
|
||||
import javax.security.sasl.RealmCallback;
|
||||
import javax.security.sasl.SaslException;
|
||||
|
||||
import org.apache.commons.codec.binary.Base64;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hdfs.net.Peer;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DNConf;
|
||||
import org.apache.hadoop.security.SaslPropertiesResolver;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.google.common.base.Charsets;
|
||||
|
||||
/**
|
||||
* Negotiates SASL for DataTransferProtocol on behalf of a server. There are
|
||||
* two possible supported variants of SASL negotiation: either a general-purpose
|
||||
* negotiation supporting any quality of protection, or a specialized
|
||||
* negotiation that enforces privacy as the quality of protection using a
|
||||
* cryptographically strong encryption key.
|
||||
*
|
||||
* This class is used in the DataNode for handling inbound connections.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class SaslDataTransferServer {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(
|
||||
SaslDataTransferServer.class);
|
||||
|
||||
private final BlockPoolTokenSecretManager blockPoolTokenSecretManager;
|
||||
private final DNConf dnConf;
|
||||
|
||||
/**
|
||||
* Creates a new SaslDataTransferServer.
|
||||
*
|
||||
* @param dnConf configuration of DataNode
|
||||
* @param blockPoolTokenSecretManager used for checking block access tokens
|
||||
* and encryption keys
|
||||
*/
|
||||
public SaslDataTransferServer(DNConf dnConf,
|
||||
BlockPoolTokenSecretManager blockPoolTokenSecretManager) {
|
||||
this.blockPoolTokenSecretManager = blockPoolTokenSecretManager;
|
||||
this.dnConf = dnConf;
|
||||
}
|
||||
|
||||
/**
|
||||
* Receives SASL negotiation from a peer on behalf of a server.
|
||||
*
|
||||
* @param peer connection peer
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param datanodeId ID of DataNode accepting connection
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
public IOStreamPair receive(Peer peer, OutputStream underlyingOut,
|
||||
InputStream underlyingIn, DatanodeID datanodeId) throws IOException {
|
||||
if (dnConf.getEncryptDataTransfer()) {
|
||||
LOG.debug(
|
||||
"SASL server doing encrypted handshake for peer = {}, datanodeId = {}",
|
||||
peer, datanodeId);
|
||||
return getEncryptedStreams(peer, underlyingOut, underlyingIn);
|
||||
} else if (!UserGroupInformation.isSecurityEnabled()) {
|
||||
LOG.debug(
|
||||
"SASL server skipping handshake in unsecured configuration for "
|
||||
+ "peer = {}, datanodeId = {}", peer, datanodeId);
|
||||
return new IOStreamPair(underlyingIn, underlyingOut);
|
||||
} else if (datanodeId.getXferPort() < 1024) {
|
||||
LOG.debug(
"SASL server skipping handshake in secured configuration with "
+ "privileged port for peer = {}, datanodeId = {}", peer, datanodeId);
|
||||
return new IOStreamPair(underlyingIn, underlyingOut);
|
||||
} else {
|
||||
LOG.debug(
|
||||
"SASL server doing general handshake for peer = {}, datanodeId = {}",
|
||||
peer, datanodeId);
|
||||
return getSaslStreams(peer, underlyingOut, underlyingIn, datanodeId);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Receives SASL negotiation for specialized encrypted handshake.
|
||||
*
|
||||
* @param peer connection peer
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private IOStreamPair getEncryptedStreams(Peer peer,
|
||||
OutputStream underlyingOut, InputStream underlyingIn) throws IOException {
|
||||
if (peer.hasSecureChannel() ||
|
||||
dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) {
|
||||
return new IOStreamPair(underlyingIn, underlyingOut);
|
||||
}
|
||||
|
||||
Map<String, String> saslProps = createSaslPropertiesForEncryption(
|
||||
dnConf.getEncryptionAlgorithm());
|
||||
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Server using encryption algorithm " +
|
||||
dnConf.getEncryptionAlgorithm());
|
||||
}
|
||||
|
||||
CallbackHandler callbackHandler = new SaslServerCallbackHandler(
|
||||
new PasswordFunction() {
|
||||
@Override
|
||||
public char[] apply(String userName) throws IOException {
|
||||
return encryptionKeyToPassword(getEncryptionKeyFromUserName(userName));
|
||||
}
|
||||
});
|
||||
return doSaslHandshake(underlyingOut, underlyingIn, saslProps,
|
||||
callbackHandler);
|
||||
}
|
||||
|
||||
/**
|
||||
* The SASL handshake for encrypted vs. general-purpose uses different logic
|
||||
* for determining the password. This interface is used to parameterize that
|
||||
* logic. It's similar to a Guava Function, but we need to let it throw
|
||||
* exceptions.
|
||||
*/
|
||||
private interface PasswordFunction {
|
||||
|
||||
/**
|
||||
* Returns the SASL password for the given user name.
|
||||
*
|
||||
* @param userName SASL user name
|
||||
* @return SASL password
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
char[] apply(String userName) throws IOException;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets user name and password when asked by the server-side SASL object.
|
||||
*/
|
||||
private static final class SaslServerCallbackHandler
|
||||
implements CallbackHandler {
|
||||
|
||||
private final PasswordFunction passwordFunction;
|
||||
|
||||
/**
|
||||
* Creates a new SaslServerCallbackHandler.
|
||||
*
|
||||
* @param passwordFunction for determining the user's password
|
||||
*/
|
||||
public SaslServerCallbackHandler(PasswordFunction passwordFunction) {
|
||||
this.passwordFunction = passwordFunction;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handle(Callback[] callbacks) throws IOException,
|
||||
UnsupportedCallbackException {
|
||||
NameCallback nc = null;
|
||||
PasswordCallback pc = null;
|
||||
AuthorizeCallback ac = null;
|
||||
for (Callback callback : callbacks) {
|
||||
if (callback instanceof AuthorizeCallback) {
|
||||
ac = (AuthorizeCallback) callback;
|
||||
} else if (callback instanceof PasswordCallback) {
|
||||
pc = (PasswordCallback) callback;
|
||||
} else if (callback instanceof NameCallback) {
|
||||
nc = (NameCallback) callback;
|
||||
} else if (callback instanceof RealmCallback) {
|
||||
continue; // realm is ignored
|
||||
} else {
|
||||
throw new UnsupportedCallbackException(callback,
|
||||
"Unrecognized SASL DIGEST-MD5 Callback: " + callback);
|
||||
}
|
||||
}
|
||||
|
||||
if (pc != null) {
|
||||
pc.setPassword(passwordFunction.apply(nc.getDefaultName()));
|
||||
}
|
||||
|
||||
if (ac != null) {
|
||||
ac.setAuthorized(true);
|
||||
ac.setAuthorizedID(ac.getAuthorizationID());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Given a secret manager and a username encoded for the encrypted handshake,
|
||||
* determine the encryption key.
|
||||
*
|
||||
* @param userName containing the keyId, blockPoolId, and nonce.
|
||||
* @return secret encryption key.
|
||||
* @throws IOException
|
||||
*/
|
||||
private byte[] getEncryptionKeyFromUserName(String userName)
|
||||
throws IOException {
|
||||
String[] nameComponents = userName.split(NAME_DELIMITER);
|
||||
if (nameComponents.length != 3) {
|
||||
throw new IOException("Provided name '" + userName + "' has " +
|
||||
nameComponents.length + " components instead of the expected 3.");
|
||||
}
|
||||
int keyId = Integer.parseInt(nameComponents[0]);
|
||||
String blockPoolId = nameComponents[1];
|
||||
byte[] nonce = Base64.decodeBase64(nameComponents[2]);
|
||||
return blockPoolTokenSecretManager.retrieveDataEncryptionKey(keyId,
|
||||
blockPoolId, nonce);
|
||||
}
|
||||
|
||||
/**
|
||||
* Receives SASL negotiation for general-purpose handshake.
|
||||
*
|
||||
* @param peer connection peer
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param datanodeId ID of DataNode accepting connection
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private IOStreamPair getSaslStreams(Peer peer, OutputStream underlyingOut,
|
||||
InputStream underlyingIn, final DatanodeID datanodeId) throws IOException {
|
||||
SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
|
||||
if (saslPropsResolver == null) {
|
||||
throw new IOException(String.format("Cannot create a secured " +
|
||||
"connection if DataNode listens on unprivileged port (%d) and no " +
|
||||
"protection is defined in configuration property %s.",
|
||||
datanodeId.getXferPort(), DFS_DATA_TRANSFER_PROTECTION_KEY));
|
||||
}
|
||||
Map<String, String> saslProps = saslPropsResolver.getServerProperties(
|
||||
getPeerAddress(peer));
|
||||
|
||||
CallbackHandler callbackHandler = new SaslServerCallbackHandler(
|
||||
new PasswordFunction() {
|
||||
@Override
|
||||
public char[] apply(String userName) throws IOException {
|
||||
return buildServerPassword(userName);
|
||||
}
|
||||
});
|
||||
return doSaslHandshake(underlyingOut, underlyingIn, saslProps,
|
||||
callbackHandler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates the expected correct password on the server side for the
|
||||
* general-purpose handshake. The password consists of the block access
|
||||
* token's password (known to the DataNode via its secret manager). This
|
||||
* expects that the client has supplied a user name consisting of its
|
||||
* serialized block access token identifier.
|
||||
*
|
||||
* @param userName SASL user name containing serialized block access token
|
||||
* identifier
|
||||
* @return expected correct SASL password
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private char[] buildServerPassword(String userName) throws IOException {
|
||||
BlockTokenIdentifier identifier = deserializeIdentifier(userName);
|
||||
byte[] tokenPassword = blockPoolTokenSecretManager.retrievePassword(
|
||||
identifier);
|
||||
return (new String(Base64.encodeBase64(tokenPassword, false),
|
||||
Charsets.UTF_8)).toCharArray();
|
||||
}
|
||||
|
||||
/**
|
||||
* Deserializes a base64-encoded binary representation of a block access
|
||||
* token.
|
||||
*
|
||||
* @param str String to deserialize
|
||||
* @return BlockTokenIdentifier deserialized from str
|
||||
* @throws IOException if there is any I/O error
|
||||
*/
|
||||
private BlockTokenIdentifier deserializeIdentifier(String str)
|
||||
throws IOException {
|
||||
BlockTokenIdentifier identifier = new BlockTokenIdentifier();
|
||||
identifier.readFields(new DataInputStream(new ByteArrayInputStream(
|
||||
Base64.decodeBase64(str))));
|
||||
return identifier;
|
||||
}
|
||||
|
||||
/**
|
||||
* This method actually executes the server-side SASL handshake.
|
||||
*
|
||||
* @param underlyingOut connection output stream
|
||||
* @param underlyingIn connection input stream
|
||||
* @param saslProps properties of SASL negotiation
|
||||
* @param callbackHandler for responding to SASL callbacks
|
||||
* @return new pair of streams, wrapped after SASL negotiation
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private IOStreamPair doSaslHandshake(OutputStream underlyingOut,
|
||||
InputStream underlyingIn, Map<String, String> saslProps,
|
||||
CallbackHandler callbackHandler) throws IOException {
|
||||
|
||||
DataInputStream in = new DataInputStream(underlyingIn);
|
||||
DataOutputStream out = new DataOutputStream(underlyingOut);
|
||||
|
||||
SaslParticipant sasl = SaslParticipant.createServerSaslParticipant(saslProps,
|
||||
callbackHandler);
|
||||
|
||||
int magicNumber = in.readInt();
|
||||
if (magicNumber != SASL_TRANSFER_MAGIC_NUMBER) {
|
||||
throw new InvalidMagicNumberException(magicNumber);
|
||||
}
|
||||
try {
|
||||
// step 1
|
||||
performSaslStep1(out, in, sasl);
|
||||
|
||||
// step 2 (server-side only)
|
||||
byte[] remoteResponse = readSaslMessage(in);
|
||||
byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
|
||||
sendSaslMessage(out, localResponse);
|
||||
|
||||
// SASL handshake is complete
|
||||
checkSaslComplete(sasl, saslProps);
|
||||
|
||||
return sasl.createStreamPair(out, in);
|
||||
} catch (IOException ioe) {
|
||||
if (ioe instanceof SaslException &&
|
||||
ioe.getCause() != null &&
|
||||
ioe.getCause() instanceof InvalidEncryptionKeyException) {
|
||||
// This could just be because the client is long-lived and hasn't gotten
|
||||
// a new encryption key from the NN in a while. Upon receiving this
|
||||
// error, the client will get a new encryption key from the NN and retry
|
||||
// connecting to this DN.
|
||||
sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage());
|
||||
} else {
|
||||
sendGenericSaslErrorMessage(out, ioe.getMessage());
|
||||
}
|
||||
throw ioe;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends a SASL negotiation message indicating an invalid key error.
|
||||
*
|
||||
* @param out stream to receive message
|
||||
* @param message to send
|
||||
* @throws IOException for any error
|
||||
*/
|
||||
private static void sendInvalidKeySaslErrorMessage(DataOutputStream out,
|
||||
String message) throws IOException {
|
||||
sendSaslMessage(out, DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY, null,
|
||||
message);
|
||||
}
|
||||
}
|
|
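A rough server-side wiring sketch (not part of this patch) mirroring the DataNode change below; dnConf, blockPoolTokenSecretManager, peer and datanodeId are assumed:

  SaslDataTransferServer saslServer =
      new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
  IOStreamPair saslStreams = saslServer.receive(peer, peer.getOutputStream(),
      peer.getInputStream(), datanodeId);
  // block reads and writes then go through saslStreams.in / saslStreams.out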
@@ -0,0 +1,166 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
|
||||
|
||||
import java.io.DataInputStream;
|
||||
import java.io.DataOutputStream;
|
||||
import java.util.Map;
|
||||
import javax.security.auth.callback.CallbackHandler;
|
||||
import javax.security.sasl.Sasl;
|
||||
import javax.security.sasl.SaslClient;
|
||||
import javax.security.sasl.SaslException;
|
||||
import javax.security.sasl.SaslServer;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
|
||||
import org.apache.hadoop.security.SaslInputStream;
|
||||
import org.apache.hadoop.security.SaslOutputStream;
|
||||
|
||||
/**
|
||||
* Strongly inspired by Thrift's TSaslTransport class.
|
||||
*
|
||||
* Used to abstract over the <code>SaslServer</code> and
|
||||
* <code>SaslClient</code> classes, which share a lot of their interface, but
|
||||
* unfortunately don't share a common superclass.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class SaslParticipant {
|
||||
|
||||
// This has to be set as part of the SASL spec, but it doesn't matter for
// our purposes; it just may not be empty. It's sent over the wire, so use
// a short string.
|
||||
private static final String SERVER_NAME = "0";
|
||||
private static final String PROTOCOL = "hdfs";
|
||||
private static final String MECHANISM = "DIGEST-MD5";
|
||||
|
||||
// One of these will always be null.
|
||||
private final SaslServer saslServer;
|
||||
private final SaslClient saslClient;
|
||||
|
||||
/**
|
||||
* Creates a SaslParticipant wrapping a SaslServer.
|
||||
*
|
||||
* @param saslProps properties of SASL negotiation
|
||||
* @param callbackHandler for handling all SASL callbacks
|
||||
* @return SaslParticipant wrapping SaslServer
|
||||
* @throws SaslException for any error
|
||||
*/
|
||||
public static SaslParticipant createServerSaslParticipant(
|
||||
Map<String, String> saslProps, CallbackHandler callbackHandler)
|
||||
throws SaslException {
|
||||
return new SaslParticipant(Sasl.createSaslServer(MECHANISM,
|
||||
PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a SaslParticipant wrapping a SaslClient.
|
||||
*
|
||||
* @param userName SASL user name
|
||||
* @param saslProps properties of SASL negotiation
|
||||
* @param callbackHandler for handling all SASL callbacks
|
||||
* @return SaslParticipant wrapping SaslClient
|
||||
* @throws SaslException for any error
|
||||
*/
|
||||
public static SaslParticipant createClientSaslParticipant(String userName,
|
||||
Map<String, String> saslProps, CallbackHandler callbackHandler)
|
||||
throws SaslException {
|
||||
return new SaslParticipant(Sasl.createSaslClient(new String[] { MECHANISM },
|
||||
userName, PROTOCOL, SERVER_NAME, saslProps, callbackHandler));
|
||||
}
|
||||
|
||||
/**
|
||||
* Private constructor wrapping a SaslServer.
|
||||
*
|
||||
* @param saslServer to wrap
|
||||
*/
|
||||
private SaslParticipant(SaslServer saslServer) {
|
||||
this.saslServer = saslServer;
|
||||
this.saslClient = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Private constructor wrapping a SaslClient.
|
||||
*
|
||||
* @param saslClient to wrap
|
||||
*/
|
||||
private SaslParticipant(SaslClient saslClient) {
|
||||
this.saslServer = null;
|
||||
this.saslClient = saslClient;
|
||||
}
|
||||
|
||||
/**
|
||||
* @see SaslServer#evaluateResponse
|
||||
* @see SaslClient#evaluateChallenge
|
||||
*/
|
||||
public byte[] evaluateChallengeOrResponse(byte[] challengeOrResponse)
|
||||
throws SaslException {
|
||||
if (saslClient != null) {
|
||||
return saslClient.evaluateChallenge(challengeOrResponse);
|
||||
} else {
|
||||
return saslServer.evaluateResponse(challengeOrResponse);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* After successful SASL negotiation, returns the negotiated quality of
|
||||
* protection.
|
||||
*
|
||||
* @return negotiated quality of protection
|
||||
*/
|
||||
public String getNegotiatedQop() {
|
||||
if (saslClient != null) {
|
||||
return (String) saslClient.getNegotiatedProperty(Sasl.QOP);
|
||||
} else {
|
||||
return (String) saslServer.getNegotiatedProperty(Sasl.QOP);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if SASL negotiation is complete.
|
||||
*
|
||||
* @return true if SASL negotiation is complete
|
||||
*/
|
||||
public boolean isComplete() {
|
||||
if (saslClient != null) {
|
||||
return saslClient.isComplete();
|
||||
} else {
|
||||
return saslServer.isComplete();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return some input/output streams that may henceforth have their
|
||||
* communication encrypted, depending on the negotiated quality of protection.
|
||||
*
|
||||
* @param out output stream to wrap
|
||||
* @param in input stream to wrap
|
||||
* @return IOStreamPair wrapping the streams
|
||||
*/
|
||||
public IOStreamPair createStreamPair(DataOutputStream out,
|
||||
DataInputStream in) {
|
||||
if (saslClient != null) {
|
||||
return new IOStreamPair(
|
||||
new SaslInputStream(in, saslClient),
|
||||
new SaslOutputStream(out, saslClient));
|
||||
} else {
|
||||
return new IOStreamPair(
|
||||
new SaslInputStream(in, saslServer),
|
||||
new SaslOutputStream(out, saslServer));
|
||||
}
|
||||
}
|
||||
}
|
|
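A minimal sketch (not part of this patch) of the client-side flow this class supports; userName, saslProps, callbackHandler and the wrapped data streams are assumed:

  SaslParticipant sasl = SaslParticipant.createClientSaslParticipant(
      userName, saslProps, callbackHandler);
  // exchange messages with the peer, feeding each remote token into the participant
  byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse);
  // once the mechanism reports completion, inspect the QOP and wrap the streams
  if (sasl.isComplete()) {
    String qop = sasl.getNegotiatedQop();   // e.g. "auth", "auth-int", "auth-conf"
    IOStreamPair wrapped = sasl.createStreamPair(out, in);
  }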
@@ -18,6 +18,8 @@
|
|||
package org.apache.hadoop.hdfs.server.balancer;
|
||||
|
||||
import static com.google.common.base.Preconditions.checkArgument;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
|
||||
import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
|
||||
|
||||
import java.io.BufferedInputStream;
|
||||
|
@@ -62,9 +64,11 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
|||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
|
@@ -202,6 +206,7 @@ public class Balancer {
|
|||
|
||||
private final NameNodeConnector nnc;
|
||||
private final BalancingPolicy policy;
|
||||
private final SaslDataTransferClient saslClient;
|
||||
private final double threshold;
|
||||
|
||||
// all data node lists
|
||||
|
@@ -352,19 +357,18 @@ public class Balancer {
|
|||
|
||||
OutputStream unbufOut = sock.getOutputStream();
|
||||
InputStream unbufIn = sock.getInputStream();
|
||||
if (nnc.getDataEncryptionKey() != null) {
|
||||
IOStreamPair encryptedStreams =
|
||||
DataTransferEncryptor.getEncryptedStreams(
|
||||
unbufOut, unbufIn, nnc.getDataEncryptionKey());
|
||||
unbufOut = encryptedStreams.out;
|
||||
unbufIn = encryptedStreams.in;
|
||||
}
|
||||
ExtendedBlock eb = new ExtendedBlock(nnc.blockpoolID, block.getBlock());
|
||||
Token<BlockTokenIdentifier> accessToken = nnc.getAccessToken(eb);
|
||||
IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
|
||||
unbufIn, nnc, accessToken, target.datanode);
|
||||
unbufOut = saslStreams.out;
|
||||
unbufIn = saslStreams.in;
|
||||
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
|
||||
HdfsConstants.IO_FILE_BUFFER_SIZE));
|
||||
in = new DataInputStream(new BufferedInputStream(unbufIn,
|
||||
HdfsConstants.IO_FILE_BUFFER_SIZE));
|
||||
|
||||
sendRequest(out);
|
||||
sendRequest(out, eb, accessToken);
|
||||
receiveResponse(in);
|
||||
bytesMoved.addAndGet(block.getNumBytes());
|
||||
LOG.info("Successfully moved " + this);
|
||||
|
@@ -395,9 +399,8 @@ public class Balancer {
|
|||
}
|
||||
|
||||
/* Send a block replace request to the output stream*/
|
||||
private void sendRequest(DataOutputStream out) throws IOException {
|
||||
final ExtendedBlock eb = new ExtendedBlock(nnc.blockpoolID, block.getBlock());
|
||||
final Token<BlockTokenIdentifier> accessToken = nnc.getAccessToken(eb);
|
||||
private void sendRequest(DataOutputStream out, ExtendedBlock eb,
|
||||
Token<BlockTokenIdentifier> accessToken) throws IOException {
|
||||
new Sender(out).replaceBlock(eb, accessToken,
|
||||
source.getStorageID(), proxySource.getDatanode());
|
||||
}
|
||||
|
@@ -876,6 +879,12 @@ public class Balancer {
|
|||
this.maxConcurrentMovesPerNode =
|
||||
conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
|
||||
DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
|
||||
this.saslClient = new SaslDataTransferClient(
|
||||
DataTransferSaslUtil.getSaslPropertiesResolver(conf),
|
||||
TrustedChannelResolver.getInstance(conf),
|
||||
conf.getBoolean(
|
||||
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
|
||||
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
|
||||
}
|
||||
|
||||
/* Given a data node set, build a network topology and decide
|
||||
|
|
|
@@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.NameNodeProxies;
|
|||
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
|
||||
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
|
||||
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
|
||||
|
@@ -50,7 +50,7 @@ import org.apache.hadoop.util.Daemon;
|
|||
* The class provides utilities for {@link Balancer} to access a NameNode
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class NameNodeConnector {
|
||||
class NameNodeConnector implements DataEncryptionKeyFactory {
|
||||
private static final Log LOG = Balancer.LOG;
|
||||
private static final Path BALANCER_ID_PATH = new Path("/system/balancer.id");
|
||||
private static final int MAX_NOT_CHANGED_ITERATIONS = 5;
|
||||
|
@@ -72,7 +72,6 @@ class NameNodeConnector {
|
|||
private BlockTokenSecretManager blockTokenSecretManager;
|
||||
private Daemon keyupdaterthread; // AccessKeyUpdater thread
|
||||
private DataEncryptionKey encryptionKey;
|
||||
private final TrustedChannelResolver trustedChannelResolver;
|
||||
|
||||
NameNodeConnector(URI nameNodeUri,
|
||||
Configuration conf) throws IOException {
|
||||
|
@@ -122,7 +121,6 @@ class NameNodeConnector {
|
|||
if (out == null) {
|
||||
throw new IOException("Another balancer is running");
|
||||
}
|
||||
this.trustedChannelResolver = TrustedChannelResolver.getInstance(conf);
|
||||
}
|
||||
|
||||
boolean shouldContinue(long dispatchBlockMoveBytes) {
|
||||
|
@@ -154,10 +152,10 @@ class NameNodeConnector {
|
|||
BlockTokenSecretManager.AccessMode.COPY));
|
||||
}
|
||||
}
|
||||
|
||||
DataEncryptionKey getDataEncryptionKey()
|
||||
throws IOException {
|
||||
if (encryptDataTransfer && !this.trustedChannelResolver.isTrusted()) {
|
||||
|
||||
@Override
|
||||
public DataEncryptionKey newDataEncryptionKey() {
|
||||
if (encryptDataTransfer) {
|
||||
synchronized (this) {
|
||||
if (encryptionKey == null) {
|
||||
encryptionKey = blockTokenSecretManager.generateDataEncryptionKey();
|
||||
|
|
|
@@ -310,18 +310,11 @@ class BlockPoolSliceScanner {
|
|||
}
|
||||
}
|
||||
|
||||
private synchronized void updateScanStatus(Block block,
|
||||
private synchronized void updateScanStatus(BlockScanInfo info,
|
||||
ScanType type,
|
||||
boolean scanOk) {
|
||||
BlockScanInfo info = blockMap.get(block);
|
||||
|
||||
if ( info != null ) {
|
||||
delBlockInfo(info);
|
||||
} else {
|
||||
// It might already be removed. Thats ok, it will be caught next time.
|
||||
info = new BlockScanInfo(block);
|
||||
}
|
||||
|
||||
delBlockInfo(info);
|
||||
|
||||
long now = Time.monotonicNow();
|
||||
info.lastScanType = type;
|
||||
info.lastScanTime = now;
|
||||
|
@@ -334,8 +327,8 @@ class BlockPoolSliceScanner {
|
|||
}
|
||||
|
||||
if (verificationLog != null) {
|
||||
verificationLog.append(now, block.getGenerationStamp(),
|
||||
block.getBlockId());
|
||||
verificationLog.append(now, info.getGenerationStamp(),
|
||||
info.getBlockId());
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -434,11 +427,13 @@ class BlockPoolSliceScanner {
|
|||
totalTransientErrors++;
|
||||
}
|
||||
|
||||
updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, true);
|
||||
updateScanStatus((BlockScanInfo)block.getLocalBlock(),
|
||||
ScanType.VERIFICATION_SCAN, true);
|
||||
|
||||
return;
|
||||
} catch (IOException e) {
|
||||
updateScanStatus(block.getLocalBlock(), ScanType.VERIFICATION_SCAN, false);
|
||||
updateScanStatus((BlockScanInfo)block.getLocalBlock(),
|
||||
ScanType.VERIFICATION_SCAN, false);
|
||||
|
||||
// If the block does not exist anymore, then it's not an error
|
||||
if (!dataset.contains(block)) {
|
||||
|
@@ -497,7 +492,7 @@ class BlockPoolSliceScanner {
|
|||
|
||||
// Picks one block and verifies it
|
||||
private void verifyFirstBlock() {
|
||||
Block block = null;
|
||||
BlockScanInfo block = null;
|
||||
synchronized (this) {
|
||||
if (!blockInfoSet.isEmpty()) {
|
||||
block = blockInfoSet.first();
|
||||
|
|
|
@@ -52,7 +52,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_RESTART_REPLICA_
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.security.SaslPropertiesResolver;
|
||||
|
||||
/**
|
||||
* Simple class encapsulating all of the configuration that the DataNode
|
||||
|
@@ -86,6 +88,7 @@ public class DNConf {
|
|||
|
||||
final String minimumNameNodeVersion;
|
||||
final String encryptionAlgorithm;
|
||||
final SaslPropertiesResolver saslPropsResolver;
|
||||
final TrustedChannelResolver trustedChannelResolver;
|
||||
|
||||
final long xceiverStopTimeout;
|
||||
|
@@ -168,6 +171,8 @@ public class DNConf {
|
|||
DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
|
||||
this.encryptionAlgorithm = conf.get(DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
|
||||
this.trustedChannelResolver = TrustedChannelResolver.getInstance(conf);
|
||||
this.saslPropsResolver = DataTransferSaslUtil.getSaslPropertiesResolver(
|
||||
conf);
|
||||
|
||||
this.xceiverStopTimeout = conf.getLong(
|
||||
DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
|
||||
|
@@ -186,7 +191,26 @@ public class DNConf {
|
|||
String getMinimumNameNodeVersion() {
|
||||
return this.minimumNameNodeVersion;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Returns true if encryption is enabled for DataTransferProtocol.
*
* @return true if encryption is enabled for DataTransferProtocol
|
||||
*/
|
||||
public boolean getEncryptDataTransfer() {
|
||||
return encryptDataTransfer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns encryption algorithm configured for DataTransferProtocol, or null
|
||||
* if not configured.
|
||||
*
|
||||
* @return encryption algorithm configured for DataTransferProtocol
|
||||
*/
|
||||
public String getEncryptionAlgorithm() {
|
||||
return encryptionAlgorithm;
|
||||
}
|
||||
|
||||
public long getXceiverStopTimeout() {
|
||||
return xceiverStopTimeout;
|
||||
}
|
||||
|
@@ -194,4 +218,24 @@ public class DNConf {
|
|||
public long getMaxLockedMemory() {
|
||||
return maxLockedMemory;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the SaslPropertiesResolver configured for use with
|
||||
* DataTransferProtocol, or null if not configured.
|
||||
*
|
||||
* @return SaslPropertiesResolver configured for use with DataTransferProtocol
|
||||
*/
|
||||
public SaslPropertiesResolver getSaslPropsResolver() {
|
||||
return saslPropsResolver;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the TrustedChannelResolver configured for use with
|
||||
* DataTransferProtocol, or null if not configured.
|
||||
*
|
||||
* @return TrustedChannelResolver configured for use with DataTransferProtocol
|
||||
*/
|
||||
public TrustedChannelResolver getTrustedChannelResolver() {
|
||||
return trustedChannelResolver;
|
||||
}
|
||||
}
|
||||
|
|
|
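A brief sketch (not part of this patch) of how a caller reads the new DNConf accessors; constructing DNConf directly from a Configuration is assumed here:

  DNConf dnConf = new DNConf(conf);                      // constructor assumed
  boolean encrypt = dnConf.getEncryptDataTransfer();
  String algorithm = dnConf.getEncryptionAlgorithm();    // null if not configured
  SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
  TrustedChannelResolver trustedChannelResolver = dnConf.getTrustedChannelResolver();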
@@ -17,6 +17,9 @@
|
|||
*/
|
||||
package org.apache.hadoop.hdfs.server.datanode;
|
||||
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Joiner;
|
||||
import com.google.common.base.Preconditions;
|
||||
|
@@ -40,6 +43,9 @@ import org.apache.hadoop.hdfs.net.DomainPeerServer;
|
|||
import org.apache.hadoop.hdfs.net.TcpPeerServer;
|
||||
import org.apache.hadoop.hdfs.protocol.*;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.*;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
|
||||
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
|
||||
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
|
||||
|
@@ -224,6 +230,8 @@ public class DataNode extends Configured
|
|||
private final List<String> usersWithLocalPathAccess;
|
||||
private final boolean connectToDnViaHostname;
|
||||
ReadaheadPool readaheadPool;
|
||||
SaslDataTransferClient saslClient;
|
||||
SaslDataTransferServer saslServer;
|
||||
private final boolean getHdfsBlockLocationsEnabled;
|
||||
private ObjectName dataNodeInfoBeanName;
|
||||
private Thread checkDiskErrorThread = null;
|
||||
|
@@ -722,15 +730,10 @@ public class DataNode extends Configured
|
|||
*/
|
||||
void startDataNode(Configuration conf,
|
||||
List<StorageLocation> dataDirs,
|
||||
// DatanodeProtocol namenode,
|
||||
SecureResources resources
|
||||
) throws IOException {
|
||||
if(UserGroupInformation.isSecurityEnabled() && resources == null) {
|
||||
if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
|
||||
throw new RuntimeException("Cannot start secure cluster without "
|
||||
+ "privileged resources.");
|
||||
}
|
||||
}
|
||||
|
||||
checkSecureConfig(conf, resources);
|
||||
|
||||
// settings global for all BPs in the Data Node
|
||||
this.secureResources = resources;
|
||||
|
@ -745,15 +748,19 @@ public class DataNode extends Configured
|
|||
" size (%s) is greater than zero and native code is not available.",
|
||||
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
|
||||
}
|
||||
long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
|
||||
if (dnConf.maxLockedMemory > ulimit) {
|
||||
throw new RuntimeException(String.format(
|
||||
"Cannot start datanode because the configured max locked memory" +
|
||||
" size (%s) of %d bytes is more than the datanode's available" +
|
||||
" RLIMIT_MEMLOCK ulimit of %d bytes.",
|
||||
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
|
||||
dnConf.maxLockedMemory,
|
||||
ulimit));
|
||||
if (Path.WINDOWS) {
|
||||
NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
|
||||
} else {
|
||||
long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
|
||||
if (dnConf.maxLockedMemory > ulimit) {
|
||||
throw new RuntimeException(String.format(
|
||||
"Cannot start datanode because the configured max locked memory" +
|
||||
" size (%s) of %d bytes is more than the datanode's available" +
|
||||
" RLIMIT_MEMLOCK ulimit of %d bytes.",
|
||||
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
|
||||
dnConf.maxLockedMemory,
|
||||
ulimit));
|
||||
}
|
||||
}
|
||||
}
|
||||
LOG.info("Starting DataNode with maxLockedMemory = " +
|
||||
|
@ -786,6 +793,55 @@ public class DataNode extends Configured
|
|||
// Create the ReadaheadPool from the DataNode context so we can
|
||||
// exit without having to explicitly shutdown its thread pool.
|
||||
readaheadPool = ReadaheadPool.getInstance();
|
||||
saslClient = new SaslDataTransferClient(dnConf.saslPropsResolver,
|
||||
dnConf.trustedChannelResolver,
|
||||
conf.getBoolean(
|
||||
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
|
||||
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
|
||||
saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the DataNode has a secure configuration if security is enabled.
|
||||
* There are 2 possible configurations that are considered secure:
|
||||
* 1. The server has bound to privileged ports for RPC and HTTP via
|
||||
* SecureDataNodeStarter.
|
||||
* 2. The configuration enables SASL on DataTransferProtocol and HTTPS (no
|
||||
* plain HTTP) for the HTTP server. The SASL handshake guarantees
|
||||
* authentication of the RPC server before a client transmits a secret, such
|
||||
* as a block access token. Similarly, SSL guarantees authentication of the
|
||||
* HTTP server before a client transmits a secret, such as a delegation
|
||||
* token.
|
||||
* It is not possible to run with both privileged ports and SASL on
|
||||
* DataTransferProtocol. For backwards-compatibility, the connection logic
|
||||
* must check if the target port is a privileged port, and if so, skip the
|
||||
* SASL handshake.
|
||||
*
|
||||
* @param conf Configuration to check
|
||||
* @param resources SecuredResources obtained for DataNode
|
||||
* @throws RuntimeException if security enabled, but configuration is insecure
|
||||
*/
|
||||
private static void checkSecureConfig(Configuration conf,
|
||||
SecureResources resources) throws RuntimeException {
|
||||
if (!UserGroupInformation.isSecurityEnabled()) {
|
||||
return;
|
||||
}
|
||||
String dataTransferProtection = conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY);
|
||||
if (resources != null && dataTransferProtection == null) {
|
||||
return;
|
||||
}
|
||||
if (conf.getBoolean("ignore.secure.ports.for.testing", false)) {
|
||||
return;
|
||||
}
|
||||
if (dataTransferProtection != null &&
|
||||
DFSUtil.getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY &&
|
||||
resources == null) {
|
||||
return;
|
||||
}
|
||||
throw new RuntimeException("Cannot start secure DataNode without " +
|
||||
"configuring either privileged resources or SASL RPC data transfer " +
|
||||
"protection and SSL for HTTP. Using privileged resources in " +
|
||||
"combination with SASL RPC data transfer protection is not supported.");
|
||||
}
|
||||
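// Illustrative sketch (not part of this change): one way to satisfy the second
// secure configuration described in the javadoc above, assuming the usual HDFS
// property names "dfs.data.transfer.protection" and "dfs.http.policy". With
// these settings and no privileged resources, checkSecureConfig() returns
// without throwing.
Configuration conf = new HdfsConfiguration();
conf.set("dfs.data.transfer.protection", "privacy"); // enables SASL on DataTransferProtocol
conf.set("dfs.http.policy", "HTTPS_ONLY");           // no plain HTTP endpoint
// SecureResources resources = null;                 // not started via SecureDataNodeStarter
// checkSecureConfig(conf, resources);               // would pass the checks above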

public static String generateUuid() {
@@ -1619,28 +1675,6 @@ public class DataNode extends Configured
NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
sock.setSoTimeout(targets.length * dnConf.socketTimeout);

long writeTimeout = dnConf.socketWriteTimeout +
HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(sock);
if (dnConf.encryptDataTransfer &&
!dnConf.trustedChannelResolver.isTrusted(sock.getInetAddress())) {
IOStreamPair encryptedStreams =
DataTransferEncryptor.getEncryptedStreams(
unbufOut, unbufIn,
blockPoolTokenSecretManager.generateDataEncryptionKey(
b.getBlockPoolId()));
unbufOut = encryptedStreams.out;
unbufIn = encryptedStreams.in;
}

out = new DataOutputStream(new BufferedOutputStream(unbufOut,
HdfsConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(unbufIn);
blockSender = new BlockSender(b, 0, b.getNumBytes(),
false, false, true, DataNode.this, null, cachingStrategy);
DatanodeInfo srcNode = new DatanodeInfo(bpReg);

//
// Header info
//
@@ -1650,6 +1684,24 @@ public class DataNode extends Configured
EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
}

long writeTimeout = dnConf.socketWriteTimeout +
HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(sock);
DataEncryptionKeyFactory keyFactory =
getDataEncryptionKeyFactoryForBlock(b);
IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
unbufIn, keyFactory, accessToken, bpReg);
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;

out = new DataOutputStream(new BufferedOutputStream(unbufOut,
HdfsConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(unbufIn);
blockSender = new BlockSender(b, 0, b.getNumBytes(),
false, false, true, DataNode.this, null, cachingStrategy);
DatanodeInfo srcNode = new DatanodeInfo(bpReg);

new Sender(out).writeBlock(b, accessToken, clientname, targets, srcNode,
stage, 0, 0, 0, 0, blockSender.getChecksum(), cachingStrategy);

@@ -1692,7 +1744,26 @@ public class DataNode extends Configured
}
}
}


/**
* Returns a new DataEncryptionKeyFactory that generates a key from the
* BlockPoolTokenSecretManager, using the block pool ID of the given block.
*
* @param block for which the factory needs to create a key
* @return DataEncryptionKeyFactory for block's block pool ID
*/
DataEncryptionKeyFactory getDataEncryptionKeyFactoryForBlock(
final ExtendedBlock block) {
return new DataEncryptionKeyFactory() {
@Override
public DataEncryptionKey newDataEncryptionKey() {
return dnConf.encryptDataTransfer ?
blockPoolTokenSecretManager.generateDataEncryptionKey(
block.getBlockPoolId()) : null;
}
};
}

/**
* After a block becomes finalized, a datanode increases metric counter,
* notifies namenode, and adds it to the block scanner
@@ -2299,11 +2370,11 @@ public class DataNode extends Configured

@Override // ClientDataNodeProtocol
public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
checkWriteAccess(block);
checkReadAccess(block);
return data.getReplicaVisibleLength(block);
}

private void checkWriteAccess(final ExtendedBlock block) throws IOException {
private void checkReadAccess(final ExtendedBlock block) throws IOException {
if (isBlockTokenEnabled) {
Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
.getTokenIdentifiers();

@@ -36,11 +36,9 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.nio.channels.ClosedChannelException;
import java.security.MessageDigest;
import java.util.Arrays;
@@ -52,13 +50,12 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor.InvalidMagicNumberException;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
@@ -85,7 +82,6 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;

import com.google.common.base.Preconditions;
import com.google.common.net.InetAddresses;
import com.google.protobuf.ByteString;


@@ -174,24 +170,11 @@ class DataXceiver extends Receiver implements Runnable {
dataXceiverServer.addPeer(peer, Thread.currentThread());
peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
InputStream input = socketIn;
if ((!peer.hasSecureChannel()) && dnConf.encryptDataTransfer &&
!dnConf.trustedChannelResolver.isTrusted(getClientAddress(peer))){
IOStreamPair encryptedStreams = null;
try {
encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
socketIn, datanode.blockPoolTokenSecretManager,
dnConf.encryptionAlgorithm);
} catch (InvalidMagicNumberException imne) {
LOG.info("Failed to read expected encryption handshake from client " +
"at " + peer.getRemoteAddressString() + ". Perhaps the client " +
"is running an older version of Hadoop which does not support " +
"encryption");
return;
}
input = encryptedStreams.in;
socketOut = encryptedStreams.out;
}
input = new BufferedInputStream(input, HdfsConstants.SMALL_BUFFER_SIZE);
IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
socketIn, datanode.getDatanodeId());
input = new BufferedInputStream(saslStreams.in,
HdfsConstants.SMALL_BUFFER_SIZE);
socketOut = saslStreams.out;

super.initialize(new DataInputStream(input));

@@ -263,19 +246,6 @@ class DataXceiver extends Receiver implements Runnable {
}
}
}

/**
* Returns InetAddress from peer
* The getRemoteAddressString is the form /ip-address:port
* The ip-address is extracted from peer and InetAddress is formed
* @param peer
* @return
* @throws UnknownHostException
*/
private static InetAddress getClientAddress(Peer peer) {
return InetAddresses.forString(
peer.getRemoteAddressString().split(":")[0].substring(1));
}

@Override
public void requestShortCircuitFds(final ExtendedBlock blk,
@@ -656,17 +626,12 @@ class DataXceiver extends Receiver implements Runnable {
OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock,
writeTimeout);
InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
if (dnConf.encryptDataTransfer &&
!dnConf.trustedChannelResolver.isTrusted(mirrorSock.getInetAddress())) {
IOStreamPair encryptedStreams =
DataTransferEncryptor.getEncryptedStreams(
unbufMirrorOut, unbufMirrorIn,
datanode.blockPoolTokenSecretManager
.generateDataEncryptionKey(block.getBlockPoolId()));

unbufMirrorOut = encryptedStreams.out;
unbufMirrorIn = encryptedStreams.in;
}
DataEncryptionKeyFactory keyFactory =
datanode.getDataEncryptionKeyFactoryForBlock(block);
IOStreamPair saslStreams = datanode.saslClient.socketSend(mirrorSock,
unbufMirrorOut, unbufMirrorIn, keyFactory, blockToken, targets[0]);
unbufMirrorOut = saslStreams.out;
unbufMirrorIn = saslStreams.in;
mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut,
HdfsConstants.SMALL_BUFFER_SIZE));
mirrorIn = new DataInputStream(unbufMirrorIn);
@@ -1026,17 +991,12 @@ class DataXceiver extends Receiver implements Runnable {
OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock,
dnConf.socketWriteTimeout);
InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
if (dnConf.encryptDataTransfer &&
!dnConf.trustedChannelResolver.isTrusted(
proxySock.getInetAddress())) {
IOStreamPair encryptedStreams =
DataTransferEncryptor.getEncryptedStreams(
unbufProxyOut, unbufProxyIn,
datanode.blockPoolTokenSecretManager
.generateDataEncryptionKey(block.getBlockPoolId()));
unbufProxyOut = encryptedStreams.out;
unbufProxyIn = encryptedStreams.in;
}
DataEncryptionKeyFactory keyFactory =
datanode.getDataEncryptionKeyFactoryForBlock(block);
IOStreamPair saslStreams = datanode.saslClient.socketSend(proxySock,
unbufProxyOut, unbufProxyIn, keyFactory, blockToken, proxySource);
unbufProxyOut = saslStreams.out;
unbufProxyIn = saslStreams.in;

proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut,
HdfsConstants.SMALL_BUFFER_SIZE));

@@ -81,7 +81,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
import org.apache.hadoop.hdfs.util.ByteArray;
@@ -101,7 +101,7 @@ import com.google.common.collect.Lists;
**/
@InterfaceAudience.Private
public class FSDirectory implements Closeable {
private static INodeDirectorySnapshottable createRoot(FSNamesystem namesystem) {
private static INodeDirectory createRoot(FSNamesystem namesystem) {
final INodeDirectory r = new INodeDirectory(
INodeId.ROOT_INODE_ID,
INodeDirectory.ROOT_NAME,
@@ -110,9 +110,9 @@ public class FSDirectory implements Closeable {
r.addDirectoryWithQuotaFeature(
DirectoryWithQuotaFeature.DEFAULT_NAMESPACE_QUOTA,
DirectoryWithQuotaFeature.DEFAULT_DISKSPACE_QUOTA);
final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
s.setSnapshotQuota(0);
return s;
r.addSnapshottableFeature();
r.setSnapshotQuota(0);
return r;
}

@VisibleForTesting
@@ -606,8 +606,7 @@ public class FSDirectory implements Closeable {

ezManager.checkMoveValidity(srcIIP, dstIIP, src);
final INode dstInode = dstIIP.getLastINode();
List<INodeDirectorySnapshottable> snapshottableDirs =
new ArrayList<INodeDirectorySnapshottable>();
List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
if (dstInode != null) { // Destination exists
validateRenameOverwrite(src, dst, overwrite, srcInode, dstInode);
checkSnapshot(dstInode, snapshottableDirs);
@@ -676,7 +675,7 @@ public class FSDirectory implements Closeable {
dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
true).get(Quota.NAMESPACE);
getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
removedINodes);
removedINodes, false);
}
}

@@ -1133,8 +1132,7 @@ public class FSDirectory implements Closeable {
if (!deleteAllowed(inodesInPath, src) ) {
filesRemoved = -1;
} else {
List<INodeDirectorySnapshottable> snapshottableDirs =
new ArrayList<INodeDirectorySnapshottable>();
List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
removedINodes, mtime);
@@ -1204,8 +1202,7 @@ public class FSDirectory implements Closeable {
normalizePath(src), false);
long filesRemoved = -1;
if (deleteAllowed(inodesInPath, src)) {
List<INodeDirectorySnapshottable> snapshottableDirs =
new ArrayList<INodeDirectorySnapshottable>();
List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
removedINodes, mtime);
@@ -1214,7 +1211,7 @@ public class FSDirectory implements Closeable {

if (filesRemoved >= 0) {
getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
removedINodes);
removedINodes, false);
}
}

@@ -1281,19 +1278,20 @@ public class FSDirectory implements Closeable {
* but do not have snapshots yet
*/
private static void checkSnapshot(INode target,
List<INodeDirectorySnapshottable> snapshottableDirs) throws SnapshotException {
List<INodeDirectory> snapshottableDirs) throws SnapshotException {
if (target.isDirectory()) {
INodeDirectory targetDir = target.asDirectory();
if (targetDir.isSnapshottable()) {
INodeDirectorySnapshottable ssTargetDir =
(INodeDirectorySnapshottable) targetDir;
if (ssTargetDir.getNumSnapshots() > 0) {
throw new SnapshotException("The directory " + ssTargetDir.getFullPathName()
+ " cannot be deleted since " + ssTargetDir.getFullPathName()
DirectorySnapshottableFeature sf = targetDir
.getDirectorySnapshottableFeature();
if (sf != null) {
if (sf.getNumSnapshots() > 0) {
String fullPath = targetDir.getFullPathName();
throw new SnapshotException("The directory " + fullPath
+ " cannot be deleted since " + fullPath
+ " is snapshottable and already has snapshots");
} else {
if (snapshottableDirs != null) {
snapshottableDirs.add(ssTargetDir);
snapshottableDirs.add(targetDir);
}
}
}
@@ -1380,14 +1378,18 @@ public class FSDirectory implements Closeable {
Preconditions.checkArgument(
src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
"%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);


final String dirPath = normalizePath(src.substring(0,
src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

final INode node = this.getINode(dirPath);
final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
.valueOf(node, dirPath);
final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
if (sf == null) {
throw new SnapshotException(
"Directory is not a snapshottable directory: " + dirPath);
}
final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
@@ -1451,9 +1453,8 @@ public class FSDirectory implements Closeable {
src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

final INode node = this.getINode(dirPath);
if (node != null
&& node.isDirectory()
&& node.asDirectory() instanceof INodeDirectorySnapshottable) {
if (node != null && node.isDirectory()
&& node.asDirectory().isSnapshottable()) {
return node;
}
return null;

@@ -58,7 +58,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -73,8 +72,8 @@ import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.StringUtils;

import com.google.common.base.Preconditions;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

/**
* Contains inner classes for reading or writing the on-disk format for
@@ -554,21 +553,17 @@ public class FSImageFormat {
if (!toLoadSubtree) {
return;
}


// Step 2. Load snapshots if parent is snapshottable
int numSnapshots = in.readInt();
if (numSnapshots >= 0) {
final INodeDirectorySnapshottable snapshottableParent
= INodeDirectorySnapshottable.valueOf(parent, parent.getLocalName());
// load snapshots and snapshotQuota
SnapshotFSImageFormat.loadSnapshotList(snapshottableParent,
numSnapshots, in, this);
if (snapshottableParent.getSnapshotQuota() > 0) {
SnapshotFSImageFormat.loadSnapshotList(parent, numSnapshots, in, this);
if (parent.getDirectorySnapshottableFeature().getSnapshotQuota() > 0) {
// add the directory to the snapshottable directory list in
// SnapshotManager. Note that we only add root when its snapshot quota
// is positive.
this.namesystem.getSnapshotManager().addSnapshottable(
snapshottableParent);
this.namesystem.getSnapshotManager().addSnapshottable(parent);
}
}

@@ -820,7 +815,10 @@ public class FSImageFormat {
if (withSnapshot) {
dir.addSnapshotFeature(null);
}
return snapshottable ? new INodeDirectorySnapshottable(dir) : dir;
if (snapshottable) {
dir.addSnapshottableFeature();
}
return dir;
} else if (numBlocks == -2) {
//symlink

@@ -1367,10 +1365,8 @@ public class FSImageFormat {

// 2. Write INodeDirectorySnapshottable#snapshotsByNames to record all
// Snapshots
if (current instanceof INodeDirectorySnapshottable) {
INodeDirectorySnapshottable snapshottableNode =
(INodeDirectorySnapshottable) current;
SnapshotFSImageFormat.saveSnapshots(snapshottableNode, out);
if (current.isDirectory() && current.asDirectory().isSnapshottable()) {
SnapshotFSImageFormat.saveSnapshots(current.asDirectory(), out);
} else {
out.writeInt(-1); // # of snapshots
}

@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
import org.apache.hadoop.hdfs.util.XMLUtils;
@@ -241,7 +240,7 @@ public class FSImageSerialization {

writeQuota(node.getQuotaCounts(), out);

if (node instanceof INodeDirectorySnapshottable) {
if (node.isSnapshottable()) {
out.writeBoolean(true);
} else {
out.writeBoolean(false);

@@ -229,7 +229,6 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
@@ -3000,9 +2999,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
checkOperation(OperationCategory.READ);
src = FSDirectory.resolvePath(src, pathComponents, dir);
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
final INodeFile pendingFile = analyzeFileState(
FileState fileState = analyzeFileState(
src, fileId, clientName, previous, onRetryBlock);
src = pendingFile.getFullPathName();
final INodeFile pendingFile = fileState.inode;
src = fileState.path;

if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
// This is a retry. Just return the last block if having locations.
@@ -3038,8 +3038,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
// Run the full analysis again, since things could have changed
// while chooseTarget() was executing.
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
final INodeFile pendingFile =
FileState fileState =
analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
final INodeFile pendingFile = fileState.inode;
src = fileState.path;

if (onRetryBlock[0] != null) {
if (onRetryBlock[0].getLocations().length > 0) {
@@ -3075,7 +3077,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return makeLocatedBlock(newBlock, targets, offset);
}

INodeFile analyzeFileState(String src,
static class FileState {
public final INodeFile inode;
public final String path;

public FileState(INodeFile inode, String fullPath) {
this.inode = inode;
this.path = fullPath;
}
}

FileState analyzeFileState(String src,
long fileId,
String clientName,
ExtendedBlock previous,
@@ -3163,7 +3175,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
((BlockInfoUnderConstruction)lastBlockInFile).getExpectedStorageLocations(),
offset);
return pendingFile;
return new FileState(pendingFile, src);
} else {
// Case 3
throw new IOException("Cannot allocate block in " + src + ": " +
@@ -3176,7 +3188,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (!checkFileProgress(pendingFile, false)) {
throw new NotReplicatedYetException("Not replicated yet: " + src);
}
return pendingFile;
return new FileState(pendingFile, src);
}

LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
@@ -3324,10 +3336,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
: "Holder " + holder + " does not have any open files."));
}
// No further modification is allowed on a deleted file.
// A file is considered deleted, if it has no parent or is marked
// A file is considered deleted, if it is not in the inodeMap or is marked
// as deleted in the snapshot feature.
if (file.getParent() == null || (file.isWithSnapshot() &&
file.getFileWithSnapshotFeature().isCurrentFileDeleted())) {
if (isFileDeleted(file)) {
throw new FileNotFoundException(src);
}
String clientName = file.getFileUnderConstructionFeature().getClientName();
@@ -3762,7 +3773,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
getEditLog().logDelete(src, mtime, logRetryCache);
incrDeletedFileCount(filesRemoved);
// Blocks/INodes will be handled later
removePathAndBlocks(src, null, null);
removePathAndBlocks(src, null, removedINodes, true);
ret = true;
} finally {
writeUnlock();
@@ -3771,13 +3782,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
removeBlocks(collectedBlocks); // Incremental deletion of blocks
collectedBlocks.clear();

dir.writeLock();
try {
dir.removeFromInodeMap(removedINodes);
} finally {
dir.writeUnlock();
}
removedINodes.clear();
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
+ src +" is removed");
@@ -3815,14 +3819,24 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* @param blocks Containing the list of blocks to be deleted from blocksMap
* @param removedINodes Containing the list of inodes to be removed from
* inodesMap
* @param acquireINodeMapLock Whether to acquire the lock for inode removal
*/
void removePathAndBlocks(String src, BlocksMapUpdateInfo blocks,
List<INode> removedINodes) {
List<INode> removedINodes, final boolean acquireINodeMapLock) {
assert hasWriteLock();
leaseManager.removeLeaseWithPrefixPath(src);
// remove inodes from inodesMap
if (removedINodes != null) {
dir.removeFromInodeMap(removedINodes);
if (acquireINodeMapLock) {
dir.writeLock();
}
try {
dir.removeFromInodeMap(removedINodes);
} finally {
if (acquireINodeMapLock) {
dir.writeUnlock();
}
}
removedINodes.clear();
}
if (blocks == null) {
@@ -6472,6 +6486,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return blockId;
}

private boolean isFileDeleted(INodeFile file) {
// Not in the inodeMap or in the snapshot but marked deleted.
if (dir.getInode(file.getId()) == null ||
file.getParent() == null || (file.isWithSnapshot() &&
file.getFileWithSnapshotFeature().isCurrentFileDeleted())) {
return true;
}
return false;
}

private INodeFile checkUCBlock(ExtendedBlock block,
String clientName) throws IOException {
assert hasWriteLock();
@@ -6488,7 +6512,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,

// check file inode
final INodeFile file = ((INode)storedBlock.getBlockCollection()).asFile();
if (file == null || !file.isUnderConstruction()) {
if (file == null || !file.isUnderConstruction() || isFileDeleted(file)) {
throw new IOException("The file " + storedBlock +
" belonged to does not exist or it is not under construction.");
}
@@ -7816,7 +7840,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
* Remove a list of INodeDirectorySnapshottable from the SnapshotManager
* @param toRemove the list of INodeDirectorySnapshottable to be removed
*/
void removeSnapshottableDirs(List<INodeDirectorySnapshottable> toRemove) {
void removeSnapshottableDirs(List<INodeDirectory> toRemove) {
if (snapshotManager != null) {
snapshotManager.removeSnapshottable(toRemove);
}

@@ -97,14 +97,12 @@ public interface INodeAttributes {

@Override
public final String getUserName() {
final int n = (int)PermissionStatusFormat.USER.retrieve(permission);
return SerialNumberManager.INSTANCE.getUser(n);
return PermissionStatusFormat.getUser(permission);
}

@Override
public final String getGroupName() {
final int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
return SerialNumberManager.INSTANCE.getGroup(n);
return PermissionStatusFormat.getGroup(permission);
}

@Override
@@ -114,7 +112,7 @@ public interface INodeAttributes {

@Override
public final short getFsPermissionShort() {
return (short)PermissionStatusFormat.MODE.retrieve(permission);
return PermissionStatusFormat.getMode(permission);
}

@Override

@@ -29,10 +29,11 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -102,11 +103,6 @@ public class INodeDirectory extends INodeWithAdditionalFields
return this;
}

/** Is this a snapshottable directory? */
public boolean isSnapshottable() {
return false;
}

void setQuota(long nsQuota, long dsQuota) {
DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
if (quota != null) {
@@ -186,7 +182,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
public final boolean isWithSnapshot() {
return getDirectoryWithSnapshotFeature() != null;
}


public DirectoryDiffList getDiffs() {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
return sf != null ? sf.getDiffs() : null;
@@ -204,50 +200,71 @@ public class INodeDirectory extends INodeWithAdditionalFields
return super.toDetailString() + (sf == null ? "" : ", " + sf.getDiffs());
}

/** Replace itself with an {@link INodeDirectorySnapshottable}. */
public INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(
int latestSnapshotId, final INodeMap inodeMap)
throws QuotaExceededException {
Preconditions.checkState(!(this instanceof INodeDirectorySnapshottable),
"this is already an INodeDirectorySnapshottable, this=%s", this);
final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(this);
replaceSelf(s, inodeMap).getDirectoryWithSnapshotFeature().getDiffs()
.saveSelf2Snapshot(latestSnapshotId, s, this);
return s;
public DirectorySnapshottableFeature getDirectorySnapshottableFeature() {
return getFeature(DirectorySnapshottableFeature.class);
}

/** Replace itself with {@link INodeDirectory}. */
public INodeDirectory replaceSelf4INodeDirectory(final INodeMap inodeMap) {
Preconditions.checkState(getClass() != INodeDirectory.class,
"the class is already INodeDirectory, this=%s", this);
return replaceSelf(new INodeDirectory(this, true, this.getFeatures()),
inodeMap);
public boolean isSnapshottable() {
return getDirectorySnapshottableFeature() != null;
}

/** Replace itself with the given directory. */
private final <N extends INodeDirectory> N replaceSelf(final N newDir,
final INodeMap inodeMap) {
final INodeReference ref = getParentReference();
if (ref != null) {
ref.setReferredINode(newDir);
if (inodeMap != null) {
inodeMap.put(newDir);
}
} else {
final INodeDirectory parent = getParent();
Preconditions.checkArgument(parent != null, "parent is null, this=%s", this);
parent.replaceChild(this, newDir, inodeMap);
public Snapshot getSnapshot(byte[] snapshotName) {
return getDirectorySnapshottableFeature().getSnapshot(snapshotName);
}

public void setSnapshotQuota(int snapshotQuota) {
getDirectorySnapshottableFeature().setSnapshotQuota(snapshotQuota);
}

public Snapshot addSnapshot(int id, String name) throws SnapshotException,
QuotaExceededException {
return getDirectorySnapshottableFeature().addSnapshot(this, id, name);
}

public Snapshot removeSnapshot(String snapshotName,
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
throws SnapshotException {
return getDirectorySnapshottableFeature().removeSnapshot(this,
snapshotName, collectedBlocks, removedINodes);
}

public void renameSnapshot(String path, String oldName, String newName)
throws SnapshotException {
getDirectorySnapshottableFeature().renameSnapshot(path, oldName, newName);
}

/** add DirectorySnapshottableFeature */
public void addSnapshottableFeature() {
Preconditions.checkState(!isSnapshottable(),
"this is already snapshottable, this=%s", this);
DirectoryWithSnapshotFeature s = this.getDirectoryWithSnapshotFeature();
final DirectorySnapshottableFeature snapshottable =
new DirectorySnapshottableFeature(s);
if (s != null) {
this.removeFeature(s);
}
clear();
return newDir;
this.addFeature(snapshottable);
}


/** remove DirectorySnapshottableFeature */
public void removeSnapshottableFeature() {
DirectorySnapshottableFeature s = getDirectorySnapshottableFeature();
Preconditions.checkState(s != null,
"The dir does not have snapshottable feature: this=%s", this);
this.removeFeature(s);
if (s.getDiffs().asList().size() > 0) {
// add a DirectoryWithSnapshotFeature back
DirectoryWithSnapshotFeature sf = new DirectoryWithSnapshotFeature(
s.getDiffs());
addFeature(sf);
}
}

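// Illustrative sketch (not part of this change): with the feature-based API
// above, a plain INodeDirectory is made snapshottable by attaching the
// feature instead of being replaced by an INodeDirectorySnapshottable
// instance. "dir" is assumed to be an existing INodeDirectory held under the
// namesystem lock; the snapshot id and name are made up for the example.
void exampleMakeSnapshottable(INodeDirectory dir)
    throws SnapshotException, QuotaExceededException {
  dir.addSnapshottableFeature();            // attaches DirectorySnapshottableFeature
  assert dir.isSnapshottable();             // now backed by the feature, not a subclass
  dir.setSnapshotQuota(1);                  // allow a single snapshot
  Snapshot s1 = dir.addSnapshot(0, "s1");   // delegates to the feature
  // Removing the feature is only valid once no snapshots remain:
  // dir.removeSnapshottableFeature();
}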
/**
* Replace the given child with a new child. Note that we no longer need to
* replace an normal INodeDirectory or INodeFile into an
* INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only cases
* for child replacement is for {@link INodeDirectorySnapshottable} and
* reference nodes.
* for child replacement is for reference nodes.
*/
public void replaceChild(INode oldChild, final INode newChild,
final INodeMap inodeMap) {
@@ -822,6 +839,11 @@ public class INodeDirectory extends INodeWithAdditionalFields
};
}
});

final DirectorySnapshottableFeature s = getDirectorySnapshottableFeature();
if (s != null) {
s.dumpTreeRecursively(this, out, prefix, snapshot);
}
}

/**
@@ -830,7 +852,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
* @param subs The subtrees.
*/
@VisibleForTesting
protected static void dumpTreeRecursively(PrintWriter out,
public static void dumpTreeRecursively(PrintWriter out,
StringBuilder prefix, Iterable<SnapshotAndINode> subs) {
if (subs != null) {
for(final Iterator<SnapshotAndINode> i = subs.iterator(); i.hasNext();) {
@@ -843,7 +865,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
}

/** A pair of Snapshot and INode objects. */
protected static class SnapshotAndINode {
public static class SnapshotAndINode {
public final int snapshotId;
public final INode inode;


@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.LongBitFormat;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -71,37 +72,29 @@ public class INodeFile extends INodeWithAdditionalFields
}

/** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
static class HeaderFormat {
/** Number of bits for Block size */
static final int BLOCKBITS = 48;
/** Header mask 64-bit representation */
static final long HEADERMASK = 0xffffL << BLOCKBITS;
static final long MAX_BLOCK_SIZE = ~HEADERMASK;

static enum HeaderFormat {
PREFERRED_BLOCK_SIZE(null, 48, 1),
REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 16, 1);

private final LongBitFormat BITS;

private HeaderFormat(LongBitFormat previous, int length, long min) {
BITS = new LongBitFormat(name(), previous, length, min);
}

static short getReplication(long header) {
return (short) ((header & HEADERMASK) >> BLOCKBITS);
return (short)REPLICATION.BITS.retrieve(header);
}

static long combineReplication(long header, short replication) {
if (replication <= 0) {
throw new IllegalArgumentException(
"Unexpected value for the replication: " + replication);
}
return ((long)replication << BLOCKBITS) | (header & MAX_BLOCK_SIZE);
}

static long getPreferredBlockSize(long header) {
return header & MAX_BLOCK_SIZE;
return PREFERRED_BLOCK_SIZE.BITS.retrieve(header);
}

static long combinePreferredBlockSize(long header, long blockSize) {
if (blockSize < 0) {
throw new IllegalArgumentException("Block size < 0: " + blockSize);
} else if (blockSize > MAX_BLOCK_SIZE) {
throw new IllegalArgumentException("Block size = " + blockSize
+ " > MAX_BLOCK_SIZE = " + MAX_BLOCK_SIZE);
}
return (header & HEADERMASK) | (blockSize & MAX_BLOCK_SIZE);
static long toLong(long preferredBlockSize, short replication) {
long h = 0;
h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
h = REPLICATION.BITS.combine(replication, h);
return h;
}
}
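// Illustrative sketch (not part of this change): the packing that the
// LongBitFormat-based HeaderFormat above performs, written out with plain
// bit arithmetic. PREFERRED_BLOCK_SIZE takes the low 48 bits and REPLICATION
// the next 16 bits, so for example a 128 MB block size with replication 3:
long preferredBlockSize = 128L * 1024 * 1024;           // 134217728
short replication = 3;
long header = (preferredBlockSize & ((1L << 48) - 1))   // low 48 bits
    | (((long) replication) << 48);                     // next 16 bits
// Unpacking recovers the original values:
long blockSizeBack = header & ((1L << 48) - 1);         // 134217728
short replicationBack = (short) (header >>> 48);        // 3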

@@ -113,8 +106,7 @@ public class INodeFile extends INodeWithAdditionalFields
long atime, BlockInfo[] blklist, short replication,
long preferredBlockSize) {
super(id, name, permissions, mtime, atime);
header = HeaderFormat.combineReplication(header, replication);
header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
header = HeaderFormat.toLong(preferredBlockSize, replication);
this.blocks = blklist;
}

@@ -347,7 +339,7 @@ public class INodeFile extends INodeWithAdditionalFields

/** Set the replication factor of this file. */
public final void setFileReplication(short replication) {
header = HeaderFormat.combineReplication(header, replication);
header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
}

/** Set the replication factor of this file. */

@@ -48,9 +48,7 @@ public interface INodeFileAttributes extends INodeAttributes {
short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
super(name, permissions, aclFeature, modificationTime, accessTime,
xAttrsFeature);

final long h = HeaderFormat.combineReplication(0L, replication);
header = HeaderFormat.combinePreferredBlockSize(h, preferredBlockSize);
header = HeaderFormat.toLong(preferredBlockSize, replication);
}

public SnapshotCopy(INodeFile file) {

@@ -21,9 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode.Feature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
import org.apache.hadoop.hdfs.util.LongBitFormat;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;

import com.google.common.base.Preconditions;
@@ -36,26 +35,28 @@ import com.google.common.base.Preconditions;
public abstract class INodeWithAdditionalFields extends INode
implements LinkedElement {
static enum PermissionStatusFormat {
MODE(0, 16),
GROUP(MODE.OFFSET + MODE.LENGTH, 25),
USER(GROUP.OFFSET + GROUP.LENGTH, 23);
MODE(null, 16),
GROUP(MODE.BITS, 25),
USER(GROUP.BITS, 23);

final int OFFSET;
final int LENGTH; //bit length
final long MASK;
final LongBitFormat BITS;

PermissionStatusFormat(int offset, int length) {
OFFSET = offset;
LENGTH = length;
MASK = ((-1L) >>> (64 - LENGTH)) << OFFSET;
private PermissionStatusFormat(LongBitFormat previous, int length) {
BITS = new LongBitFormat(name(), previous, length, 0);
}

long retrieve(long record) {
return (record & MASK) >>> OFFSET;
static String getUser(long permission) {
final int n = (int)USER.BITS.retrieve(permission);
return SerialNumberManager.INSTANCE.getUser(n);
}

long combine(long bits, long record) {
return (record & ~MASK) | (bits << OFFSET);
static String getGroup(long permission) {
final int n = (int)GROUP.BITS.retrieve(permission);
return SerialNumberManager.INSTANCE.getGroup(n);
}

static short getMode(long permission) {
return (short)MODE.BITS.retrieve(permission);
}

/** Encode the {@link PermissionStatus} to a long. */
@@ -63,12 +64,12 @@ public abstract class INodeWithAdditionalFields extends INode
long permission = 0L;
final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
ps.getUserName());
permission = USER.combine(user, permission);
permission = USER.BITS.combine(user, permission);
final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
ps.getGroupName());
permission = GROUP.combine(group, permission);
permission = GROUP.BITS.combine(group, permission);
final int mode = ps.getPermission().toShort();
permission = MODE.combine(mode, permission);
permission = MODE.BITS.combine(mode, permission);
return permission;
}
}
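// Illustrative sketch (not part of this change): the 64-bit layout that
// PermissionStatusFormat above encodes via LongBitFormat -- MODE in the low
// 16 bits, GROUP in the next 25 bits, USER in the top 23 bits. The user and
// group values are serial numbers from SerialNumberManager; the numbers here
// are made up for the example.
int userSerial = 7;                    // hypothetical serial for the user name
int groupSerial = 4;                   // hypothetical serial for the group name
short mode = 0755;                     // rwxr-xr-x
long permission = ((long) userSerial << (16 + 25))
    | ((long) groupSerial << 16)
    | (mode & 0xffffL);
// Decoding reverses the shifts:
short modeBack = (short) (permission & 0xffff);
int groupBack = (int) ((permission >>> 16) & ((1 << 25) - 1));
int userBack  = (int) (permission >>> (16 + 25));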
@@ -162,7 +163,7 @@ public abstract class INodeWithAdditionalFields extends INode
}

private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
this.permission = f.combine(n, permission);
this.permission = f.BITS.combine(n, permission);
}

@Override
@@ -170,9 +171,7 @@ public abstract class INodeWithAdditionalFields extends INode
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getUserName();
}

int n = (int)PermissionStatusFormat.USER.retrieve(permission);
return SerialNumberManager.INSTANCE.getUser(n);
return PermissionStatusFormat.getUser(permission);
}

@Override
@@ -186,9 +185,7 @@ public abstract class INodeWithAdditionalFields extends INode
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getGroupName();
}

int n = (int)PermissionStatusFormat.GROUP.retrieve(permission);
return SerialNumberManager.INSTANCE.getGroup(n);
return PermissionStatusFormat.getGroup(permission);
}

@Override
@@ -208,7 +205,7 @@ public abstract class INodeWithAdditionalFields extends INode

@Override
public final short getFsPermissionShort() {
return (short)PermissionStatusFormat.MODE.retrieve(permission);
return PermissionStatusFormat.getMode(permission);
}
@Override
void setPermission(FsPermission permission) {
@@ -318,8 +315,9 @@ public abstract class INodeWithAdditionalFields extends INode
}

protected <T extends Feature> T getFeature(Class<? extends Feature> clazz) {
Preconditions.checkArgument(clazz != null);
for (Feature f : features) {
if (f.getClass() == clazz) {
if (clazz.isAssignableFrom(f.getClass())) {
@SuppressWarnings("unchecked")
T ret = (T) f;
return ret;

@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

import com.google.common.base.Preconditions;
@@ -208,8 +207,7 @@ public class INodesInPath {
final byte[] childName = components[count + 1];

// check if the next byte[] in components is for ".snapshot"
if (isDotSnapshotDir(childName)
&& isDir && dir instanceof INodeDirectorySnapshottable) {
if (isDotSnapshotDir(childName) && isDir && dir.isSnapshottable()) {
// skip the ".snapshot" in components
count++;
index++;
@@ -222,8 +220,7 @@ public class INodesInPath {
break;
}
// Resolve snapshot root
final Snapshot s = ((INodeDirectorySnapshottable)dir).getSnapshot(
components[count + 1]);
final Snapshot s = dir.getSnapshot(components[count + 1]);
if (s == null) {
//snapshot not found
curNode = null;

@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
@@ -47,6 +50,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.RemotePeerFactory;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -55,6 +59,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
@@ -65,6 +75,7 @@ import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Time;

import com.google.common.annotations.VisibleForTesting;
@@ -92,7 +103,7 @@ import com.google.common.annotations.VisibleForTesting;
* factors of each file.
*/
@InterfaceAudience.Private
public class NamenodeFsck {
public class NamenodeFsck implements DataEncryptionKeyFactory {
public static final Log LOG = LogFactory.getLog(NameNode.class.getName());

// return string marking fsck status
@@ -149,6 +160,7 @@ public class NamenodeFsck {
private List<String> snapshottableDirs = null;

private final BlockPlacementPolicy bpPolicy;
private final SaslDataTransferClient saslClient;

/**
* Filesystem checker.
@@ -175,6 +187,12 @@ public class NamenodeFsck {
networktopology,
namenode.getNamesystem().getBlockManager().getDatanodeManager()
.getHost2DatanodeMap());
this.saslClient = new SaslDataTransferClient(
DataTransferSaslUtil.getSaslPropertiesResolver(conf),
TrustedChannelResolver.getInstance(conf),
conf.getBoolean(
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));

for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) {
String key = it.next();
@@ -616,15 +634,16 @@ public class NamenodeFsck {
setConfiguration(namenode.conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr)
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
try {
s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
peer = TcpPeerServer.peerFromSocketAndKey(s, namenode.getRpcServer().
getDataEncryptionKey());
peer = TcpPeerServer.peerFromSocketAndKey(saslClient, s,
NamenodeFsck.this, blockToken, datanodeId);
} finally {
if (peer == null) {
IOUtils.closeQuietly(s);
@@ -663,7 +682,12 @@ public class NamenodeFsck {
throw new Exception("Could not copy block data for " + lblock.getBlock());
}
}


@Override
public DataEncryptionKey newDataEncryptionKey() throws IOException {
return namenode.getRpcServer().getDataEncryptionKey();
}

/*
* XXX (ab) See comment above for copyBlock().
*

@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
@@ -33,74 +32,50 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.Content;
import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.util.Time;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;

/**
* Directories where taking snapshots is allowed.
*
* Like other {@link INode} subclasses, this class is synchronized externally
* by the namesystem and FSDirectory locks.
* A directory with this feature is a snapshottable directory, where snapshots
* can be taken. This feature extends {@link DirectoryWithSnapshotFeature}, and
* maintains extra information about all the snapshots taken on this directory.
*/
@InterfaceAudience.Private
public class INodeDirectorySnapshottable extends INodeDirectory {
public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature {
/** Limit the number of snapshot per snapshottable directory. */
static final int SNAPSHOT_LIMIT = 1 << 16;

/** Cast INode to INodeDirectorySnapshottable. */
static public INodeDirectorySnapshottable valueOf(
INode inode, String src) throws IOException {
final INodeDirectory dir = INodeDirectory.valueOf(inode, src);
if (!dir.isSnapshottable()) {
throw new SnapshotException(
"Directory is not a snapshottable directory: " + src);
}
return (INodeDirectorySnapshottable)dir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Snapshots of this directory in ascending order of snapshot names.
|
||||
* Note that snapshots in ascending order of snapshot id are stored in
|
||||
* {@link INodeDirectoryWithSnapshot}.diffs (a private field).
|
||||
*/
|
||||
private final List<Snapshot> snapshotsByNames = new ArrayList<Snapshot>();
|
||||
|
||||
/**
|
||||
* @return {@link #snapshotsByNames}
|
||||
*/
|
||||
ReadOnlyList<Snapshot> getSnapshotsByNames() {
|
||||
return ReadOnlyList.Util.asReadOnlyList(this.snapshotsByNames);
|
||||
}
|
||||
|
||||
/** Number of snapshots allowed. */
|
||||
private int snapshotQuota = SNAPSHOT_LIMIT;
|
||||
|
||||
public INodeDirectorySnapshottable(INodeDirectory dir) {
|
||||
super(dir, true, dir.getFeatures());
|
||||
// add snapshot feature if the original directory does not have it
|
||||
if (!isWithSnapshot()) {
|
||||
addSnapshotFeature(null);
|
||||
}
|
||||
public DirectorySnapshottableFeature(DirectoryWithSnapshotFeature feature) {
|
||||
super(feature == null ? null : feature.getDiffs());
|
||||
}
|
||||
|
||||
|
||||
/** @return the number of existing snapshots. */
|
||||
public int getNumSnapshots() {
|
||||
return snapshotsByNames.size();
|
||||
}
|
||||
|
||||
|
||||
private int searchSnapshot(byte[] snapshotName) {
|
||||
return Collections.binarySearch(snapshotsByNames, snapshotName);
|
||||
}
|
||||
|
@ -110,7 +85,7 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
final int i = searchSnapshot(snapshotName);
|
||||
return i < 0? null: snapshotsByNames.get(i);
|
||||
}
|
||||
|
||||
|
||||
public Snapshot getSnapshotById(int sid) {
|
||||
for (Snapshot s : snapshotsByNames) {
|
||||
if (s.getId() == sid) {
|
||||
|
@ -119,12 +94,12 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
/** @return {@link #snapshotsByNames} as a {@link ReadOnlyList} */
|
||||
public ReadOnlyList<Snapshot> getSnapshotList() {
|
||||
return ReadOnlyList.Util.asReadOnlyList(snapshotsByNames);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Rename a snapshot
|
||||
* @param path
|
||||
|
@ -139,7 +114,7 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
* name does not exist or a snapshot with the new name already
|
||||
* exists
|
||||
*/
|
||||
void renameSnapshot(String path, String oldName, String newName)
|
||||
public void renameSnapshot(String path, String oldName, String newName)
|
||||
throws SnapshotException {
|
||||
if (newName.equals(oldName)) {
|
||||
return;
|
||||
|
@ -180,22 +155,17 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
this.snapshotQuota = snapshotQuota;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isSnapshottable() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Simply add a snapshot into the {@link #snapshotsByNames}. Used by FSImage
|
||||
* loading.
|
||||
* Simply add a snapshot into the {@link #snapshotsByNames}. Used when loading
|
||||
* fsimage.
|
||||
*/
|
||||
void addSnapshot(Snapshot snapshot) {
|
||||
this.snapshotsByNames.add(snapshot);
|
||||
}
|
||||
|
||||
/** Add a snapshot. */
|
||||
Snapshot addSnapshot(int id, String name) throws SnapshotException,
|
||||
QuotaExceededException {
|
||||
public Snapshot addSnapshot(INodeDirectory snapshotRoot, int id, String name)
|
||||
throws SnapshotException, QuotaExceededException {
|
||||
//check snapshot quota
|
||||
final int n = getNumSnapshots();
|
||||
if (n + 1 > snapshotQuota) {
|
||||
|
@ -203,7 +173,7 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
+ n + " snapshot(s) and the snapshot quota is "
|
||||
+ snapshotQuota);
|
||||
}
|
||||
final Snapshot s = new Snapshot(id, name, this);
|
||||
final Snapshot s = new Snapshot(id, name, snapshotRoot);
|
||||
final byte[] nameBytes = s.getRoot().getLocalNameBytes();
|
||||
final int i = searchSnapshot(nameBytes);
|
||||
if (i >= 0) {
|
||||
|
@ -211,60 +181,61 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
+ "snapshot with the same name \"" + Snapshot.getSnapshotName(s) + "\".");
|
||||
}
|
||||
|
||||
final DirectoryDiff d = getDiffs().addDiff(id, this);
|
||||
final DirectoryDiff d = getDiffs().addDiff(id, snapshotRoot);
|
||||
d.setSnapshotRoot(s.getRoot());
|
||||
snapshotsByNames.add(-i - 1, s);
|
||||
|
||||
//set modification time
|
||||
updateModificationTime(Time.now(), Snapshot.CURRENT_STATE_ID);
|
||||
s.getRoot().setModificationTime(getModificationTime(),
|
||||
Snapshot.CURRENT_STATE_ID);
|
||||
// set modification time
|
||||
final long now = Time.now();
|
||||
snapshotRoot.updateModificationTime(now, Snapshot.CURRENT_STATE_ID);
|
||||
s.getRoot().setModificationTime(now, Snapshot.CURRENT_STATE_ID);
|
||||
return s;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Remove the snapshot with the given name from {@link #snapshotsByNames},
|
||||
* and delete all the corresponding DirectoryDiff.
|
||||
*
|
||||
*
|
||||
* @param snapshotRoot The directory where we take snapshots
|
||||
* @param snapshotName The name of the snapshot to be removed
|
||||
* @param collectedBlocks Used to collect information to update blocksMap
|
||||
* @return The removed snapshot. Null if no snapshot with the given name
|
||||
* @return The removed snapshot. Null if no snapshot with the given name
|
||||
* exists.
|
||||
*/
|
||||
Snapshot removeSnapshot(String snapshotName,
|
||||
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
|
||||
throws SnapshotException {
|
||||
public Snapshot removeSnapshot(INodeDirectory snapshotRoot,
|
||||
String snapshotName, BlocksMapUpdateInfo collectedBlocks,
|
||||
final List<INode> removedINodes) throws SnapshotException {
|
||||
final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
|
||||
if (i < 0) {
|
||||
throw new SnapshotException("Cannot delete snapshot " + snapshotName
|
||||
+ " from path " + this.getFullPathName()
|
||||
+ " from path " + snapshotRoot.getFullPathName()
|
||||
+ ": the snapshot does not exist.");
|
||||
} else {
|
||||
final Snapshot snapshot = snapshotsByNames.get(i);
|
||||
int prior = Snapshot.findLatestSnapshot(this, snapshot.getId());
|
||||
int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
|
||||
try {
|
||||
Quota.Counts counts = cleanSubtree(snapshot.getId(), prior,
|
||||
collectedBlocks, removedINodes, true);
|
||||
INodeDirectory parent = getParent();
|
||||
Quota.Counts counts = snapshotRoot.cleanSubtree(snapshot.getId(),
|
||||
prior, collectedBlocks, removedINodes, true);
|
||||
INodeDirectory parent = snapshotRoot.getParent();
|
||||
if (parent != null) {
|
||||
// there will not be any WithName node corresponding to the deleted
|
||||
// there will not be any WithName node corresponding to the deleted
|
||||
// snapshot, thus only update the quota usage in the current tree
|
||||
parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
|
||||
-counts.get(Quota.DISKSPACE), true);
|
||||
}
|
||||
} catch(QuotaExceededException e) {
|
||||
LOG.error("BUG: removeSnapshot increases namespace usage.", e);
|
||||
INode.LOG.error("BUG: removeSnapshot increases namespace usage.", e);
|
||||
}
|
||||
// remove from snapshotsByNames after successfully cleaning the subtree
|
||||
snapshotsByNames.remove(i);
|
||||
return snapshot;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
||||
public ContentSummaryComputationContext computeContentSummary(
|
||||
final INodeDirectory snapshotRoot,
|
||||
final ContentSummaryComputationContext summary) {
|
||||
super.computeContentSummary(summary);
|
||||
snapshotRoot.computeContentSummary(summary);
|
||||
summary.getCounts().add(Content.SNAPSHOT, snapshotsByNames.size());
|
||||
summary.getCounts().add(Content.SNAPSHOTTABLE_DIRECTORY, 1);
|
||||
return summary;
|
||||
|
@ -273,7 +244,7 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
/**
|
||||
* Compute the difference between two snapshots (or a snapshot and the current
|
||||
* directory) of the directory.
|
||||
*
|
||||
*
|
||||
* @param from The name of the start point of the comparison. Null indicating
|
||||
* the current tree.
|
||||
* @param to The name of the end point. Null indicating the current tree.
|
||||
|
@ -282,52 +253,55 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
* point, or if endSnapshotName is not null but cannot be identified
|
||||
* as a previous snapshot.
|
||||
*/
|
||||
SnapshotDiffInfo computeDiff(final String from, final String to)
|
||||
throws SnapshotException {
|
||||
Snapshot fromSnapshot = getSnapshotByName(from);
|
||||
Snapshot toSnapshot = getSnapshotByName(to);
|
||||
SnapshotDiffInfo computeDiff(final INodeDirectory snapshotRoot,
|
||||
final String from, final String to) throws SnapshotException {
|
||||
Snapshot fromSnapshot = getSnapshotByName(snapshotRoot, from);
|
||||
Snapshot toSnapshot = getSnapshotByName(snapshotRoot, to);
|
||||
// if the start point is equal to the end point, return null
|
||||
if (from.equals(to)) {
|
||||
return null;
|
||||
}
|
||||
SnapshotDiffInfo diffs = new SnapshotDiffInfo(this, fromSnapshot,
|
||||
SnapshotDiffInfo diffs = new SnapshotDiffInfo(snapshotRoot, fromSnapshot,
|
||||
toSnapshot);
|
||||
computeDiffRecursively(this, new ArrayList<byte[]>(), diffs);
|
||||
computeDiffRecursively(snapshotRoot, snapshotRoot, new ArrayList<byte[]>(),
|
||||
diffs);
|
||||
return diffs;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Find the snapshot matching the given name.
|
||||
*
|
||||
*
|
||||
* @param snapshotRoot The directory where snapshots were taken.
|
||||
* @param snapshotName The name of the snapshot.
|
||||
* @return The corresponding snapshot. Null if snapshotName is null or empty.
|
||||
* @throws SnapshotException If snapshotName is not null or empty, but there
|
||||
* is no snapshot matching the name.
|
||||
*/
|
||||
private Snapshot getSnapshotByName(String snapshotName)
|
||||
throws SnapshotException {
|
||||
private Snapshot getSnapshotByName(INodeDirectory snapshotRoot,
|
||||
String snapshotName) throws SnapshotException {
|
||||
Snapshot s = null;
|
||||
if (snapshotName != null && !snapshotName.isEmpty()) {
|
||||
final int index = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
|
||||
if (index < 0) {
|
||||
throw new SnapshotException("Cannot find the snapshot of directory "
|
||||
+ this.getFullPathName() + " with name " + snapshotName);
|
||||
+ snapshotRoot.getFullPathName() + " with name " + snapshotName);
|
||||
}
|
||||
s = snapshotsByNames.get(index);
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Recursively compute the difference between snapshots under a given
|
||||
* directory/file.
|
||||
* @param node The directory/file under which the diff is computed.
|
||||
* @param parentPath Relative path (corresponding to the snapshot root) of
|
||||
* @param snapshotRoot The directory where snapshots were taken.
|
||||
* @param node The directory/file under which the diff is computed.
|
||||
* @param parentPath Relative path (corresponding to the snapshot root) of
|
||||
* the node's parent.
|
||||
* @param diffReport data structure used to store the diff.
|
||||
*/
|
||||
private void computeDiffRecursively(INode node, List<byte[]> parentPath,
|
||||
SnapshotDiffInfo diffReport) {
|
||||
private void computeDiffRecursively(final INodeDirectory snapshotRoot,
|
||||
INode node, List<byte[]> parentPath, SnapshotDiffInfo diffReport) {
|
||||
final Snapshot earlierSnapshot = diffReport.isFromEarlier() ?
|
||||
diffReport.getFrom() : diffReport.getTo();
|
||||
final Snapshot laterSnapshot = diffReport.isFromEarlier() ?
|
||||
|
@ -350,9 +324,10 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
final byte[] name = child.getLocalNameBytes();
|
||||
boolean toProcess = diff.searchIndex(ListType.DELETED, name) < 0;
|
||||
if (!toProcess && child instanceof INodeReference.WithName) {
|
||||
byte[][] renameTargetPath = findRenameTargetPath((WithName) child,
|
||||
laterSnapshot == null ? Snapshot.CURRENT_STATE_ID :
|
||||
laterSnapshot.getId());
|
||||
byte[][] renameTargetPath = findRenameTargetPath(
|
||||
snapshotRoot, (WithName) child,
|
||||
laterSnapshot == null ? Snapshot.CURRENT_STATE_ID :
|
||||
laterSnapshot.getId());
|
||||
if (renameTargetPath != null) {
|
||||
toProcess = true;
|
||||
diffReport.setRenameTarget(child.getId(), renameTargetPath);
|
||||
|
@ -360,7 +335,7 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
}
|
||||
if (toProcess) {
|
||||
parentPath.add(name);
|
||||
computeDiffRecursively(child, parentPath, diffReport);
|
||||
computeDiffRecursively(snapshotRoot, child, parentPath, diffReport);
|
||||
parentPath.remove(parentPath.size() - 1);
|
||||
}
|
||||
}
|
||||
|
@ -379,12 +354,12 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
* However, we should include it in our snapshot diff report as rename only
|
||||
* if the rename target is also under the same snapshottable directory.
|
||||
*/
|
||||
private byte[][] findRenameTargetPath(INodeReference.WithName wn,
|
||||
final int snapshotId) {
|
||||
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
|
||||
INodeReference.WithName wn, final int snapshotId) {
|
||||
INode inode = wn.getReferredINode();
|
||||
final LinkedList<byte[]> ancestors = Lists.newLinkedList();
|
||||
while (inode != null) {
|
||||
if (inode == this) {
|
||||
if (inode == snapshotRoot) {
|
||||
return ancestors.toArray(new byte[ancestors.size()][]);
|
||||
}
|
||||
if (inode instanceof INodeReference.WithCount) {
|
||||
|
@ -407,39 +382,20 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Replace itself with {@link INodeDirectoryWithSnapshot} or
|
||||
* {@link INodeDirectory} depending on the latest snapshot.
|
||||
*/
|
||||
INodeDirectory replaceSelf(final int latestSnapshotId, final INodeMap inodeMap)
|
||||
throws QuotaExceededException {
|
||||
if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
|
||||
Preconditions.checkState(getDirectoryWithSnapshotFeature()
|
||||
.getLastSnapshotId() == Snapshot.CURRENT_STATE_ID, "this=%s", this);
|
||||
}
|
||||
INodeDirectory dir = replaceSelf4INodeDirectory(inodeMap);
|
||||
if (latestSnapshotId != Snapshot.CURRENT_STATE_ID) {
|
||||
dir.recordModification(latestSnapshotId);
|
||||
}
|
||||
return dir;
|
||||
@Override
|
||||
public String toString() {
|
||||
return "snapshotsByNames=" + snapshotsByNames;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toDetailString() {
|
||||
return super.toDetailString() + ", snapshotsByNames=" + snapshotsByNames;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
|
||||
int snapshot) {
|
||||
super.dumpTreeRecursively(out, prefix, snapshot);
|
||||
|
||||
@VisibleForTesting
|
||||
public void dumpTreeRecursively(INodeDirectory snapshotRoot, PrintWriter out,
|
||||
StringBuilder prefix, int snapshot) {
|
||||
if (snapshot == Snapshot.CURRENT_STATE_ID) {
|
||||
out.println();
|
||||
out.print(prefix);
|
||||
|
||||
out.print("Snapshot of ");
|
||||
final String name = getLocalName();
|
||||
final String name = snapshotRoot.getLocalName();
|
||||
out.print(name.isEmpty()? "/": name);
|
||||
out.print(": quota=");
|
||||
out.print(getSnapshotQuota());
|
||||
|
@ -455,13 +411,14 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
out.print(", #snapshot=");
|
||||
out.println(n);
|
||||
|
||||
dumpTreeRecursively(out, prefix, new Iterable<SnapshotAndINode>() {
|
||||
INodeDirectory.dumpTreeRecursively(out, prefix,
|
||||
new Iterable<SnapshotAndINode>() {
|
||||
@Override
|
||||
public Iterator<SnapshotAndINode> iterator() {
|
||||
return new Iterator<SnapshotAndINode>() {
|
||||
final Iterator<DirectoryDiff> i = getDiffs().iterator();
|
||||
private DirectoryDiff next = findNext();
|
||||
|
||||
|
||||
private DirectoryDiff findNext() {
|
||||
for(; i.hasNext(); ) {
|
||||
final DirectoryDiff diff = i.next();
|
||||
|
@ -476,7 +433,7 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
public boolean hasNext() {
|
||||
return next != null;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public SnapshotAndINode next() {
|
||||
final SnapshotAndINode pair = new SnapshotAndINode(next
|
||||
|
@ -485,7 +442,7 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
next = findNext();
|
||||
return pair;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void remove() {
|
||||
throw new UnsupportedOperationException();
|
||||
|
@ -495,4 +452,4 @@ public class INodeDirectorySnapshottable extends INodeDirectory {
|
|||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -48,7 +48,9 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
|
|||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* Feature for directory with snapshot-related information.
|
||||
* Feature used to store and process the snapshot diff information for a
|
||||
* directory. In particular, it contains a directory diff list recording changes
|
||||
* made to the directory and its children for each snapshot.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class DirectoryWithSnapshotFeature implements INode.Feature {
|
||||
|
|
|
@ -127,9 +127,8 @@ public class FSImageFormatPBSnapshot {
|
|||
}
|
||||
|
||||
/**
|
||||
* Load the snapshots section from fsimage. Also convert snapshottable
|
||||
* directories into {@link INodeDirectorySnapshottable}.
|
||||
*
|
||||
* Load the snapshots section from fsimage. Also add snapshottable feature
|
||||
* to snapshottable directories.
|
||||
*/
|
||||
public void loadSnapshotSection(InputStream in) throws IOException {
|
||||
SnapshotManager sm = fsn.getSnapshotManager();
|
||||
|
@ -139,16 +138,13 @@ public class FSImageFormatPBSnapshot {
|
|||
sm.setSnapshotCounter(section.getSnapshotCounter());
|
||||
for (long sdirId : section.getSnapshottableDirList()) {
|
||||
INodeDirectory dir = fsDir.getInode(sdirId).asDirectory();
|
||||
final INodeDirectorySnapshottable sdir;
|
||||
if (!dir.isSnapshottable()) {
|
||||
sdir = new INodeDirectorySnapshottable(dir);
|
||||
fsDir.addToInodeMap(sdir);
|
||||
dir.addSnapshottableFeature();
|
||||
} else {
|
||||
// dir is root, and admin set root to snapshottable before
|
||||
sdir = (INodeDirectorySnapshottable) dir;
|
||||
sdir.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
|
||||
dir.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
|
||||
}
|
||||
sm.addSnapshottable(sdir);
|
||||
sm.addSnapshottable(dir);
|
||||
}
|
||||
loadSnapshots(in, snum);
|
||||
}
|
||||
|
@ -160,12 +156,11 @@ public class FSImageFormatPBSnapshot {
|
|||
INodeDirectory root = loadINodeDirectory(pbs.getRoot(),
|
||||
parent.getLoaderContext());
|
||||
int sid = pbs.getSnapshotId();
|
||||
INodeDirectorySnapshottable parent = (INodeDirectorySnapshottable) fsDir
|
||||
.getInode(root.getId()).asDirectory();
|
||||
INodeDirectory parent = fsDir.getInode(root.getId()).asDirectory();
|
||||
Snapshot snapshot = new Snapshot(sid, root, parent);
|
||||
// add the snapshot to parent, since we follow the sequence of
|
||||
// snapshotsByNames when saving, we do not need to sort when loading
|
||||
parent.addSnapshot(snapshot);
|
||||
parent.getDirectorySnapshottableFeature().addSnapshot(snapshot);
|
||||
snapshotMap.put(sid, snapshot);
|
||||
}
|
||||
}
|
||||
|
@ -373,14 +368,15 @@ public class FSImageFormatPBSnapshot {
|
|||
.setSnapshotCounter(sm.getSnapshotCounter())
|
||||
.setNumSnapshots(sm.getNumSnapshots());
|
||||
|
||||
INodeDirectorySnapshottable[] snapshottables = sm.getSnapshottableDirs();
|
||||
for (INodeDirectorySnapshottable sdir : snapshottables) {
|
||||
INodeDirectory[] snapshottables = sm.getSnapshottableDirs();
|
||||
for (INodeDirectory sdir : snapshottables) {
|
||||
b.addSnapshottableDir(sdir.getId());
|
||||
}
|
||||
b.build().writeDelimitedTo(out);
|
||||
int i = 0;
|
||||
for(INodeDirectorySnapshottable sdir : snapshottables) {
|
||||
for(Snapshot s : sdir.getSnapshotsByNames()) {
|
||||
for(INodeDirectory sdir : snapshottables) {
|
||||
for (Snapshot s : sdir.getDirectorySnapshottableFeature()
|
||||
.getSnapshotList()) {
|
||||
Root sroot = s.getRoot();
|
||||
SnapshotSection.Snapshot.Builder sb = SnapshotSection.Snapshot
|
||||
.newBuilder().setSnapshotId(s.getId());
|
||||
|
|
|
@ -184,15 +184,14 @@ public class Snapshot implements Comparable<byte[]> {
|
|||
/** The root directory of the snapshot. */
|
||||
private final Root root;
|
||||
|
||||
Snapshot(int id, String name, INodeDirectorySnapshottable dir) {
|
||||
Snapshot(int id, String name, INodeDirectory dir) {
|
||||
this(id, dir, dir);
|
||||
this.root.setLocalName(DFSUtil.string2Bytes(name));
|
||||
}
|
||||
|
||||
Snapshot(int id, INodeDirectory dir, INodeDirectorySnapshottable parent) {
|
||||
Snapshot(int id, INodeDirectory dir, INodeDirectory parent) {
|
||||
this.id = id;
|
||||
this.root = new Root(dir);
|
||||
|
||||
this.root.setParent(parent);
|
||||
}
|
||||
|
||||
|
|
|
@ -99,7 +99,7 @@ class SnapshotDiffInfo {
|
|||
}
|
||||
|
||||
/** The root directory of the snapshots */
|
||||
private final INodeDirectorySnapshottable snapshotRoot;
|
||||
private final INodeDirectory snapshotRoot;
|
||||
/** The starting point of the difference */
|
||||
private final Snapshot from;
|
||||
/** The end point of the difference */
|
||||
|
@ -122,8 +122,8 @@ class SnapshotDiffInfo {
|
|||
private final Map<Long, RenameEntry> renameMap =
|
||||
new HashMap<Long, RenameEntry>();
|
||||
|
||||
SnapshotDiffInfo(INodeDirectorySnapshottable snapshotRoot, Snapshot start,
|
||||
Snapshot end) {
|
||||
SnapshotDiffInfo(INodeDirectory snapshotRoot, Snapshot start, Snapshot end) {
|
||||
Preconditions.checkArgument(snapshotRoot.isSnapshottable());
|
||||
this.snapshotRoot = snapshotRoot;
|
||||
this.from = start;
|
||||
this.to = end;
|
||||
|
|
|
@ -41,6 +41,8 @@ import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
|
|||
import org.apache.hadoop.hdfs.util.Diff.ListType;
|
||||
import org.apache.hadoop.hdfs.util.ReadOnlyList;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* A helper class defining static methods for reading/writing snapshot related
|
||||
* information from/to FSImage.
|
||||
|
@ -52,17 +54,19 @@ public class SnapshotFSImageFormat {
|
|||
* @param out The {@link DataOutput} to write.
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void saveSnapshots(INodeDirectorySnapshottable current,
|
||||
DataOutput out) throws IOException {
|
||||
public static void saveSnapshots(INodeDirectory current, DataOutput out)
|
||||
throws IOException {
|
||||
DirectorySnapshottableFeature sf = current.getDirectorySnapshottableFeature();
|
||||
Preconditions.checkArgument(sf != null);
|
||||
// list of snapshots in snapshotsByNames
|
||||
ReadOnlyList<Snapshot> snapshots = current.getSnapshotsByNames();
|
||||
ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
|
||||
out.writeInt(snapshots.size());
|
||||
for (Snapshot s : snapshots) {
|
||||
// write the snapshot id
|
||||
out.writeInt(s.getId());
|
||||
}
|
||||
// snapshot quota
|
||||
out.writeInt(current.getSnapshotQuota());
|
||||
out.writeInt(sf.getSnapshotQuota());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -216,19 +220,22 @@ public class SnapshotFSImageFormat {
|
|||
* @param loader
|
||||
* The loader
|
||||
*/
|
||||
public static void loadSnapshotList(
|
||||
INodeDirectorySnapshottable snapshottableParent, int numSnapshots,
|
||||
DataInput in, FSImageFormat.Loader loader) throws IOException {
|
||||
public static void loadSnapshotList(INodeDirectory snapshottableParent,
|
||||
int numSnapshots, DataInput in, FSImageFormat.Loader loader)
|
||||
throws IOException {
|
||||
DirectorySnapshottableFeature sf = snapshottableParent
|
||||
.getDirectorySnapshottableFeature();
|
||||
Preconditions.checkArgument(sf != null);
|
||||
for (int i = 0; i < numSnapshots; i++) {
|
||||
// read snapshots
|
||||
final Snapshot s = loader.getSnapshot(in);
|
||||
s.getRoot().setParent(snapshottableParent);
|
||||
snapshottableParent.addSnapshot(s);
|
||||
sf.addSnapshot(s);
|
||||
}
|
||||
int snapshotQuota = in.readInt();
|
||||
snapshottableParent.setSnapshotQuota(snapshotQuota);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Load the {@link SnapshotDiff} list for the INodeDirectoryWithSnapshot
|
||||
* directory.
|
||||
|
|
|
@ -44,6 +44,8 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
|
|||
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
|
||||
import org.apache.hadoop.metrics2.util.MBeans;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
/**
|
||||
* Manage snapshottable directories and their snapshots.
|
||||
*
|
||||
|
@ -66,8 +68,8 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
private int snapshotCounter = 0;
|
||||
|
||||
/** All snapshottable directories in the namesystem. */
|
||||
private final Map<Long, INodeDirectorySnapshottable> snapshottables
|
||||
= new HashMap<Long, INodeDirectorySnapshottable>();
|
||||
private final Map<Long, INodeDirectory> snapshottables =
|
||||
new HashMap<Long, INodeDirectory>();
|
||||
|
||||
public SnapshotManager(final FSDirectory fsdir) {
|
||||
this.fsdir = fsdir;
|
||||
|
@ -84,7 +86,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
return;
|
||||
}
|
||||
|
||||
for(INodeDirectorySnapshottable s : snapshottables.values()) {
|
||||
for(INodeDirectory s : snapshottables.values()) {
|
||||
if (s.isAncestorDirectory(dir)) {
|
||||
throw new SnapshotException(
|
||||
"Nested snapshottable directories not allowed: path=" + path
|
||||
|
@ -112,33 +114,30 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
checkNestedSnapshottable(d, path);
|
||||
}
|
||||
|
||||
|
||||
final INodeDirectorySnapshottable s;
|
||||
if (d.isSnapshottable()) {
|
||||
//The directory is already a snapshottable directory.
|
||||
s = (INodeDirectorySnapshottable)d;
|
||||
s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
|
||||
d.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
|
||||
} else {
|
||||
s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshotId(),
|
||||
fsdir.getINodeMap());
|
||||
d.addSnapshottableFeature();
|
||||
}
|
||||
addSnapshottable(s);
|
||||
addSnapshottable(d);
|
||||
}
|
||||
|
||||
/** Add the given snapshottable directory to {@link #snapshottables}. */
|
||||
public void addSnapshottable(INodeDirectorySnapshottable dir) {
|
||||
public void addSnapshottable(INodeDirectory dir) {
|
||||
Preconditions.checkArgument(dir.isSnapshottable());
|
||||
snapshottables.put(dir.getId(), dir);
|
||||
}
|
||||
|
||||
/** Remove the given snapshottable directory from {@link #snapshottables}. */
|
||||
private void removeSnapshottable(INodeDirectorySnapshottable s) {
|
||||
private void removeSnapshottable(INodeDirectory s) {
|
||||
snapshottables.remove(s.getId());
|
||||
}
|
||||
|
||||
/** Remove snapshottable directories from {@link #snapshottables} */
|
||||
public void removeSnapshottable(List<INodeDirectorySnapshottable> toRemove) {
|
||||
public void removeSnapshottable(List<INodeDirectory> toRemove) {
|
||||
if (toRemove != null) {
|
||||
for (INodeDirectorySnapshottable s : toRemove) {
|
||||
for (INodeDirectory s : toRemove) {
|
||||
removeSnapshottable(s);
|
||||
}
|
||||
}
|
||||
|
@ -152,22 +151,22 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
public void resetSnapshottable(final String path) throws IOException {
|
||||
final INodesInPath iip = fsdir.getINodesInPath4Write(path);
|
||||
final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
|
||||
if (!d.isSnapshottable()) {
|
||||
DirectorySnapshottableFeature sf = d.getDirectorySnapshottableFeature();
|
||||
if (sf == null) {
|
||||
// the directory is already non-snapshottable
|
||||
return;
|
||||
}
|
||||
final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
|
||||
if (s.getNumSnapshots() > 0) {
|
||||
if (sf.getNumSnapshots() > 0) {
|
||||
throw new SnapshotException("The directory " + path + " has snapshot(s). "
|
||||
+ "Please redo the operation after removing all the snapshots.");
|
||||
}
|
||||
|
||||
if (s == fsdir.getRoot()) {
|
||||
s.setSnapshotQuota(0);
|
||||
if (d == fsdir.getRoot()) {
|
||||
d.setSnapshotQuota(0);
|
||||
} else {
|
||||
s.replaceSelf(iip.getLatestSnapshotId(), fsdir.getINodeMap());
|
||||
d.removeSnapshottableFeature();
|
||||
}
|
||||
removeSnapshottable(s);
|
||||
removeSnapshottable(d);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -180,10 +179,15 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
* Throw IOException when the given path does not lead to an
|
||||
* existing snapshottable directory.
|
||||
*/
|
||||
public INodeDirectorySnapshottable getSnapshottableRoot(final String path
|
||||
) throws IOException {
|
||||
final INodesInPath i = fsdir.getINodesInPath4Write(path);
|
||||
return INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
|
||||
public INodeDirectory getSnapshottableRoot(final String path)
|
||||
throws IOException {
|
||||
final INodeDirectory dir = INodeDirectory.valueOf(fsdir
|
||||
.getINodesInPath4Write(path).getLastINode(), path);
|
||||
if (!dir.isSnapshottable()) {
|
||||
throw new SnapshotException(
|
||||
"Directory is not a snapshottable directory: " + path);
|
||||
}
|
||||
return dir;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -202,7 +206,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
*/
|
||||
public String createSnapshot(final String path, String snapshotName
|
||||
) throws IOException {
|
||||
INodeDirectorySnapshottable srcRoot = getSnapshottableRoot(path);
|
||||
INodeDirectory srcRoot = getSnapshottableRoot(path);
|
||||
|
||||
if (snapshotCounter == getMaxSnapshotID()) {
|
||||
// We have reached the maximum allowable snapshot ID and since we don't
|
||||
|
@ -235,7 +239,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
// parse the path, and check if the path is a snapshot path
|
||||
// the INodeDirectorySnapshottable#valueOf method will throw Exception
|
||||
// if the path is not for a snapshottable directory
|
||||
INodeDirectorySnapshottable srcRoot = getSnapshottableRoot(path);
|
||||
INodeDirectory srcRoot = getSnapshottableRoot(path);
|
||||
srcRoot.removeSnapshot(snapshotName, collectedBlocks, removedINodes);
|
||||
numSnapshots.getAndDecrement();
|
||||
}
|
||||
|
@ -258,8 +262,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
final String newSnapshotName) throws IOException {
|
||||
// Find the source root directory path where the snapshot was taken.
|
||||
// All the check for path has been included in the valueOf method.
|
||||
final INodeDirectorySnapshottable srcRoot
|
||||
= INodeDirectorySnapshottable.valueOf(fsdir.getINode(path), path);
|
||||
final INodeDirectory srcRoot = getSnapshottableRoot(path);
|
||||
// Note that renameSnapshot and createSnapshot are synchronized externally
|
||||
// through FSNamesystem's write lock
|
||||
srcRoot.renameSnapshot(path, oldSnapshotName, newSnapshotName);
|
||||
|
@ -285,9 +288,9 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
snapshotCounter = counter;
|
||||
}
|
||||
|
||||
INodeDirectorySnapshottable[] getSnapshottableDirs() {
|
||||
INodeDirectory[] getSnapshottableDirs() {
|
||||
return snapshottables.values().toArray(
|
||||
new INodeDirectorySnapshottable[snapshottables.size()]);
|
||||
new INodeDirectory[snapshottables.size()]);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -299,8 +302,9 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
out.writeInt(numSnapshots.get());
|
||||
|
||||
// write all snapshots.
|
||||
for(INodeDirectorySnapshottable snapshottableDir : snapshottables.values()) {
|
||||
for(Snapshot s : snapshottableDir.getSnapshotsByNames()) {
|
||||
for(INodeDirectory snapshottableDir : snapshottables.values()) {
|
||||
for (Snapshot s : snapshottableDir.getDirectorySnapshottableFeature()
|
||||
.getSnapshotList()) {
|
||||
s.write(out);
|
||||
}
|
||||
}
|
||||
|
@ -339,16 +343,16 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
|
||||
List<SnapshottableDirectoryStatus> statusList =
|
||||
new ArrayList<SnapshottableDirectoryStatus>();
|
||||
for (INodeDirectorySnapshottable dir : snapshottables.values()) {
|
||||
for (INodeDirectory dir : snapshottables.values()) {
|
||||
if (userName == null || userName.equals(dir.getUserName())) {
|
||||
SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
|
||||
dir.getModificationTime(), dir.getAccessTime(),
|
||||
dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
|
||||
dir.getLocalNameBytes(), dir.getId(),
|
||||
dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
|
||||
dir.getNumSnapshots(),
|
||||
dir.getSnapshotQuota(), dir.getParent() == null ?
|
||||
DFSUtil.EMPTY_BYTES :
|
||||
dir.getDirectorySnapshottableFeature().getNumSnapshots(),
|
||||
dir.getDirectorySnapshottableFeature().getSnapshotQuota(),
|
||||
dir.getParent() == null ? DFSUtil.EMPTY_BYTES :
|
||||
DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
|
||||
statusList.add(status);
|
||||
}
|
||||
|
@ -364,20 +368,18 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
*/
|
||||
public SnapshotDiffReport diff(final String path, final String from,
|
||||
final String to) throws IOException {
|
||||
// Find the source root directory path where the snapshots were taken.
|
||||
// All the check for path has been included in the valueOf method.
|
||||
final INodeDirectory snapshotRoot = getSnapshottableRoot(path);
|
||||
|
||||
if ((from == null || from.isEmpty())
|
||||
&& (to == null || to.isEmpty())) {
|
||||
// both fromSnapshot and toSnapshot indicate the current tree
|
||||
return new SnapshotDiffReport(path, from, to,
|
||||
Collections.<DiffReportEntry> emptyList());
|
||||
}
|
||||
|
||||
// Find the source root directory path where the snapshots were taken.
|
||||
// All the check for path has been included in the valueOf method.
|
||||
INodesInPath inodesInPath = fsdir.getINodesInPath4Write(path.toString());
|
||||
final INodeDirectorySnapshottable snapshotRoot = INodeDirectorySnapshottable
|
||||
.valueOf(inodesInPath.getLastINode(), path);
|
||||
|
||||
final SnapshotDiffInfo diffs = snapshotRoot.computeDiff(from, to);
|
||||
final SnapshotDiffInfo diffs = snapshotRoot
|
||||
.getDirectorySnapshottableFeature().computeDiff(snapshotRoot, from, to);
|
||||
return diffs != null ? diffs.generateReport() : new SnapshotDiffReport(
|
||||
path, from, to, Collections.<DiffReportEntry> emptyList());
|
||||
}
|
||||
|
@ -412,7 +414,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
getSnapshottableDirectories() {
|
||||
List<SnapshottableDirectoryStatus.Bean> beans =
|
||||
new ArrayList<SnapshottableDirectoryStatus.Bean>();
|
||||
for (INodeDirectorySnapshottable d : getSnapshottableDirs()) {
|
||||
for (INodeDirectory d : getSnapshottableDirs()) {
|
||||
beans.add(toBean(d));
|
||||
}
|
||||
return beans.toArray(new SnapshottableDirectoryStatus.Bean[beans.size()]);
|
||||
|
@ -421,20 +423,19 @@ public class SnapshotManager implements SnapshotStatsMXBean {
|
|||
@Override // SnapshotStatsMXBean
|
||||
public SnapshotInfo.Bean[] getSnapshots() {
|
||||
List<SnapshotInfo.Bean> beans = new ArrayList<SnapshotInfo.Bean>();
|
||||
for (INodeDirectorySnapshottable d : getSnapshottableDirs()) {
|
||||
for (Snapshot s : d.getSnapshotList()) {
|
||||
for (INodeDirectory d : getSnapshottableDirs()) {
|
||||
for (Snapshot s : d.getDirectorySnapshottableFeature().getSnapshotList()) {
|
||||
beans.add(toBean(s));
|
||||
}
|
||||
}
|
||||
return beans.toArray(new SnapshotInfo.Bean[beans.size()]);
|
||||
}
|
||||
|
||||
public static SnapshottableDirectoryStatus.Bean toBean(
|
||||
INodeDirectorySnapshottable d) {
|
||||
public static SnapshottableDirectoryStatus.Bean toBean(INodeDirectory d) {
|
||||
return new SnapshottableDirectoryStatus.Bean(
|
||||
d.getFullPathName(),
|
||||
d.getNumSnapshots(),
|
||||
d.getSnapshotQuota(),
|
||||
d.getDirectorySnapshottableFeature().getNumSnapshots(),
|
||||
d.getDirectorySnapshottableFeature().getSnapshotQuota(),
|
||||
d.getModificationTime(),
|
||||
Short.valueOf(Integer.toOctalString(
|
||||
d.getFsPermissionShort())),
|
||||
|
|
|
@@ -1440,6 +1440,12 @@ public class DFSAdmin extends FsShell {
    } else if ("-fetchImage".equals(cmd)) {
      System.err.println("Usage: java DFSAdmin"
          + " [-fetchImage <local directory>]");
    } else if ("-shutdownDatanode".equals(cmd)) {
      System.err.println("Usage: java DFSAdmin"
          + " [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]");
    } else if ("-getDatanodeInfo".equals(cmd)) {
      System.err.println("Usage: java DFSAdmin"
          + " [-getDatanodeInfo <datanode_host:ipc_port>]");
    } else {
      System.err.println("Usage: java DFSAdmin");
      System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");

@@ -0,0 +1,67 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.util;

import java.io.Serializable;


/**
 * Bit format in a long.
 */
public class LongBitFormat implements Serializable {
  private static final long serialVersionUID = 1L;

  private final String NAME;
  /** Bit offset */
  private final int OFFSET;
  /** Bit length */
  private final int LENGTH;
  /** Minimum value */
  private final long MIN;
  /** Maximum value */
  private final long MAX;
  /** Bit mask */
  private final long MASK;

  public LongBitFormat(String name, LongBitFormat previous, int length, long min) {
    NAME = name;
    OFFSET = previous == null? 0: previous.OFFSET + previous.LENGTH;
    LENGTH = length;
    MIN = min;
    MAX = ((-1L) >>> (64 - LENGTH));
    MASK = MAX << OFFSET;
  }

  /** Retrieve the value from the record. */
  public long retrieve(long record) {
    return (record & MASK) >>> OFFSET;
  }

  /** Combine the value to the record. */
  public long combine(long value, long record) {
    if (value < MIN) {
      throw new IllegalArgumentException(
          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
    }
    if (value > MAX) {
      throw new IllegalArgumentException(
          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
    }
    return (record & ~MASK) | (value << OFFSET);
  }
}
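[Editor's note] For orientation, a brief usage sketch of the LongBitFormat class added above. The field names and widths here are illustrative only and do not appear in the patch; chaining formats via the "previous" argument makes each field start right after the one before it.

    // Illustrative only: pack a 12-bit "id" and a 48-bit "offset" into one long.
    // id occupies bits 0-11, offset occupies bits 12-59.
    LongBitFormat idFormat = new LongBitFormat("id", null, 12, 0);
    LongBitFormat offsetFormat = new LongBitFormat("offset", idFormat, 48, 0);

    long record = 0L;
    record = idFormat.combine(42L, record);          // store id = 42
    record = offsetFormat.combine(123456L, record);  // store offset = 123456

    assert idFormat.retrieve(record) == 42L;
    assert offsetFormat.retrieve(record) == 123456L;
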
@@ -1451,6 +1451,37 @@
  </description>
</property>

<property>
  <name>dfs.data.transfer.protection</name>
  <value></value>
  <description>
    A comma-separated list of SASL protection values used for secured
    connections to the DataNode when reading or writing block data. Possible
    values are authentication, integrity and privacy. authentication means
    authentication only and no integrity or privacy; integrity implies
    authentication and integrity are enabled; and privacy implies all of
    authentication, integrity and privacy are enabled. If
    dfs.encrypt.data.transfer is set to true, then it supersedes the setting for
    dfs.data.transfer.protection and enforces that all connections must use a
    specialized encrypted SASL handshake. This property is ignored for
    connections to a DataNode listening on a privileged port. In this case, it
    is assumed that the use of a privileged port establishes sufficient trust.
  </description>
</property>

<property>
  <name>dfs.data.transfer.saslproperties.resolver.class</name>
  <value></value>
  <description>
    SaslPropertiesResolver used to resolve the QOP used for a connection to the
    DataNode when reading or writing block data. If not specified, the full set
    of values specified in dfs.data.transfer.protection is used while
    determining the QOP used for the connection. If a class is specified, then
    the QOP values returned by the class will be used while determining the QOP
    used for the connection.
  </description>
</property>

<property>
  <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
  <value>false</value>
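[Editor's note] As a rough illustration of how a client or test might set these new keys programmatically; the chosen QOP value and the resolver class name are assumptions for the example, not part of the patch.

    import org.apache.hadoop.conf.Configuration;

    public class SaslDataTransferConfExample {
      public static Configuration secureDataTransferConf() {
        Configuration conf = new Configuration();
        // Request the strongest SASL QOP for DataNode data transfer.
        conf.set("dfs.data.transfer.protection", "privacy");
        // Optionally narrow the QOP per connection with a custom resolver.
        // The class name below is hypothetical.
        // conf.set("dfs.data.transfer.saslproperties.resolver.class",
        //     "org.example.MySaslPropertiesResolver");
        return conf;
      }
    }
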
@@ -103,7 +103,7 @@
    }

    var url = '/webhdfs/v1' + abs_path + '?op=GET_BLOCK_LOCATIONS';
    $.ajax({"url": url, "crossDomain": true}).done(function(data) {
    $.get(url).done(function(data) {
      var d = get_response(data, "LocatedBlocks");
      if (d === null) {
        show_err_msg(get_response_err_msg(data));

@@ -270,7 +270,7 @@ Centralized Cache Management in HDFS
** {Native Libraries}

  In order to lock block files into memory, the DataNode relies on native JNI
  code found in <<<libhadoop.so>>>. Be sure to
  code found in <<<libhadoop.so>>> or <<<hadoop.dll>>> on Windows. Be sure to
  {{{../hadoop-common/NativeLibraries.html}enable JNI}} if you are using HDFS
  centralized cache management.

@@ -283,11 +283,11 @@ Centralized Cache Management in HDFS
* dfs.datanode.max.locked.memory

  This determines the maximum amount of memory a DataNode will use for caching.
  The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
  also needs to be increased to match this parameter (see below section on
  {{OS Limits}}). When setting this value, please remember that you will need
  space in memory for other things as well, such as the DataNode and
  application JVM heaps and the operating system page cache.
  On Unix-like systems, the "locked-in-memory size" ulimit (<<<ulimit -l>>>) of
  the DataNode user also needs to be increased to match this parameter (see
  below section on {{OS Limits}}). When setting this value, please remember
  that you will need space in memory for other things as well, such as the
  DataNode and application JVM heaps and the operating system page cache.

  *** Optional

@@ -339,3 +339,6 @@ Centralized Cache Management in HDFS
  "unlimited," indicating that there is no limit. Note that it's typical for
  <<<ulimit -l>>> to output the memory lock limit in KB, but
  dfs.datanode.max.locked.memory must be specified in bytes.

  This information does not apply to deployments on Windows. Windows has no
  direct equivalent of <<<ulimit -l>>>.

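[Editor's note] Because <<<ulimit -l>>> usually reports kilobytes while dfs.datanode.max.locked.memory is given in bytes, a small illustrative conversion helper follows; the helper class and the 65536 KB figure are examples, not taken from the documentation above.

    // Illustrative helper: derive a byte value for dfs.datanode.max.locked.memory
    // from a locked-memory ulimit reported in KB (as `ulimit -l` typically is).
    public final class LockedMemoryLimit {
      private LockedMemoryLimit() {}

      public static long ulimitKbToBytes(long ulimitKb) {
        return ulimitKb * 1024L;
      }

      public static void main(String[] args) {
        long ulimitKb = 65536L; // e.g. `ulimit -l` printed 65536
        System.out.println("dfs.datanode.max.locked.memory="
            + ulimitKbToBytes(ulimitKb)); // 67108864 bytes (64 MB)
      }
    }
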
@@ -838,41 +838,22 @@ digest:hdfs-zkfcs:vlUvLnd8MlacsE80rDuu6ONESbM=:rwcda

  $ mvn clean package -Pdist

  This will generate a jar with the BookKeeperJournalManager, all the dependencies
  needed by the journal manager,
  This will generate a jar with the BookKeeperJournalManager,
  hadoop-hdfs/src/contrib/bkjournal/target/hadoop-hdfs-bkjournal-<VERSION>.jar

  Note that the -Pdist part of the build command is important, as otherwise
  the dependencies would not be packaged in the jar. The dependencies included in
  the jar are {{{http://maven.apache.org/plugins/maven-shade-plugin/}shaded}} to
  avoid conflicts with other dependencies of the NameNode.
  Note that the -Pdist part of the build command is important, this would
  copy the dependent bookkeeper-server jar under
  hadoop-hdfs/src/contrib/bkjournal/target/lib.

  *** <<Putting the BookKeeperJournalManager in the NameNode classpath>>

  To run a HDFS namenode using BookKeeper as a backend, copy the bkjournal
  jar, generated above, into the lib directory of hdfs. In the standard
  distribution of HDFS, this is at $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/
  To run a HDFS namenode using BookKeeper as a backend, copy the bkjournal and
  bookkeeper-server jar, mentioned above, into the lib directory of hdfs. In the
  standard distribution of HDFS, this is at $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/

  cp hadoop-hdfs/src/contrib/bkjournal/target/hadoop-hdfs-bkjournal-<VERSION>.jar $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/

  *** <<Current limitations>>

  1) NameNode format command will not format the BookKeeper data automatically.
  We have to clean the data manually from BookKeeper cluster
  and create the /ledgers/available path in Zookeeper.
----
  $ zkCli.sh create /ledgers 0
  $ zkCli.sh create /ledgers/available 0
----
  Note:
  bookkeeper://zk1:2181;zk2:2181;zk3:2181/hdfsjournal
  The final part /hdfsjournal specifies the znode in zookeeper where
  ledger metadata will be stored. Administrators may set this to anything
  they wish.

  2) Security in BookKeeper. BookKeeper does not support SASL nor SSL for
  connections between the NameNode and BookKeeper storage nodes.

  3) Auto-Recovery of storage node failures. Work inprogress
  {{{https://issues.apache.org/jira/browse/BOOKKEEPER-237 }BOOKKEEPER-237}}.
  Currently we have the tools to manually recover the data from failed storage nodes.
  1) Security in BookKeeper. BookKeeper does not support SASL nor SSL for
  connections between the NameNode and BookKeeper storage nodes.

@@ -394,7 +394,7 @@ Hello, webhdfs user!
  * Submit a HTTP PUT request.

+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/<PATH>?op=MKDIRS[&permission=<OCTAL>]"
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=MKDIRS[&permission=<OCTAL>]"
+---------------------------------

  The client receives a response with a {{{Boolean JSON Schema}<<<boolean>>> JSON object}}:
@@ -419,7 +419,7 @@ Transfer-Encoding: chunked
  * Submit a HTTP PUT request.

+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/<PATH>?op=CREATESYMLINK
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CREATESYMLINK
                              &destination=<PATH>[&createParent=<true|false>]"
+---------------------------------

@@ -934,7 +934,7 @@ Transfer-Encoding: chunked
  * Submit a HTTP PUT request.

+---------------------------------
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=op=SETXATTR
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETXATTR
                              &xattr.name=<XATTRNAME>&xattr.value=<XATTRVALUE>
                              &flag=<FLAG>"
+---------------------------------

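[Editor's note] For completeness, a Java equivalent of the corrected MKDIRS call above, showing the /webhdfs/v1 prefix in the request URL; the host, port, user and path are placeholders, not values from the patch.

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHdfsMkdirsExample {
      public static void main(String[] args) throws Exception {
        // Same request as the curl example: PUT .../webhdfs/v1/<PATH>?op=MKDIRS
        URL url = new URL("http://namenode.example.com:50070"
            + "/webhdfs/v1/tmp/demo?op=MKDIRS&permission=755&user.name=hdfs");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        System.out.println("HTTP " + conn.getResponseCode());
        conn.disconnect();
      }
    }
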
@ -33,9 +33,11 @@ import org.apache.hadoop.fs.FileSystem;
|
|||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.net.Peer;
|
||||
import org.apache.hadoop.hdfs.net.TcpPeerServer;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
||||
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
|
||||
|
@ -48,6 +50,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
|
|||
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
|
||||
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.log4j.Level;
|
||||
import org.apache.log4j.LogManager;
|
||||
|
||||
|
@ -192,7 +195,8 @@ public class BlockReaderTestUtil {
|
|||
setAllowShortCircuitLocalReads(true).
|
||||
setRemotePeerFactory(new RemotePeerFactory() {
|
||||
@Override
|
||||
public Peer newConnectedPeer(InetSocketAddress addr)
|
||||
public Peer newConnectedPeer(InetSocketAddress addr,
|
||||
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
|
||||
throws IOException {
|
||||
Peer peer = null;
|
||||
Socket sock = NetUtils.
|
||||
|
@ -251,4 +255,4 @@ public class BlockReaderTestUtil {
|
|||
LogManager.getLogger(DataNode.class.getName()).setLevel(
|
||||
Level.TRACE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -19,12 +19,15 @@ package org.apache.hadoop.hdfs;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;

@@ -660,73 +663,81 @@ public class MiniDFSCluster {
boolean checkDataNodeHostConfig,
Configuration[] dnConfOverlays)
throws IOException {
ExitUtil.disableSystemExit();

synchronized (MiniDFSCluster.class) {
instanceId = instanceCount++;
}

this.conf = conf;
base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
this.waitSafeMode = waitSafeMode;
this.checkExitOnShutdown = checkExitOnShutdown;

int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
int safemodeExtension = conf.getInt(
DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);

// In an HA cluster, in order for the StandbyNode to perform checkpoints,
// it needs to know the HTTP port of the Active. So, if ephemeral ports
// are chosen, disable checkpoints for the test.
if (!nnTopology.allHttpPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
"since no HTTP ports have been specified.");
conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
}
if (!nnTopology.allIpcPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling log-roll triggering in the "
+ "Standby node since no IPC ports have been specified.");
conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
}

federation = nnTopology.isFederated();
boolean success = false;
try {
createNameNodesAndSetConf(
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
enableManagedDfsDirsRedundancy,
format, startOpt, clusterId, conf);
} catch (IOException ioe) {
LOG.error("IOE creating namenodes. Permissions dump:\n" +
createPermissionsDiagnosisString(data_dir));
throw ioe;
}
if (format) {
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
throw new IOException("Cannot remove data directory: " + data_dir +
ExitUtil.disableSystemExit();

synchronized (MiniDFSCluster.class) {
instanceId = instanceCount++;
}

this.conf = conf;
base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
this.waitSafeMode = waitSafeMode;
this.checkExitOnShutdown = checkExitOnShutdown;

int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
int safemodeExtension = conf.getInt(
DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);

// In an HA cluster, in order for the StandbyNode to perform checkpoints,
// it needs to know the HTTP port of the Active. So, if ephemeral ports
// are chosen, disable checkpoints for the test.
if (!nnTopology.allHttpPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
"since no HTTP ports have been specified.");
conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
}
if (!nnTopology.allIpcPortsSpecified() &&
nnTopology.isHA()) {
LOG.info("MiniDFSCluster disabling log-roll triggering in the "
+ "Standby node since no IPC ports have been specified.");
conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
}

federation = nnTopology.isFederated();
try {
createNameNodesAndSetConf(
nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
enableManagedDfsDirsRedundancy,
format, startOpt, clusterId, conf);
} catch (IOException ioe) {
LOG.error("IOE creating namenodes. Permissions dump:\n" +
createPermissionsDiagnosisString(data_dir));
throw ioe;
}
if (format) {
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
throw new IOException("Cannot remove data directory: " + data_dir +
createPermissionsDiagnosisString(data_dir));
}
}

if (startOpt == StartupOption.RECOVER) {
return;
}

// Start the DataNodes
startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
dnStartOpt != null ? dnStartOpt : startOpt,
racks, hosts, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
waitClusterUp();
//make sure ProxyUsers uses the latest conf
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
success = true;
} finally {
if (!success) {
shutdown();
}
}

if (startOpt == StartupOption.RECOVER) {
return;
}

// Start the DataNodes
startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
dnStartOpt != null ? dnStartOpt : startOpt,
racks, hosts, simulatedCapacities, setupHostsFile,
checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
waitClusterUp();
//make sure ProxyUsers uses the latest conf
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}

/**

@@ -1308,15 +1319,42 @@ public class MiniDFSCluster {
}

SecureResources secureResources = null;
if (UserGroupInformation.isSecurityEnabled()) {
if (UserGroupInformation.isSecurityEnabled() &&
conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
try {
secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
} catch (Exception ex) {
ex.printStackTrace();
}
}
DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf,
secureResources);
final int maxRetriesOnSasl = conf.getInt(
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
int numRetries = 0;
DataNode dn = null;
while (true) {
try {
dn = DataNode.instantiateDataNode(dnArgs, dnConf,
secureResources);
break;
} catch (IOException e) {
// Work around issue testing security where rapidly starting multiple
// DataNodes using the same principal gets rejected by the KDC as a
// replay attack.
if (UserGroupInformation.isSecurityEnabled() &&
numRetries < maxRetriesOnSasl) {
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
break;
}
++numRetries;
continue;
}
throw e;
}
}
if(dn == null)
throw new IOException("Cannot start DataNode in "
+ dnConf.get(DFS_DATANODE_DATA_DIR_KEY));

@@ -51,6 +51,8 @@ public class TestBlockMissingException {
long blockSize = 1024L;
int numBlocks = 4;
conf = new HdfsConfiguration();
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
try {
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
dfs.waitActive();

@@ -64,6 +64,8 @@ public class TestBlockReaderLocalLegacy {
conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
UserGroupInformation.getCurrentUser().getShortUserName());
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
return conf;
}

@@ -73,7 +73,8 @@ public class TestClientReportBadBlock {
public void startUpCluster() throws IOException {
// disable block scanner
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);

// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
.build();
cluster.waitActive();

@@ -88,6 +88,8 @@ public class TestCrcCorruption {
@Test(timeout=50000)
public void testCorruptionDuringWrt() throws Exception {
Configuration conf = new HdfsConfiguration();
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
MiniDFSCluster cluster = null;

try {

@@ -152,7 +154,8 @@ public class TestCrcCorruption {
int numDataNodes = 2;
short replFactor = 2;
Random random = new Random();

// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();

@@ -334,6 +337,8 @@ public class TestCrcCorruption {
short replFactor = (short)numDataNodes;
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();

try {

@@ -1479,7 +1479,8 @@ public class TestDFSShell {
Path root = new Path("/test/get");
final Path remotef = new Path(root, fname);
final Configuration conf = new HdfsConfiguration();

// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
TestGetRunner runner = new TestGetRunner() {
private int count = 0;
private final FsShell shell = new FsShell(conf);

@@ -202,6 +202,8 @@ public class TestEncryptedTransfer {
MiniDFSCluster cluster = null;
try {
Configuration conf = new Configuration();
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
cluster = new MiniDFSCluster.Builder(conf).build();

FileSystem fs = getFileSystem(conf);

@@ -58,6 +58,7 @@ public class TestMissingBlocksAlert {
Configuration conf = new HdfsConfiguration();
//minimize test delay
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
int fileLen = 10*1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2);

@@ -284,15 +284,17 @@ public class TestPread {
numHedgedReadPoolThreads);
conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
hedgedReadTimeoutMillis);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
// Set up the InjectionHandler
DFSClientFaultInjector.instance = Mockito
.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
final int sleepMs = 100;
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
if (true) {
Thread.sleep(hedgedReadTimeoutMillis + 1);
Thread.sleep(hedgedReadTimeoutMillis + sleepMs);
if (DFSClientFaultInjector.exceptionNum.compareAndSet(0, 1)) {
System.out.println("-------------- throw Checksum Exception");
throw new ChecksumException("ChecksumException test", 100);

@@ -301,6 +303,15 @@ public class TestPread {
return null;
}
}).when(injector).fetchFromDatanodeException();
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
if (true) {
Thread.sleep(sleepMs * 2);
}
return null;
}
}).when(injector).readFromDatanodeDelay();

MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.format(true).build();

@@ -329,11 +340,11 @@ public class TestPread {
} catch (BlockMissingException e) {
assertTrue(false);
} finally {
Mockito.reset(injector);
IOUtils.cleanup(null, input);
IOUtils.cleanup(null, output);
fileSys.close();
cluster.shutdown();
Mockito.reset(injector);
}
}

@@ -0,0 +1,110 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;

import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
import static org.junit.Assert.*;

import java.io.File;
import java.util.Properties;

import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public abstract class SaslDataTransferTestCase {

private static File baseDir;
private static String hdfsPrincipal;
private static MiniKdc kdc;
private static String keytab;
private static String spnegoPrincipal;

@BeforeClass
public static void initKdc() throws Exception {
baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
SaslDataTransferTestCase.class.getSimpleName());
FileUtil.fullyDelete(baseDir);
assertTrue(baseDir.mkdirs());

Properties kdcConf = MiniKdc.createConf();
kdc = new MiniKdc(kdcConf, baseDir);
kdc.start();

String userName = UserGroupInformation.getLoginUser().getShortUserName();
File keytabFile = new File(baseDir, userName + ".keytab");
keytab = keytabFile.getAbsolutePath();
kdc.createPrincipal(keytabFile, userName + "/localhost", "HTTP/localhost");
hdfsPrincipal = userName + "/localhost@" + kdc.getRealm();
spnegoPrincipal = "HTTP/localhost@" + kdc.getRealm();
}

@AfterClass
public static void shutdownKdc() {
if (kdc != null) {
kdc.stop();
}
FileUtil.fullyDelete(baseDir);
}

/**
* Creates configuration for starting a secure cluster.
*
* @param dataTransferProtection supported QOPs
* @return configuration for starting a secure cluster
* @throws Exception if there is any failure
*/
protected HdfsConfiguration createSecureConfig(
String dataTransferProtection) throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

String keystoresDir = baseDir.getAbsolutePath();
String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
return conf;
}
}

@@ -0,0 +1,155 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
import static org.junit.Assert.*;

import java.io.IOException;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

public class TestSaslDataTransfer extends SaslDataTransferTestCase {

private static final int BLOCK_SIZE = 4096;
private static final int BUFFER_SIZE= 1024;
private static final int NUM_BLOCKS = 3;
private static final Path PATH = new Path("/file1");
private static final short REPLICATION = 3;

private MiniDFSCluster cluster;
private FileSystem fs;

@Rule
public ExpectedException exception = ExpectedException.none();

@After
public void shutdown() {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}

@Test
public void testAuthentication() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig(
"authentication,integrity,privacy");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
doTest(clientConf);
}

@Test
public void testIntegrity() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig(
"authentication,integrity,privacy");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "integrity");
doTest(clientConf);
}

@Test
public void testPrivacy() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig(
"authentication,integrity,privacy");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "privacy");
doTest(clientConf);
}

@Test
public void testClientAndServerDoNotHaveCommonQop() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig("privacy");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
exception.expect(IOException.class);
exception.expectMessage("could only be replicated to 0 nodes");
doTest(clientConf);
}

@Test
public void testClientSaslNoServerSasl() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig("");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
exception.expect(IOException.class);
exception.expectMessage("could only be replicated to 0 nodes");
doTest(clientConf);
}

@Test
public void testServerSaslNoClientSasl() throws Exception {
HdfsConfiguration clusterConf = createSecureConfig(
"authentication,integrity,privacy");
startCluster(clusterConf);
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");
exception.expect(IOException.class);
exception.expectMessage("could only be replicated to 0 nodes");
doTest(clientConf);
}

/**
* Tests DataTransferProtocol with the given client configuration.
*
* @param conf client configuration
* @throws IOException if there is an I/O error
*/
private void doTest(HdfsConfiguration conf) throws IOException {
fs = FileSystem.get(cluster.getURI(), conf);
FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE),
DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8"));
BlockLocation[] blockLocations = fs.getFileBlockLocations(PATH, 0,
Long.MAX_VALUE);
assertNotNull(blockLocations);
assertEquals(NUM_BLOCKS, blockLocations.length);
for (BlockLocation blockLocation: blockLocations) {
assertNotNull(blockLocation.getHosts());
assertEquals(3, blockLocation.getHosts().length);
}
}

/**
* Starts a cluster with the given configuration.
*
* @param conf cluster configuration
* @throws IOException if there is an I/O error
*/
private void startCluster(HdfsConfiguration conf) throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
}
}

@@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.balancer;

import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
import org.junit.Test;

public class TestBalancerWithSaslDataTransfer extends SaslDataTransferTestCase {

private static final TestBalancer TEST_BALANCER = new TestBalancer();

@Test
public void testBalancer0Authentication() throws Exception {
TEST_BALANCER.testBalancer0Internal(createSecureConfig("authentication"));
}

@Test
public void testBalancer0Integrity() throws Exception {
TEST_BALANCER.testBalancer0Internal(createSecureConfig("integrity"));
}

@Test
public void testBalancer0Privacy() throws Exception {
TEST_BALANCER.testBalancer0Internal(createSecureConfig("privacy"));
}
}

@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.RemotePeerFactory;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

@@ -160,7 +161,8 @@ public class TestBlockTokenWithDFS {
setConfiguration(conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr)
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();

@@ -209,6 +211,8 @@ public class TestBlockTokenWithDFS {
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
conf.setInt("ipc.client.connect.max.retries", 0);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
return conf;
}

@@ -116,7 +116,8 @@ public class DataNodeTestUtils {

public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b);
bpScanner.verifyBlock(b);
bpScanner.verifyBlock(new ExtendedBlock(b.getBlockPoolId(),
new BlockPoolSliceScanner.BlockScanInfo(b.getLocalBlock())));
}

private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn,

@@ -46,9 +46,11 @@ import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

@@ -58,6 +60,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

@@ -307,7 +310,8 @@ public class TestDataNodeVolumeFailure {
setConfiguration(conf).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr)
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
throws IOException {
Peer peer = null;
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;

@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;

@@ -114,7 +114,6 @@ public class TestFsDatasetCache {

@Before
public void setUp() throws Exception {
assumeTrue(!Path.WINDOWS);
conf = new HdfsConfiguration();
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);

@@ -143,6 +142,9 @@ public class TestFsDatasetCache {

@After
public void tearDown() throws Exception {
// Verify that each test uncached whatever it cached. This cleanup is
// required so that file descriptors are not leaked across tests.
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
if (fs != null) {
fs.close();
}

@@ -205,9 +207,16 @@ public class TestFsDatasetCache {
String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
FileChannel blockChannel =
((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
sizes[i] = blockChannel.size();
FileInputStream blockInputStream = null;
FileChannel blockChannel = null;
try {
blockInputStream =
(FileInputStream)fsd.getBlockInputStream(extBlock, 0);
blockChannel = blockInputStream.getChannel();
sizes[i] = blockChannel.size();
} finally {
IOUtils.cleanup(LOG, blockChannel, blockInputStream);
}
}
return sizes;
}

@@ -571,5 +580,7 @@ public class TestFsDatasetCache {
return true;
}
}, 1000, 30000);

dfs.removeCacheDirective(shortCacheDirectiveId);
}
}

@@ -146,4 +146,62 @@ public class TestDeleteRace {
}
}
}

private class RenameThread extends Thread {
private FileSystem fs;
private Path from;
private Path to;

RenameThread(FileSystem fs, Path from, Path to) {
this.fs = fs;
this.from = from;
this.to = to;
}

@Override
public void run() {
try {
Thread.sleep(1000);
LOG.info("Renaming " + from + " to " + to);

fs.rename(from, to);
LOG.info("Renamed " + from + " to " + to);
} catch (Exception e) {
LOG.info(e);
}
}
}

@Test
public void testRenameRace() throws Exception {
try {
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
Path dirPath1 = new Path("/testRenameRace1");
Path dirPath2 = new Path("/testRenameRace2");
Path filePath = new Path("/testRenameRace1/file1");

fs.mkdirs(dirPath1);
FSDataOutputStream out = fs.create(filePath);
Thread renameThread = new RenameThread(fs, dirPath1, dirPath2);
renameThread.start();

// write data and close to make sure a block is allocated.
out.write(new byte[32], 0, 32);
out.close();

// Restart name node so that it replays edit. If old path was
// logged in edit, it will fail to come up.
cluster.restartNameNode(0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}

}
}

@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.util.Canceler;

@@ -194,8 +193,8 @@ public class TestFSImageWithSnapshot {
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();

INodeDirectorySnapshottable rootNode =
(INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
INodeDirectory rootNode = fsn.dir.getINode4Write(root.toString())
.asDirectory();
assertTrue("The children list of root should be empty",
rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
// one snapshot on root: s1

@@ -612,6 +612,8 @@ public class TestFsck {
public void testCorruptBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
// Set short retry timeouts so this test runs faster
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
FileSystem fs = null;
DFSClient dfsClient = null;
LocatedBlocks blocks = null;

@@ -77,7 +77,7 @@ public class TestINodeFile {
private final PermissionStatus perm = new PermissionStatus(
"userName", null, FsPermission.getDefault());
private short replication;
private long preferredBlockSize;
private long preferredBlockSize = 1024;

INodeFile createINodeFile(short replication, long preferredBlockSize) {
return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,

Some files were not shown because too many files have changed in this diff.