diff --git a/BUILDING.txt b/BUILDING.txt index 408cae13055..c7a91da5d0d 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -75,6 +75,7 @@ Optional packages: $ sudo apt-get install snappy libsnappy-dev * Intel ISA-L library for erasure coding Please refer to https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version + (OR https://github.com/01org/isa-l) * Bzip2 $ sudo apt-get install bzip2 libbz2-dev * Jansson (C Library for JSON) @@ -188,11 +189,12 @@ Maven build goals: Intel ISA-L build options: - Intel ISA-L is a erasure coding library that can be utilized by the native code. + Intel ISA-L is an erasure coding library that can be utilized by the native code. It is currently an optional component, meaning that Hadoop can be built with or without this dependency. Note the library is used via dynamic module. Please reference the official site for the library details. https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version + (OR https://github.com/01org/isa-l) * Use -Drequire.isal to fail the build if libisal.so is not found. If this option is not specified and the isal library is missing, diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java index 4bdc80826aa..5c93fd37374 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java @@ -61,9 +61,9 @@ import java.util.*; *
3600
seconds. This is also used for the rollover interval for
@@ -79,17 +79,16 @@ import java.util.*;
*
* * Out of the box it provides 3 signer secret provider implementations: - * "string", "random", and "zookeeper" + * "file", "random", and "zookeeper" *
* Additional signer secret providers are supported via the * {@link SignerSecretProvider} class. ** For the HTTP cookies mentioned above, the SignerSecretProvider is used to * determine the secret to use for signing the cookies. Different - * implementations can have different behaviors. The "string" implementation - * simply uses the string set in the [#PREFIX#.]signature.secret property - * mentioned above. The "random" implementation uses a randomly generated - * secret that rolls over at the interval specified by the + * implementations can have different behaviors. The "file" implementation + * loads the secret from a specified file. The "random" implementation uses a + * randomly generated secret that rolls over at the interval specified by the * [#PREFIX#.]token.validity mentioned above. The "zookeeper" implementation * is like the "random" one, except that it synchronizes the random secret * and rollovers between multiple servers; it's meant for HA services. @@ -97,12 +96,12 @@ import java.util.*; * The relevant configuration properties are: *
name
property as a Pattern
.
* If no such property is specified, or if the specified value is not a valid
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 9b4069a422a..a708900e630 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -90,14 +90,22 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
/**
* CallQueue related settings. These are not used directly, but rather
* combined with a namespace and port. For instance:
- * IPC_CALLQUEUE_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
+ * IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
*/
- public static final String IPC_CALLQUEUE_NAMESPACE = "ipc";
+ public static final String IPC_NAMESPACE = "ipc";
public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
- public static final String IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY = "identity-provider.impl";
+ public static final String IPC_SCHEDULER_IMPL_KEY = "scheduler.impl";
+ public static final String IPC_IDENTITY_PROVIDER_KEY = "identity-provider.impl";
public static final String IPC_BACKOFF_ENABLE = "backoff.enable";
public static final boolean IPC_BACKOFF_ENABLE_DEFAULT = false;
+ /**
+ * IPC scheduler priority levels.
+ */
+ public static final String IPC_SCHEDULER_PRIORITY_LEVELS_KEY =
+ "scheduler.priority.levels";
+ public static final int IPC_SCHEDULER_PRIORITY_LEVELS_DEFAULT_KEY = 4;
+
/** This is for specifying the implementation for the mappings from
* hostnames to the racks they belong to
*/
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index e74c41c8290..e2d6ecdc07b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -23,7 +23,6 @@ import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
@@ -381,46 +380,6 @@ public class FileUtil {
}
- /** Copy all files in a directory to one output file (merge). */
- public static boolean copyMerge(FileSystem srcFS, Path srcDir,
- FileSystem dstFS, Path dstFile,
- boolean deleteSource,
- Configuration conf, String addString) throws IOException {
- dstFile = checkDest(srcDir.getName(), dstFS, dstFile, false);
-
- if (!srcFS.getFileStatus(srcDir).isDirectory())
- return false;
-
- OutputStream out = dstFS.create(dstFile);
-
- try {
- FileStatus contents[] = srcFS.listStatus(srcDir);
- Arrays.sort(contents);
- for (int i = 0; i < contents.length; i++) {
- if (contents[i].isFile()) {
- InputStream in = srcFS.open(contents[i].getPath());
- try {
- IOUtils.copyBytes(in, out, conf, false);
- if (addString!=null)
- out.write(addString.getBytes("UTF-8"));
-
- } finally {
- in.close();
- }
- }
- }
- } finally {
- out.close();
- }
-
-
- if (deleteSource) {
- return srcFS.delete(srcDir, true);
- } else {
- return true;
- }
- }
-
/** Copy local files to a FileSystem. */
public static boolean copy(File src,
FileSystem dstFS, Path dst,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIOException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIOException.java
index 459a83669aa..deb3880ee41 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIOException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIOException.java
@@ -33,6 +33,7 @@ public class PathIOException extends IOException {
// uris with no authority
private String operation;
private String path;
+ private String fullyQualifiedPath;
private String targetPath;
/**
@@ -68,6 +69,11 @@ public class PathIOException extends IOException {
this.path = path;
}
+ public PathIOException withFullyQualifiedPath(String fqPath) {
+ fullyQualifiedPath = fqPath;
+ return this;
+ }
+
/** Format:
* cmd: {operation} `path' {to `target'}: error string
*/
@@ -85,6 +91,9 @@ public class PathIOException extends IOException {
if (getCause() != null) {
message.append(": " + getCause().getMessage());
}
+ if (fullyQualifiedPath != null && !fullyQualifiedPath.equals(path)) {
+ message.append(": ").append(formatPath(fullyQualifiedPath));
+ }
return message.toString();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 4b8d3bc11ac..5fcfdf88eff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -220,7 +220,8 @@ abstract class CommandWithDestination extends FsCommand {
throw new PathExistsException(dst.toString());
}
} else if (!dst.parentExists()) {
- throw new PathNotFoundException(dst.toString());
+ throw new PathNotFoundException(dst.toString())
+ .withFullyQualifiedPath(dst.path.toUri().toString());
}
super.processArguments(args);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 02a3b251bf0..d35928228a7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
@Override
protected void processPath(PathData src, PathData target) throws IOException {
- if (!src.fs.getUri().equals(target.fs.getUri())) {
+ String srcUri = src.fs.getUri().getScheme() + "://" +
+ src.fs.getUri().getHost();
+ String dstUri = target.fs.getUri().getScheme() + "://" +
+ target.fs.getUri().getHost();
+ if (!srcUri.equals(dstUri)) {
throw new PathIOException(src.toString(),
"Does not match target filesystem");
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
index 72a463af7ba..a6c751ea6f0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
@@ -72,7 +72,8 @@ class Touch extends FsCommand {
@Override
protected void processNonexistentPath(PathData item) throws IOException {
if (!item.parentExists()) {
- throw new PathNotFoundException(item.toString());
+ throw new PathNotFoundException(item.toString())
+ .withFullyQualifiedPath(item.path.toUri().toString());
}
touchz(item);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
index c10f839db4f..1a7782aa63c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.ipc;
import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
@@ -26,6 +27,7 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
/**
* Abstracts queue operations for different blocking queues.
@@ -42,6 +44,13 @@ public class CallQueueManager