diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 2b12cc70c21..8398ad0d9f0 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -677,6 +677,9 @@ Release 2.8.0 - UNRELEASED HADOOP-12513. Dockerfile lacks initial 'apt-get update'. (Akihiro Suda via ozawa) + HADOOP-12457. [JDK8] Fix a failure of compiling common by javadoc. + (Akira AJISAKA via ozawa) + OPTIMIZATIONS HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString() diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 8cdae3d9fac..1ed26540654 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -167,7 +167,13 @@ import com.google.common.base.Preconditions; * will be resolved to another property in this Configuration, while * ${user.name} would then ordinarily be resolved to the value * of the System property with that name. - * By default, warnings will be given to any deprecated configuration + *

When conf.get("otherdir") is called, then ${env.BASE_DIR} + * will be resolved to the value of the ${BASE_DIR} environment variable. + * It supports ${env.NAME:-default} and ${env.NAME-default} notations. + * The former is resolved to "default" if the ${NAME} environment variable is undefined + * or its value is empty. + * The latter behaves the same way only if ${NAME} is undefined. + *

By default, warnings will be given to any deprecated configuration * parameters and these are suppressible by configuring * log4j.logger.org.apache.hadoop.conf.Configuration.deprecation in * log4j.properties file. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java index 6983eb922c8..a0663950159 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java @@ -65,7 +65,7 @@ class Delete { "-[rR]: Recursively deletes directories.\n" + "-skipTrash: option bypasses trash, if enabled, and immediately " + "deletes .\n" + - "-safely: option requires safety confirmation,if enabled, " + + "-safely: option requires safety confirmation, if enabled, " + "requires confirmation before deleting large directory with more " + "than files. Delay is " + "expected when walking over large directory recursively to count " + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 371109a09e2..20c993b8e5c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -413,7 +413,7 @@ public abstract class Server { * if this request took too much time relative to other requests * we consider that as a slow RPC. 3 is a magic number that comes * from 3 sigma deviation. A very simple explanation can be found - * by searching for 68–95–99.7 rule. We flag an RPC as slow RPC + * by searching for 68-95-99.7 rule. We flag an RPC as slow RPC * if and only if it falls above 99.7% of requests. We start this logic * only once we have enough sample size. */