listStatusIterator(final Path p)
* List the statuses and block locations of the files in the given path.
* Does not guarantee to return the iterator that traverses statuses
* of the files in a sorted order.
+ *
*
* If the path is a directory,
* if recursive is false, returns files in the directory;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
index dbb751dc44e..29e19989edd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
@@ -123,6 +123,13 @@ public LocatedFileStatus(long length, boolean isdir,
/**
* Get the file's block locations
+ *
+ * In HDFS, the returned BlockLocation will have different formats for
+ * replicated and erasure coded files.
+ * Please refer to
+ * {@link FileSystem#getFileBlockLocations(FileStatus, long, long)}
+ * for more details.
+ *
* @return the file's block locations
*/
public BlockLocation[] getBlockLocations() {
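
For reference, a minimal sketch (not part of this patch; the path `/user/example` is a placeholder) of a client consuming these block locations via `FileSystem#listLocatedStatus`:

```java
import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListBlockLocations {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // listLocatedStatus returns file statuses together with their block
    // locations; no ordering of the returned statuses is guaranteed.
    RemoteIterator<LocatedFileStatus> it =
        fs.listLocatedStatus(new Path("/user/example"));
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      for (BlockLocation block : status.getBlockLocations()) {
        // For HDFS, the meaning of offset/length/hosts differs between
        // replicated and erasure coded files; see
        // FileSystem#getFileBlockLocations(FileStatus, long, long).
        System.out.println(status.getPath() + " offset=" + block.getOffset()
            + " length=" + block.getLength()
            + " hosts=" + Arrays.toString(block.getHosts()));
      }
    }
  }
}
```
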
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index a450f664b20..fa447d8d469 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -27,6 +27,7 @@
import java.io.PrintStream;
import java.net.BindException;
import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.util.ArrayList;
@@ -993,14 +994,31 @@ public WebAppContext getWebAppContext(){
* Get the pathname to the webapps files.
* @param appName eg "secondary" or "datanode"
* @return the pathname as a URL
- * @throws FileNotFoundException if 'webapps' directory cannot be found on CLASSPATH.
+ * @throws FileNotFoundException if 'webapps' directory cannot be found
+ * on CLASSPATH or in the development location.
*/
protected String getWebAppsPath(String appName) throws FileNotFoundException {
- URL url = getClass().getClassLoader().getResource("webapps/" + appName);
- if (url == null)
- throw new FileNotFoundException("webapps/" + appName
- + " not found in CLASSPATH");
- String urlString = url.toString();
+ URL resourceUrl = null;
+ File webResourceDevLocation = new File("src/main/webapps", appName);
+ if (webResourceDevLocation.exists()) {
+ LOG.info("Web server is in development mode. Resources "
+ + "will be read from the source tree.");
+ try {
+ resourceUrl = webResourceDevLocation.getParentFile().toURI().toURL();
+ } catch (MalformedURLException e) {
+ throw new FileNotFoundException("Malformed URL while finding the "
+ + "web resource dir: " + e.getMessage());
+ }
+ } else {
+ resourceUrl =
+ getClass().getClassLoader().getResource("webapps/" + appName);
+
+ if (resourceUrl == null) {
+ throw new FileNotFoundException("webapps/" + appName +
+ " not found in CLASSPATH");
+ }
+ }
+ String urlString = resourceUrl.toString();
return urlString.substring(0, urlString.lastIndexOf('/'));
}
@@ -1200,6 +1218,7 @@ private void bindForPortRange(ServerConnector listener, int startPort)
* @throws Exception
*/
void openListeners() throws Exception {
+ LOG.debug("opening listeners: {}", listeners);
for (ServerConnector listener : listeners) {
if (listener.getLocalPort() != -1 && listener.getLocalPort() != -2) {
// This listener is either started externally or has been bound or was
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java
index e7f4c8319e3..050c15bc61f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java
@@ -28,7 +28,7 @@
* BoundedRangeFileInputStream on top of the same FSDataInputStream and they
* would not interfere with each other.
*/
-class BoundedRangeFileInputStream extends InputStream {
+public class BoundedRangeFileInputStream extends InputStream {
private FSDataInputStream in;
private long pos;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
index f82f4df2e51..fa85ed77a1f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
@@ -43,7 +43,7 @@
/**
* Compression related stuff.
*/
-final class Compression {
+public final class Compression {
static final Logger LOG = LoggerFactory.getLogger(Compression.class);
/**
@@ -75,7 +75,7 @@ public void flush() throws IOException {
/**
* Compression algorithms.
*/
- enum Algorithm {
+ public enum Algorithm {
LZO(TFile.COMPRESSION_LZO) {
private transient boolean checked = false;
private static final String defaultClazz =
@@ -348,7 +348,7 @@ public String getName() {
}
}
- static Algorithm getCompressionAlgorithmByName(String compressName) {
+ public static Algorithm getCompressionAlgorithmByName(String compressName) {
Algorithm[] algos = Algorithm.class.getEnumConstants();
for (Algorithm a : algos) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java
index a26a02d5769..0a194a3ce60 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java
@@ -25,7 +25,7 @@
* A simplified BufferedOutputStream with borrowed buffer, and allow users to
* see how much data have been buffered.
*/
-class SimpleBufferedOutputStream extends FilterOutputStream {
+public class SimpleBufferedOutputStream extends FilterOutputStream {
protected byte buf[]; // the borrowed buffer
protected int count = 0; // bytes used in buffer.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CpuTimeTracker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CpuTimeTracker.java
index 3f17c9ab113..b4ebe861c1f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CpuTimeTracker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CpuTimeTracker.java
@@ -97,7 +97,9 @@ public long getCumulativeCpuTime() {
* @param newTime new sample time
*/
public void updateElapsedJiffies(BigInteger elapsedJiffies, long newTime) {
- cumulativeCpuTime = elapsedJiffies.multiply(jiffyLengthInMillis);
+ BigInteger newValue = elapsedJiffies.multiply(jiffyLengthInMillis);
+ cumulativeCpuTime = newValue.compareTo(cumulativeCpuTime) >= 0 ?
+ newValue : cumulativeCpuTime;
sampleTime = newTime;
}
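
A small standalone sketch of the guard this hunk introduces: the cumulative CPU time only ever moves forward, so a sample that would move the counter backwards is ignored (class name and jiffy length below are illustrative, not part of the patch):

```java
import java.math.BigInteger;

// Illustrative reimplementation of the monotonic update in this hunk.
class MonotonicCpuTime {
  private final BigInteger jiffyLengthInMillis = BigInteger.valueOf(10);
  private BigInteger cumulativeCpuTime = BigInteger.ZERO;

  void updateElapsedJiffies(BigInteger elapsedJiffies) {
    BigInteger newValue = elapsedJiffies.multiply(jiffyLengthInMillis);
    // Keep the larger value: a stale or lower sample leaves the previous
    // cumulative value in place instead of decreasing it.
    cumulativeCpuTime =
        newValue.compareTo(cumulativeCpuTime) >= 0 ? newValue : cumulativeCpuTime;
  }

  BigInteger get() {
    return cumulativeCpuTime;
  }

  public static void main(String[] args) {
    MonotonicCpuTime t = new MonotonicCpuTime();
    t.updateElapsedJiffies(BigInteger.valueOf(200));  // 2000 ms
    t.updateElapsedJiffies(BigInteger.valueOf(150));  // stale sample, ignored
    System.out.println(t.get());                      // prints 2000
  }
}
```
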
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 8c8507c961d..538df97fbc2 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -871,30 +871,6 @@
File space usage statistics refresh interval in msec.
-
- fs.s3n.buffer.dir
- ${hadoop.tmp.dir}/s3n
- Determines where on the local filesystem the s3n:// filesystem
- should store files before sending them to S3
- (or after retrieving them from S3).
-
-
-
-
- fs.s3n.maxRetries
- 4
- The maximum number of retries for reading or writing files to S3,
- before we signal failure to the application.
-
-
-
-
- fs.s3n.sleepTimeSeconds
- 10
- The number of seconds to sleep between each S3 retry.
-
-
-
fs.swift.impl
org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem
@@ -911,56 +887,6 @@
-
- fs.s3n.awsAccessKeyId
- AWS access key ID used by S3 native file system.
-
-
-
- fs.s3n.awsSecretAccessKey
- AWS secret key used by S3 native file system.
-
-
-
- fs.s3n.block.size
- 67108864
- Block size to use when reading files using the native S3
- filesystem (s3n: URIs).
-
-
-
- fs.s3n.multipart.uploads.enabled
- false
- Setting this property to true enables multiple uploads to
- native S3 filesystem. When uploading a file, it is split into blocks
- if the size is larger than fs.s3n.multipart.uploads.block.size.
-
-
-
-
- fs.s3n.multipart.uploads.block.size
- 67108864
- The block size for multipart uploads to native S3 filesystem.
- Default size is 64MB.
-
-
-
-
- fs.s3n.multipart.copy.block.size
- 5368709120
- The block size for multipart copy in native S3 filesystem.
- Default size is 5GB.
-
-
-
-
- fs.s3n.server-side-encryption-algorithm
-
- Specify a server-side encryption algorithm for S3.
- Unset by default, and the only other currently allowable value is AES256.
-
-
-
fs.s3a.access.key
AWS access key ID used by S3A file system. Omit for IAM role-based or provider-based authentication.
@@ -1234,22 +1160,12 @@
uploads to.
-
- fs.s3a.fast.upload
- false
-
- Use the incremental block-based fast upload mechanism with
- the buffering mechanism set in fs.s3a.fast.upload.buffer.
-
-
-
fs.s3a.fast.upload.buffer
disk
- The buffering mechanism to use when using S3A fast upload
- (fs.s3a.fast.upload=true). Values: disk, array, bytebuffer.
- This configuration option has no effect if fs.s3a.fast.upload is false.
+ The buffering mechanism to use for data being written.
+ Values: disk, array, bytebuffer.
"disk" will use the directories listed in fs.s3a.buffer.dir as
the location(s) to save data prior to being uploaded.
@@ -1428,20 +1344,16 @@
The implementation class of the S3A AbstractFileSystem.
-
-
- fs.ozfs.impl
- org.apache.hadoop.fs.ozone.OzoneFileSystem
- The implementation class of the Ozone FileSystem.
-
+
+ fs.s3a.list.version
+ 2
+
+ Select which version of the S3 SDK's List Objects API to use. Currently
+ supported values are 2 (default) and 1 (older API).
+
+
-
- fs.AbstractFileSystem.ozfs.impl
- org.apache.hadoop.fs.ozone.OzFs
- The implementation class of the OzFs AbstractFileSystem.
-
-
-
+
fs.wasb.impl
org.apache.hadoop.fs.azure.NativeAzureFileSystem
@@ -1547,7 +1459,21 @@
-
+
+
+ fs.ozfs.impl
+ org.apache.hadoop.fs.ozone.OzoneFileSystem
+ The implementation class of the Ozone FileSystem.
+
+
+
+ fs.AbstractFileSystem.ozfs.impl
+ org.apache.hadoop.fs.ozone.OzFs
+ The implementation class of the OzFs AbstractFileSystem.
+
+
+
+
ipc.client.idlethreshold
@@ -1807,42 +1733,6 @@
Replication factor
-
-
-
- s3native.stream-buffer-size
- 4096
- The size of buffer to stream files.
- The size of this buffer should probably be a multiple of hardware
- page size (4096 on Intel x86), and it determines how much data is
- buffered during read and write operations.
-
-
-
- s3native.bytes-per-checksum
- 512
- The number of bytes per checksum. Must not be larger than
- s3native.stream-buffer-size
-
-
-
- s3native.client-write-packet-size
- 65536
- Packet size for clients to write
-
-
-
- s3native.blocksize
- 67108864
- Block size
-
-
-
- s3native.replication
- 3
- Replication factor
-
-
ftp.stream-buffer-size
@@ -1977,38 +1867,38 @@
- Enable/disable the cross-origin (CORS) filter.
hadoop.http.cross-origin.enabled
false
+ Enable/disable the cross-origin (CORS) filter.
+ hadoop.http.cross-origin.allowed-origins
+ *
Comma separated list of origins that are allowed for web
services needing cross-origin (CORS) support. Wildcards (*) and patterns
allowed
- hadoop.http.cross-origin.allowed-origins
- *
- Comma separated list of methods that are allowed for web
- services needing cross-origin (CORS) support.
hadoop.http.cross-origin.allowed-methods
GET,POST,HEAD
+ Comma separated list of methods that are allowed for web
+ services needing cross-origin (CORS) support.
- Comma separated list of headers that are allowed for web
- services needing cross-origin (CORS) support.
hadoop.http.cross-origin.allowed-headers
X-Requested-With,Content-Type,Accept,Origin
+ Comma separated list of headers that are allowed for web
+ services needing cross-origin (CORS) support.
- The number of seconds a pre-flighted request can be cached
- for web services needing cross-origin (CORS) support.
hadoop.http.cross-origin.max-age
1800
+ The number of seconds a pre-flighted request can be cached
+ for web services needing cross-origin (CORS) support.
@@ -2099,13 +1989,13 @@
+ hadoop.http.staticuser.user
+ dr.who
The user name to filter as, on static web filters
while rendering content. An example use is the HDFS
web UI (user to be used for browsing files).
- hadoop.http.staticuser.user
- dr.who
@@ -2453,34 +2343,6 @@
-
- hadoop.security.kms.client.failover.sleep.base.millis
- 100
-
- Expert only. The time to wait, in milliseconds, between failover
- attempts increases exponentially as a function of the number of
- attempts made so far, with a random factor of +/- 50%. This option
- specifies the base value used in the failover calculation. The
- first failover will retry immediately. The 2nd failover attempt
- will delay at least hadoop.security.client.failover.sleep.base.millis
- milliseconds. And so on.
-
-
-
-
- hadoop.security.kms.client.failover.sleep.max.millis
- 2000
-
- Expert only. The time to wait, in milliseconds, between failover
- attempts increases exponentially as a function of the number of
- attempts made so far, with a random factor of +/- 50%. This option
- specifies the maximum value to wait between failovers.
- Specifically, the time between two failover attempts will not
- exceed +/- 50% of hadoop.security.client.failover.sleep.max.millis
- milliseconds.
-
-
-
ipc.server.max.connections
0
@@ -2496,6 +2358,8 @@
+ hadoop.registry.rm.enabled
+ false
Is the registry enabled in the YARN Resource Manager?
@@ -2507,50 +2371,50 @@
If false, the paths must be created by other means,
and no automatic cleanup of service records will take place.
- hadoop.registry.rm.enabled
- false
+ hadoop.registry.zk.root
+ /registry
The root zookeeper node for the registry
- hadoop.registry.zk.root
- /registry
+ hadoop.registry.zk.session.timeout.ms
+ 60000
Zookeeper session timeout in milliseconds
- hadoop.registry.zk.session.timeout.ms
- 60000
+ hadoop.registry.zk.connection.timeout.ms
+ 15000
Zookeeper connection timeout in milliseconds
- hadoop.registry.zk.connection.timeout.ms
- 15000
+ hadoop.registry.zk.retry.times
+ 5
Zookeeper connection retry count before failing
- hadoop.registry.zk.retry.times
- 5
-
-
hadoop.registry.zk.retry.interval.ms
1000
+
+
+ hadoop.registry.zk.retry.ceiling.ms
+ 60000
Zookeeper retry limit in milliseconds, during
exponential backoff.
@@ -2560,20 +2424,20 @@
with the backoff policy, result in a long retry
period
- hadoop.registry.zk.retry.ceiling.ms
- 60000
+ hadoop.registry.zk.quorum
+ localhost:2181
List of hostname:port pairs defining the
zookeeper quorum binding for the registry
- hadoop.registry.zk.quorum
- localhost:2181
+ hadoop.registry.secure
+ false
Key to set if the registry is secure. Turning it on
changes the permissions policy from "open access"
@@ -2581,11 +2445,11 @@
a user adding one or more auth key pairs down their
own tree.
- hadoop.registry.secure
- false
+ hadoop.registry.system.acls
+ sasl:yarn@, sasl:mapred@, sasl:hdfs@
A comma separated list of Zookeeper ACL identifiers with
system access to the registry in a secure cluster.
@@ -2595,11 +2459,11 @@
If there is an "@" at the end of a SASL entry it
instructs the registry client to append the default kerberos domain.
- hadoop.registry.system.acls
- sasl:yarn@, sasl:mapred@, sasl:hdfs@
+ hadoop.registry.kerberos.realm
+
The kerberos realm: used to set the realm of
system principals which do not declare their realm,
@@ -2611,26 +2475,24 @@
If neither are known and the realm is needed, then the registry
service/client will fail.
- hadoop.registry.kerberos.realm
-
+ hadoop.registry.jaas.context
+ Client
Key to define the JAAS context. Used in secure
mode
- hadoop.registry.jaas.context
- Client
+ hadoop.shell.missing.defaultFs.warning
+ false
Enable hdfs shell commands to display warnings if (fs.defaultFS) property
is not set.
- hadoop.shell.missing.defaultFs.warning
- false
@@ -2660,13 +2522,13 @@
+ hadoop.http.logs.enabled
+ true
Enable the "/logs" endpoint on all Hadoop daemons, which serves local
logs, but may be considered a security risk due to it listing the contents
of a directory.
- hadoop.http.logs.enabled
- true
@@ -2721,8 +2583,7 @@
fs.adl.oauth2.credential, and fs.adl.oauth2.refresh.url.
The RefreshToken type requires property fs.adl.oauth2.client.id and
fs.adl.oauth2.refresh.token.
- The MSI type requires properties fs.adl.oauth2.msi.port and
- fs.adl.oauth2.msi.tenantguid.
+ The MSI type reads optional property fs.adl.oauth2.msi.port, if specified.
The DeviceCode type requires property
fs.adl.oauth2.devicecode.clientapp.id.
The Custom type requires property fs.adl.oauth2.access.token.provider.
@@ -2766,17 +2627,8 @@
The localhost port for the MSI token service. This is the port specified
- when creating the Azure VM.
- Used by MSI token provider.
-
-
-
-
- fs.adl.oauth2.msi.tenantguid
-
-
- The tenant guid for the Azure AAD tenant under which the azure data lake
- store account is created.
+ when creating the Azure VM. The default, if this setting is not specified,
+ is 50342.
Used by MSI token provider.
@@ -2841,48 +2693,48 @@
- Host:Port of the ZooKeeper server to be used.
-
hadoop.zk.address
+ Host:Port of the ZooKeeper server to be used.
+
- Number of tries to connect to ZooKeeper.
hadoop.zk.num-retries
1000
+ Number of tries to connect to ZooKeeper.
- Retry interval in milliseconds when connecting to ZooKeeper.
-
hadoop.zk.retry-interval-ms
1000
+ Retry interval in milliseconds when connecting to ZooKeeper.
+
+ hadoop.zk.timeout-ms
+ 10000
ZooKeeper session timeout in milliseconds. Session expiration
is managed by the ZooKeeper cluster itself, not by the client. This value is
used by the cluster to determine when the client's session expires.
Expirations happens when the cluster does not hear from the client within
the specified session timeout period (i.e. no heartbeat).
- hadoop.zk.timeout-ms
- 10000
- ACL's to be used for ZooKeeper znodes.
hadoop.zk.acl
world:anyone:rwcda
+ ACL's to be used for ZooKeeper znodes.
+ hadoop.zk.auth
Specify the auths to be used for the ACL's specified in hadoop.zk.acl.
This takes a comma-separated list of authentication mechanisms, each of the
form 'scheme:auth' (the same syntax used for the 'addAuth' command in
the ZK CLI).
- hadoop.zk.auth
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 05b18b59298..4fa8c027992 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -20,109 +20,276 @@ Apache Hadoop Compatibility
Purpose
-------
-This document captures the compatibility goals of the Apache Hadoop project. The different types of compatibility between Hadoop releases that affects Hadoop developers, downstream projects, and end-users are enumerated. For each type of compatibility we:
+This document captures the compatibility goals of the Apache Hadoop project.
+The different types of compatibility between Hadoop releases that affect
+Hadoop developers, downstream projects, and end-users are enumerated. For each
+type of compatibility this document will:
* describe the impact on downstream projects or end-users
* where applicable, call out the policy adopted by the Hadoop developers when incompatible changes are permitted.
+All Hadoop interfaces are classified according to the intended audience and
+stability in order to maintain compatibility with previous releases. See the
+[Hadoop Interface Taxonomy](./InterfaceClassification.html) for details
+about the classifications.
+
+### Target Audience
+
+This document is intended for consumption by the Hadoop developer community.
+This document describes the lens through which changes to the Hadoop project
+should be viewed. In order for end users and third party developers to have
+confidence about cross-release compatibility, the developer community must
+ensure that development efforts adhere to these policies. It is the
+responsibility of the project committers to validate that all changes either
+maintain compatibility or are explicitly marked as incompatible.
+
+Within a component Hadoop developers are free to use Private and Limited Private
+APIs, but when using components from a different module Hadoop developers
+should follow the same guidelines as third-party developers: do not
+use Private or Limited Private (unless explicitly allowed) interfaces and
+prefer instead Stable interfaces to Evolving or Unstable interfaces where
+possible. Where not possible, the preferred solution is to expand the audience
+of the API rather than introducing or perpetuating an exception to these
+compatibility guidelines. When working within a Maven module Hadoop developers
+should observe where possible the same level of restraint with regard to
+using components located in other Maven modules.
+
+Above all, Hadoop developers must be mindful of the impact of their changes.
+Stable interfaces must not change between major releases. Evolving interfaces
+must not change between minor releases. New classes and components must be
+labeled appropriately for audience and stability. See the
+[Hadoop Interface Taxonomy](./InterfaceClassification.html) for details about
+when the various labels are appropriate. As a general rule, all new interfaces
+and APIs should have the most limited labels (e.g. Private Unstable) that will
+not inhibit the intent of the interface or API.
+
+### Notational Conventions
+
+The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
+"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" are to be interpreted as
+described in [RFC 2119](http://tools.ietf.org/html/rfc2119).
+
+Deprecation
+-----------
+
+The Java API provides a @Deprecated annotation to mark an API element as
+flagged for removal. The standard meaning of the annotation is that the
+API element should not be used and may be removed in a later version.
+
+In all cases removing an element from an API is an incompatible
+change. In the case of [Stable](./InterfaceClassification.html#Stable) APIs,
+the change cannot be made between minor releases within the same major
+version. In addition, to allow consumers of the API time to adapt to the change,
+the API element to be removed should be marked as deprecated for a full major
+release before it is removed. For example, if a method is marked as deprecated
+in Hadoop 2.8, it cannot be removed until Hadoop 4.0.
+
+### Policy
+
+[Stable](./InterfaceClassification.html#Stable) API elements MUST NOT be removed
+until they have been marked as deprecated (through the @Deprecated annotation or
+other appropriate documentation) for a full major release. In the case that an
+API element was introduced as deprecated (to indicate that it is a temporary
+measure that is intended to be removed) the API element MAY be removed in the
+following major release. When modifying a
+[Stable](./InterfaceClassification.html#Stable) API, developers SHOULD prefer
+introducing a new method or endpoint and deprecating the existing one to making
+incompatible changes to the method or endpoint.
+
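+As an illustration of the preferred pattern (class and method names here are
+hypothetical), a deprecated method can delegate to its replacement and remain
+in place for a full major release before being removed:
+
+```java
+public class JobSubmitter {
+  /**
+   * @deprecated use {@link #submitJob(String, int)} instead; retained for a
+   * full major release before removal.
+   */
+  @Deprecated
+  public void submit(String jobName) {
+    submitJob(jobName, 1);
+  }
+
+  /** Replacement API introduced alongside the deprecation. */
+  public void submitJob(String jobName, int priority) {
+    // ...
+  }
+}
+```
+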
Compatibility types
-------------------
### Java API
-Hadoop interfaces and classes are annotated to describe the intended audience and stability in order to maintain compatibility with previous releases. See [Hadoop Interface Classification](./InterfaceClassification.html) for details.
+Developers SHOULD annotate all Hadoop interfaces and classes with the
+@InterfaceAudience and @InterfaceStability annotations to describe the
+intended audience and stability. Annotations may be at the package, class, or
+member variable or method level. Member variable and method annotations SHALL
+override class annotations, and class annotations SHALL override package
+annotations. A package, class, or member variable or method that is not
+annotated SHALL be interpreted as implicitly
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable).
-* InterfaceAudience: captures the intended audience, possible values are Public (for end users and external projects), LimitedPrivate (for other Hadoop components, and closely related projects like YARN, MapReduce, HBase etc.), and Private (for intra component use).
-* InterfaceStability: describes what types of interface changes are permitted. Possible values are Stable, Evolving, Unstable, and Deprecated.
+* @InterfaceAudience captures the intended audience. Possible values are
+[Public](./InterfaceClassification.html#Public) (for end users and external
+projects), Limited[Private](./InterfaceClassification.html#Private) (for other
+Hadoop components, and closely related projects like YARN, MapReduce, HBase
+etc.), and [Private](./InterfaceClassification.html#Private)
+(for intra component use).
+* @InterfaceStability describes what types of interface changes are permitted. Possible values are [Stable](./InterfaceClassification.html#Stable), [Evolving](./InterfaceClassification.html#Evolving), and [Unstable](./InterfaceClassification.html#Unstable).
+* @Deprecated notes that the package, class, or member variable or method could potentially be removed in the future and should not be used.
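+
+For example (a hypothetical class, shown only to illustrate how the
+annotations from org.apache.hadoop.classification compose), a member-level
+annotation overrides the class-level one:
+
+```java
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+// The class as a whole is part of the public, stable API surface.
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class ExampleRecordReader {
+
+  // This method is excluded from that contract: the member-level annotations
+  // override the class-level ones, making it Private/Unstable.
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public void resetInternalBuffers() {
+    // implementation detail
+  }
+}
+```
+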
#### Use Cases
-* Public-Stable API compatibility is required to ensure end-user programs and downstream projects continue to work without modification.
-* LimitedPrivate-Stable API compatibility is required to allow upgrade of individual components across minor releases.
-* Private-Stable API compatibility is required for rolling upgrades.
+* [Public](./InterfaceClassification.html#Public)-[Stable](./InterfaceClassification.html#Stable) API compatibility is required to ensure end-user programs and downstream projects continue to work without modification.
+* [Public](./InterfaceClassification.html#Public)-[Evolving](./InterfaceClassification.html#Evolving) API compatibility is useful to make functionality available for consumption before it is fully baked.
+* Limited Private-[Stable](./InterfaceClassification.html#Stable) API compatibility is required to allow upgrade of individual components across minor releases.
+* [Private](./InterfaceClassification.html#Private)-[Stable](./InterfaceClassification.html#Stable) API compatibility is required for rolling upgrades.
+* [Private](./InterfaceClassification.html#Private)-[Unstable](./InterfaceClassification.html#Unstable) API compatibility allows internal components to evolve rapidly without concern for downstream consumers, and is how most interfaces should be labeled.
#### Policy
-* Public-Stable APIs must be deprecated for at least one major release prior to their removal in a major release.
-* LimitedPrivate-Stable APIs can change across major releases, but not within a major release.
-* Private-Stable APIs can change across major releases, but not within a major release.
-* Classes not annotated are implicitly "Private". Class members not annotated inherit the annotations of the enclosing class.
-* Note: APIs generated from the proto files need to be compatible for rolling-upgrades. See the section on wire-compatibility for more details. The compatibility policies for APIs and wire-communication need to go hand-in-hand to address this.
+The compatibility policy SHALL be determined by the relevant package, class, or
+member variable or method annotations.
-### Semantic compatibility
+Note: APIs generated from the proto files MUST be compatible for rolling
+upgrades. See the section on wire protocol compatibility for more details. The
+compatibility policies for APIs and wire protocols must therefore go hand
+in hand.
-Apache Hadoop strives to ensure that the behavior of APIs remains consistent over versions, though changes for correctness may result in changes in behavior. Tests and javadocs specify the API's behavior. The community is in the process of specifying some APIs more rigorously, and enhancing test suites to verify compliance with the specification, effectively creating a formal specification for the subset of behaviors that can be easily tested.
+#### Semantic compatibility
+
+Apache Hadoop strives to ensure that the behavior of APIs remains consistent
+over versions, though changes for correctness may result in changes in
+behavior. API behavior SHALL be specified by the JavaDoc API documentation
+where present and complete. When JavaDoc API documentation is not available,
+behavior SHALL be specified by the behavior expected by the related unit tests.
+In cases with no JavaDoc API documentation or unit test coverage, the expected
+behavior is presumed to be obvious and SHOULD be assumed to be the minimum
+functionality implied by the interface naming. The community is in the process
+of specifying some APIs more rigorously and enhancing test suites to verify
+compliance with the specification, effectively creating a formal specification
+for the subset of behaviors that can be easily tested.
+
+The behavior of any API MAY be changed to fix incorrect behavior according to
+the stability of the API, with such a change to be accompanied by updating
+existing documentation and tests and/or adding new documentation or tests.
+
+#### Java Binary compatibility for end-user applications i.e. Apache Hadoop ABI
+
+Apache Hadoop revisions SHOULD retain binary compatibility such that end-user
+applications continue to work without any modifications. Minor Apache Hadoop
+revisions within the same major revision MUST retain compatibility such that
+existing MapReduce applications (e.g. end-user applications and projects such
+as Apache Pig, Apache Hive, et al), existing YARN applications (e.g.
+end-user applications and projects such as Apache Spark, Apache Tez et al),
+and applications that access HDFS directly (e.g. end-user applications and
+projects such as Apache HBase, Apache Flume, et al) work unmodified and without
+recompilation when used with any Apache Hadoop cluster within the same major
+release as the original build target.
+
+For MapReduce applications in particular, i.e. applications using the
+org.apache.hadoop.mapred and/or org.apache.hadoop.mapreduce APIs, the developer
+community SHALL support binary compatibility across major releases. The
+MapReduce APIs SHALL be supported compatibly across major releases. See
+[Compatibility for MapReduce applications between hadoop-1.x and hadoop-2.x](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html) for more details.
+
+Some applications may be affected by changes to disk layouts or other internal
+changes. See the sections that follow for policies on how incompatible
+changes to non-API interfaces are handled.
+
+### Native Dependencies
+
+Hadoop includes several native components, including compression, the
+container executor binary, and various native integrations. These native
+components introduce a set of native dependencies for Hadoop, both at compile
+time and at runtime, such as cmake, gcc, zlib, etc. This set of native
+dependencies is part of the Hadoop ABI.
#### Policy
-The behavior of API may be changed to fix incorrect behavior, such a change to be accompanied by updating existing buggy tests or adding tests in cases there were none prior to the change.
+The minimum required versions of the native components on which Hadoop depends
+at compile time and/or runtime SHALL be considered
+[Stable](./InterfaceClassification.html#Stable). Changes to the minimum
+required versions MUST NOT increase between minor releases within a major
+version.
-### Wire compatibility
+### Wire Protocols
-Wire compatibility concerns data being transmitted over the wire between Hadoop processes. Hadoop uses Protocol Buffers for most RPC communication. Preserving compatibility requires prohibiting modification as described below. Non-RPC communication should be considered as well, for example using HTTP to transfer an HDFS image as part of snapshotting or transferring MapTask output. The potential communications can be categorized as follows:
+Wire compatibility concerns data being transmitted "over the wire" between
+Hadoop processes. Hadoop uses
+[Protocol Buffers](https://developers.google.com/protocol-buffers/) for most
+RPC communication. Preserving compatibility requires prohibiting modification
+as described below. Non-RPC communication should be considered as well, for
+example using HTTP to transfer an HDFS image as part of snapshotting or
+transferring MapReduce map task output. The communications can be categorized as
+follows:
* Client-Server: communication between Hadoop clients and servers (e.g., the HDFS client to NameNode protocol, or the YARN client to ResourceManager protocol).
-* Client-Server (Admin): It is worth distinguishing a subset of the Client-Server protocols used solely by administrative commands (e.g., the HAAdmin protocol) as these protocols only impact administrators who can tolerate changes that end users (which use general Client-Server protocols) can not.
+* Client-Server (Admin): It is worth distinguishing a subset of the Client-Server protocols used solely by administrative commands (e.g., the HAAdmin protocol) as these protocols only impact administrators who can tolerate changes that end users (which use general Client-Server protocols) cannot.
* Server-Server: communication between servers (e.g., the protocol between the DataNode and NameNode, or NodeManager and ResourceManager)
-#### Use Cases
+#### Protocol Dependencies
-* Client-Server compatibility is required to allow users to continue using the old clients even after upgrading the server (cluster) to a later version (or vice versa). For example, a Hadoop 2.1.0 client talking to a Hadoop 2.3.0 cluster.
-* Client-Server compatibility is also required to allow users to upgrade the client before upgrading the server (cluster). For example, a Hadoop 2.4.0 client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side bug fixes ahead of full cluster upgrades. Note that new cluster features invoked by new client APIs or shell commands will not be usable. YARN applications that attempt to use new APIs (including new fields in data structures) that have not yet been deployed to the cluster can expect link exceptions.
-* Client-Server compatibility is also required to allow upgrading individual components without upgrading others. For example, upgrade HDFS from version 2.1.0 to 2.2.0 without upgrading MapReduce.
-* Server-Server compatibility is required to allow mixed versions within an active cluster so the cluster may be upgraded without downtime in a rolling fashion.
+The components of Apache Hadoop may have dependencies that include their own
+protocols, such as Zookeeper, S3, Kerberos, etc. These protocol dependencies
+SHALL be treated as internal protocols and governed by the same policy.
+
+#### Transports
+
+In addition to compatibility of the protocols themselves, maintaining
+cross-version communications requires that the transports supported also be
+stable. The most likely source of transport changes stems from secure
+transports, such as SSL. Upgrading a service from SSLv2 to SSLv3 may break
+existing SSLv2 clients. The minimum supported major version of any transport
+MUST NOT increase across minor releases within a major version.
+
+Service ports are considered as part of the transport mechanism. Fixed
+service port numbers MUST be kept consistent to prevent breaking clients.
#### Policy
-* Both Client-Server and Server-Server compatibility is preserved within a major release. (Different policies for different categories are yet to be considered.)
-* Compatibility can be broken only at a major release, though breaking compatibility even at major releases has grave consequences and should be discussed in the Hadoop community.
-* Hadoop protocols are defined in .proto (ProtocolBuffers) files. Client-Server protocols and Server-Server protocol .proto files are marked as stable. When a .proto file is marked as stable it means that changes should be made in a compatible fashion as described below:
- * The following changes are compatible and are allowed at any time:
- * Add an optional field, with the expectation that the code deals with the field missing due to communication with an older version of the code.
- * Add a new rpc/method to the service
- * Add a new optional request to a Message
- * Rename a field
- * Rename a .proto file
- * Change .proto annotations that effect code generation (e.g. name of java package)
- * The following changes are incompatible but can be considered only at a major release
- * Change the rpc/method name
- * Change the rpc/method parameter type or return type
- * Remove an rpc/method
- * Change the service name
- * Change the name of a Message
- * Modify a field type in an incompatible way (as defined recursively)
- * Change an optional field to required
- * Add or delete a required field
- * Delete an optional field as long as the optional field has reasonable defaults to allow deletions
- * The following changes are incompatible and hence never allowed
- * Change a field id
- * Reuse an old field that was previously deleted.
- * Field numbers are cheap and changing and reusing is not a good idea.
+Hadoop wire protocols are defined in .proto (ProtocolBuffers) files.
+Client-Server and Server-Server protocols SHALL be classified according to the
+audience and stability classifications noted in their .proto files. In cases
+where no classifications are present, the protocols SHOULD be assumed to be
+[Private](./InterfaceClassification.html#Private) and
+[Stable](./InterfaceClassification.html#Stable).
-### Java Binary compatibility for end-user applications i.e. Apache Hadoop ABI
+The following changes to a .proto file SHALL be considered compatible:
-As Apache Hadoop revisions are upgraded end-users reasonably expect that their applications should continue to work without any modifications. This is fulfilled as a result of supporting API compatibility, Semantic compatibility and Wire compatibility.
+* Add an optional field, with the expectation that the code deals with the field missing due to communication with an older version of the code
+* Add a new rpc/method to the service
+* Add a new optional request to a Message
+* Rename a field
+* Rename a .proto file
+* Change .proto annotations that affect code generation (e.g. name of java package)
-However, Apache Hadoop is a very complex, distributed system and services a very wide variety of use-cases. In particular, Apache Hadoop MapReduce is a very, very wide API; in the sense that end-users may make wide-ranging assumptions such as layout of the local disk when their map/reduce tasks are executing, environment variables for their tasks etc. In such cases, it becomes very hard to fully specify, and support, absolute compatibility.
+The following changes to a .proto file SHALL be considered incompatible:
-#### Use cases
+* Change an rpc/method name
+* Change an rpc/method parameter type or return type
+* Remove an rpc/method
+* Change the service name
+* Change the name of a Message
+* Modify a field type in an incompatible way (as defined recursively)
+* Change an optional field to required
+* Add or delete a required field
+* Delete an optional field as long as the optional field has reasonable defaults to allow deletions
-* Existing MapReduce applications, including jars of existing packaged end-user applications and projects such as Apache Pig, Apache Hive, Cascading etc. should work unmodified when pointed to an upgraded Apache Hadoop cluster within a major release.
-* Existing YARN applications, including jars of existing packaged end-user applications and projects such as Apache Tez etc. should work unmodified when pointed to an upgraded Apache Hadoop cluster within a major release.
-* Existing applications which transfer data in/out of HDFS, including jars of existing packaged end-user applications and frameworks such as Apache Flume, should work unmodified when pointed to an upgraded Apache Hadoop cluster within a major release.
+The following changes to a .proto file SHALL be considered incompatible and
+SHALL never be allowed:
-#### Policy
+* Change a field id
+* Reuse an old field that was previously deleted.
-* Existing MapReduce, YARN & HDFS applications and frameworks should work unmodified within a major release i.e. Apache Hadoop ABI is supported.
-* A very minor fraction of applications maybe affected by changes to disk layouts etc., the developer community will strive to minimize these changes and will not make them within a minor version. In more egregious cases, we will consider strongly reverting these breaking changes and invalidating offending releases if necessary.
-* In particular for MapReduce applications, the developer community will try our best to support providing binary compatibility across major releases e.g. applications using org.apache.hadoop.mapred.
-* APIs are supported compatibly across hadoop-1.x and hadoop-2.x. See [Compatibility for MapReduce applications between hadoop-1.x and hadoop-2.x](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html) for more details.
+Hadoop wire protocols that are not defined via .proto files SHOULD be considered
+to be [Private](./InterfaceClassification.html#Private) and
+[Stable](./InterfaceClassification.html#Stable).
+
+In addition to the limitations imposed by being
+[Stable](./InterfaceClassification.html#Stable), Hadoop's wire protocols
+MUST also be forward compatible across minor releases within a major version
+according to the following:
+
+* Client-Server compatibility MUST be maintained so as to allow users to continue using older clients even after upgrading the server (cluster) to a later version (or vice versa). For example, a Hadoop 2.1.0 client talking to a Hadoop 2.3.0 cluster.
+* Client-Server compatibility MUST be maintained so as to allow users to upgrade the client before upgrading the server (cluster). For example, a Hadoop 2.4.0 client talking to a Hadoop 2.3.0 cluster. This allows deployment of client-side bug fixes ahead of full cluster upgrades. Note that new cluster features invoked by new client APIs or shell commands will not be usable. YARN applications that attempt to use new APIs (including new fields in data structures) that have not yet been deployed to the cluster can expect link exceptions.
+* Client-Server compatibility MUST be maintained so as to allow upgrading individual components without upgrading others. For example, upgrade HDFS from version 2.1.0 to 2.2.0 without upgrading MapReduce.
+* Server-Server compatibility MUST be maintained so as to allow mixed versions within an active cluster so the cluster may be upgraded without downtime in a rolling fashion.
+
+New transport mechanisms MUST only be introduced with minor or major version
+changes. Existing transport mechanisms MUST continue to be supported across
+minor versions within a major version. Service port numbers MUST remain
+consistent across minor version numbers within a major version.
### REST APIs
-REST API compatibility corresponds to both the requests (URLs) and responses to each request (content, which may contain other URLs). Hadoop REST APIs are specifically meant for stable use by clients across releases, even major ones. The following are the exposed REST APIs:
+REST API compatibility applies to the REST endpoints (URLs) and response data
+format. Hadoop REST APIs are specifically meant for stable use by clients across
+releases, even major ones. The following is a non-exhaustive list of the
+exposed REST APIs:
-* [WebHDFS](../hadoop-hdfs/WebHDFS.html) - Stable
+* [WebHDFS](../hadoop-hdfs/WebHDFS.html)
* [ResourceManager](../../hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html)
* [NodeManager](../../hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html)
* [MR Application Master](../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapredAppMasterRest.html)
@@ -130,134 +297,390 @@ REST API compatibility corresponds to both the requests (URLs) and responses to
* [Timeline Server v1 REST API](../../hadoop-yarn/hadoop-yarn-site/TimelineServer.html)
* [Timeline Service v2 REST API](../../hadoop-yarn/hadoop-yarn-site/TimelineServiceV2.html)
+Each API has an API-specific version number. Any incompatible changes MUST
+increment the API version number.
+
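+As a sketch of what stable client use looks like in practice (the host, port,
+and path below are placeholders; the default NameNode HTTP port differs across
+releases), a client built against the versioned WebHDFS endpoint is expected
+to keep working across releases:
+
+```java
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+
+public class WebHdfsListStatus {
+  public static void main(String[] args) throws IOException {
+    // The "/webhdfs/v1/" prefix carries the API version; an incompatible
+    // change would require a new version segment rather than changing the
+    // v1 request or response format.
+    URL url = new URL(
+        "http://namenode.example.com:9870/webhdfs/v1/user/example?op=LISTSTATUS");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("GET");
+    try (BufferedReader reader = new BufferedReader(
+        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
+      String line;
+      while ((line = reader.readLine()) != null) {
+        System.out.println(line);  // JSON FileStatuses response
+      }
+    } finally {
+      conn.disconnect();
+    }
+  }
+}
+```
+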
#### Policy
-The APIs annotated stable in the text above preserve compatibility across at least one major release, and maybe deprecated by a newer version of the REST API in a major release.
+The Hadoop REST APIs SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving). With respect to API version
+numbers, the Hadoop REST APIs SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable), i.e. no incompatible changes
+are allowed within an API version number.
+
+### Log Output
+
+The Hadoop daemons and CLIs produce log output via Log4j that is intended to
+aid administrators and developers in understanding and troubleshooting cluster
+behavior. Log messages are intended for human consumption, though automation
+use cases are also supported.
+
+#### Policy
+
+All log output SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
+
+### Audit Log Output
+
+Several components have audit logging systems that record system information in
+a machine readable format. Incompatible changes to that data format may break
+existing automation utilities. For the audit log, an incompatible change is
+any change to the format such that existing parsers can no longer parse the
+logs.
+
+#### Policy
+
+All audit log output SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). Any change to the
+data format SHALL be considered an incompatible change.
### Metrics/JMX
-While the Metrics API compatibility is governed by Java API compatibility, the actual metrics exposed by Hadoop need to be compatible for users to be able to automate using them (scripts etc.). Adding additional metrics is compatible. Modifying (e.g. changing the unit or measurement) or removing existing metrics breaks compatibility. Similarly, changes to JMX MBean object names also break compatibility.
+While the Metrics API compatibility is governed by Java API compatibility, the
+Metrics data format exposed by Hadoop MUST be maintained as compatible for
+consumers of the data, e.g. for automation tasks.
#### Policy
-Metrics should preserve compatibility within the major release.
+The data format exposed via Metrics SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable).
### File formats & Metadata
-User and system level data (including metadata) is stored in files of different formats. Changes to the metadata or the file formats used to store data/metadata can lead to incompatibilities between versions.
+User and system level data (including metadata) is stored in files of various
+formats. Changes to the metadata or the file formats used to store
+data/metadata can lead to incompatibilities between versions. Each class of file
+format is addressed below.
#### User-level file formats
-Changes to formats that end-users use to store their data can prevent them from accessing the data in later releases, and hence it is highly important to keep those file-formats compatible. One can always add a "new" format improving upon an existing format. Examples of these formats include har, war, SequenceFileFormat etc.
+Changes to formats that end users use to store their data can prevent them from
+accessing the data in later releases, and hence are important to be compatible.
+Examples of these formats include har, war, SequenceFileFormat, etc.
##### Policy
-* Non-forward-compatible user-file format changes are restricted to major releases. When user-file formats change, new releases are expected to read existing formats, but may write data in formats incompatible with prior releases. Also, the community shall prefer to create a new format that programs must opt in to instead of making incompatible changes to existing formats.
+User-level file formats SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). User-level file
+format changes SHOULD be made forward compatible across major releases and MUST
+be made forward compatible within a major release. The developer community
+SHOULD prefer the creation of a new derivative file format to making
+incompatible changes to an existing file format. Such new file formats MUST be
+created as opt-in, meaning that users must be able to continue using the
+existing compatible format until and unless they explicitly opt in to using
+the new file format.
-#### System-internal file formats
+#### System-internal data schemas
-Hadoop internal data is also stored in files and again changing these formats can lead to incompatibilities. While such changes are not as devastating as the user-level file formats, a policy on when the compatibility can be broken is important.
+Hadoop internal data may also be stored in files or other data stores. Changing
+the schemas of these data stores can lead to incompatibilities.
##### MapReduce
MapReduce uses formats like I-File to store MapReduce-specific data.
-##### Policy
+###### Policy
-MapReduce-internal formats like IFile maintain compatibility within a major release. Changes to these formats can cause in-flight jobs to fail and hence we should ensure newer clients can fetch shuffle-data from old servers in a compatible manner.
+All MapReduce-internal file formats, such as I-File format or the job history
+server's jhist file format, SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Stable](./InterfaceClassification.html#Stable).
##### HDFS Metadata
-HDFS persists metadata (the image and edit logs) in a particular format. Incompatible changes to either the format or the metadata prevent subsequent releases from reading older metadata. Such incompatible changes might require an HDFS "upgrade" to convert the metadata to make it accessible. Some changes can require more than one such "upgrades".
+HDFS persists metadata (the image and edit logs) in a private file format.
+Incompatible changes to either the format or the metadata prevent subsequent
+releases from reading older metadata. Incompatible changes MUST include a
+process by which existing metadata may be upgraded. Changes SHALL be
+allowed to require more than one upgrade. Incompatible changes MUST result in
+the metadata version number being incremented.
-Depending on the degree of incompatibility in the changes, the following potential scenarios can arise:
+Depending on the degree of incompatibility in the changes, the following
+potential scenarios can arise:
* Automatic: The image upgrades automatically, no need for an explicit "upgrade".
* Direct: The image is upgradable, but might require one explicit release "upgrade".
* Indirect: The image is upgradable, but might require upgrading to intermediate release(s) first.
* Not upgradeable: The image is not upgradeable.
-##### Policy
+HDFS data nodes store data in a private directory structure. The schema of that
+directory structure must remain stable to retain compatibility.
-* A release upgrade must allow a cluster to roll-back to the older version and its older disk format. The rollback needs to restore the original data, but not required to restore the updated data.
-* HDFS metadata changes must be upgradeable via any of the upgrade paths - automatic, direct or indirect.
-* More detailed policies based on the kind of upgrade are yet to be considered.
+###### Policy
+
+The HDFS metadata format SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving). Incompatible
+changes MUST include a process by which existing metadata may be upgraded. The
+upgrade process MUST allow the cluster metadata to be rolled back to the older
+version and its older disk format. The rollback MUST restore the original data
+but is not REQUIRED to restore the updated data. Any incompatible change
+to the format MUST result in the major version number of the schema being
+incremented.
+
+The data node directory format SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving).
+
+##### AWS S3A Guard Metadata
+
+For each operation in the Hadoop S3 client (s3a) that reads or modifies
+file metadata, a shadow copy of that file metadata is stored in a separate
+metadata store, which offers HDFS-like consistency for the metadata, and may
+also provide faster lookups for things like file status or directory listings.
+S3A guard tables are created with a version marker which indicates
+compatibility.
+
+###### Policy
+
+The S3A guard metadata schema SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable). Any incompatible change
+to the schema MUST result in the version number of the schema being incremented.
+
+##### YARN Resource Manager State Store
+
+The YARN resource manager stores information about the cluster state in an
+external state store for use in fail over and recovery. If the schema used for
+the state store data does not remain compatible, the resource manager will not
+be able to recover its state and will fail to start. The state store data
+schema includes a version number that indicates compatibility.
+
+###### Policy
+
+The YARN resource manager state store data schema SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving). Any incompatible change
+to the schema MUST result in the major version number of the schema being
+incremented. Any compatible change to the schema MUST result in the minor
+version number being incremented.
+
+##### YARN Node Manager State Store
+
+The YARN node manager stores information about the node state in an
+external state store for use in recovery. If the schema used for the state
+store data does not remain compatible, the node manager will not
+be able to recover its state and will fail to start. The state store data
+schema includes a version number that indicates compatibility.
+
+###### Policy
+
+The YARN node manager state store data schema SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving). Any incompatible change
+to the schema MUST result in the major version number of the schema being
+incremented. Any compatible change to the schema MUST result in the minor
+version number being incremented.
+
+##### YARN Federation State Store
+
+The YARN resource manager federation service stores information about the
+federated clusters, running applications, and routing policies in an
+external state store for use in replication and recovery. If the schema used
+for the state store data does not remain compatible, the federation service
+will fail to initialize. The state store data schema includes a version number
+that indicates compatibility.
+
+###### Policy
+
+The YARN federation service state store data schema SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Evolving](./InterfaceClassification.html#Evolving). Any incompatible change
+to the schema MUST result in the major version number of the schema being
+incremented. Any compatible change to the schema MUST result in the minor
+version number being incremented.
### Command Line Interface (CLI)
-The Hadoop command line programs may be used either directly via the system shell or via shell scripts. Changing the path of a command, removing or renaming command line options, the order of arguments, or the command return code and output break compatibility and may adversely affect users.
+The Hadoop command line programs may be used either directly via the system
+shell or via shell scripts. The CLIs include both the user-facing commands, such
+as the hdfs command or the yarn command, and the admin-facing commands, such as
+the scripts used to start and stop daemons. Changing the path of a command,
+removing or renaming command line options, changing the order of arguments, or
+changing the command return codes or output breaks compatibility and adversely
+affects users.
#### Policy
-CLI commands are to be deprecated (warning when used) for one major release before they are removed or incompatibly modified in a subsequent major release.
+All Hadoop CLI paths, usage, and output SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable).
+Note that the CLI output SHALL be considered distinct from the log output
+generated by the Hadoop CLIs. The latter SHALL be governed by the policy on log
+output. Note also that for CLI output, all changes SHALL be considered
+incompatible changes.
### Web UI
-Web UI, particularly the content and layout of web pages, changes could potentially interfere with attempts to screen scrape the web pages for information.
+Changes to the Web UI, particularly to the content and layout of web pages,
+could interfere with attempts to screen scrape the pages for information. The
+Hadoop Web UI pages, however, are not meant to be scraped, e.g. for automation
+purposes. Users are expected to use the REST APIs to access cluster
+information programmatically.
#### Policy
-Web pages are not meant to be scraped and hence incompatible changes to them are allowed at any time. Users are expected to use REST APIs to get any information.
+The Hadoop Web UI SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Unstable](./InterfaceClassification.html#Unstable).
### Hadoop Configuration Files
-Users use (1) Hadoop-defined properties to configure and provide hints to Hadoop and (2) custom properties to pass information to jobs. Hence, compatibility of config properties is two-fold:
-
-* Modifying key-names, units of values, and default values of Hadoop-defined properties.
-* Custom configuration property keys should not conflict with the namespace of Hadoop-defined properties. Typically, users should avoid using prefixes used by Hadoop: hadoop, io, ipc, fs, net, file, ftp, s3, kfs, ha, file, dfs, mapred, mapreduce, yarn.
+Users use Hadoop-defined properties to configure and provide hints to Hadoop
+and custom properties to pass information to jobs. Users are encouraged to
+avoid custom configuration property names that conflict with the namespace of
+Hadoop-defined properties and should avoid using any prefixes used by Hadoop,
+e.g. hadoop, io, ipc, fs, net, file, ftp, s3, kfs, ha, dfs, mapred, mapreduce,
+and yarn.
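+
+For example, the following sketch keeps custom keys in a project-specific
+namespace instead of under a Hadoop-reserved prefix. The `com.example.*`
+property names and the class are hypothetical and are used only for
+illustration:
+
+```java
+import org.apache.hadoop.conf.Configuration;
+
+public class CustomPropertyExample {
+  public static void main(String[] args) {
+    Configuration conf = new Configuration();
+
+    // Good: a project-specific namespace that cannot collide with
+    // Hadoop-defined properties.
+    conf.set("com.example.ingest.batch.size", "500");
+
+    // Risky: "mapreduce." is a Hadoop prefix, so a future Hadoop release
+    // could define or redefine a key with this name.
+    // conf.set("mapreduce.example.batch.size", "500");
+
+    int batchSize = conf.getInt("com.example.ingest.batch.size", 100);
+    System.out.println("batch size = " + batchSize);
+  }
+}
+```
+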
#### Policy
-* Hadoop-defined properties are to be deprecated at least for one major release before being removed. Modifying units for existing properties is not allowed.
-* The default values of Hadoop-defined properties can be changed across minor/major releases, but will remain the same across point releases within a minor release.
-* Currently, there is NO explicit policy regarding when new prefixes can be added/removed, and the list of prefixes to be avoided for custom configuration properties. However, as noted above, users should avoid using prefixes used by Hadoop: hadoop, io, ipc, fs, net, file, ftp, s3, kfs, ha, file, dfs, mapred, mapreduce, yarn.
+Hadoop-defined properties (names and meanings) SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). The units implied by a
+Hadoop-defined property MUST NOT change, even
+across major versions. Default values of Hadoop-defined properties SHALL be
+considered [Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
+
+### Log4j Configuration Files
+
+The log output produced by Hadoop daemons and CLIs is governed by a set of
+configuration files. These files control the minimum level of log message that
+will be output by the various components of Hadoop, as well as where and how
+those messages are stored.
+
+#### Policy
+
+All Log4j configurations SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
### Directory Structure
-Source code, artifacts (source and tests), user logs, configuration files, output and job history are all stored on disk either local file system or HDFS. Changing the directory structure of these user-accessible files break compatibility, even in cases where the original path is preserved via symbolic links (if, for example, the path is accessed by a servlet that is configured to not follow symbolic links).
+Source code, artifacts (source and tests), user logs, configuration files,
+output, and job history are all stored on disk, either on the local file
+system or in HDFS. Changing the directory structure of these user-accessible
+files can break compatibility, even in cases where the original path is
+preserved via symbolic links (such as when the path is accessed by a servlet
+that is configured not to follow symbolic links).
#### Policy
-* The layout of source code and build artifacts can change anytime, particularly so across major versions. Within a major version, the developers will attempt (no guarantees) to preserve the directory structure; however, individual files can be added/moved/deleted. The best way to ensure patches stay in sync with the code is to get them committed to the Apache source tree.
-* The directory structure of configuration files, user logs, and job history will be preserved across minor and point releases within a major release.
+The layout of source code and build artifacts SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable). Within a major version,
+the developer community SHOULD preserve the
+overall directory structure, though individual files MAY be added, moved, or
+deleted with no warning.
+
+The directory structure of configuration files, user logs, and job history SHALL
+be considered [Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
### Java Classpath
-User applications built against Hadoop might add all Hadoop jars (including Hadoop's library dependencies) to the application's classpath. Adding new dependencies or updating the version of existing dependencies may interfere with those in applications' classpaths.
+Hadoop provides several client artifacts that applications use to interact
+with the system. These artifacts typically have their own dependencies on
+common libraries. In the cases where these dependencies are exposed to
+end user applications or downstream consumers (i.e. not
+[shaded](https://stackoverflow.com/questions/13620281/what-is-the-maven-shade-plugin-used-for-and-why-would-you-want-to-relocate-java)),
+changes to these dependencies can be disruptive. Developers are strongly
+encouraged to avoid exposing dependencies to clients by using techniques
+such as
+[shading](https://stackoverflow.com/questions/13620281/what-is-the-maven-shade-plugin-used-for-and-why-would-you-want-to-relocate-java).
+
+With regard to dependencies, adding a dependency is an incompatible change,
+whereas removing a dependency is a compatible change.
+
+Some user applications built against Hadoop may add all Hadoop JAR files
+(including Hadoop's library dependencies) to the application's classpath.
+Adding new dependencies or updating the versions of existing dependencies may
+interfere with those in applications' classpaths and hence their correct
+operation. Users are therefore discouraged from adopting this practice.
#### Policy
-Currently, there is NO policy on when Hadoop's dependencies can change.
+The set of dependencies exposed by the Hadoop client artifacts SHALL be
+considered [Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). Any dependencies that are not
+exposed to clients (either because they are shaded or only exist in non-client
+artifacts) SHALL be considered [Private](./InterfaceClassification.html#Private)
+and [Unstable](./InterfaceClassification.html#Unstable).
### Environment variables
-Users and related projects often utilize the exported environment variables (eg HADOOP\_CONF\_DIR), therefore removing or renaming environment variables is an incompatible change.
+Users and related projects often utilize the environment variables exported by
+Hadoop (e.g. HADOOP\_CONF\_DIR). Removing or renaming environment variables can
+therefore impact end user applications.
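+
+As a sketch of why this matters, downstream code typically reads these
+variables by name, so a rename silently breaks the lookup. The class below is
+hypothetical:
+
+```java
+public class ConfDirLookup {
+  public static void main(String[] args) {
+    // If HADOOP_CONF_DIR were renamed, this lookup would return null.
+    String confDir = System.getenv("HADOOP_CONF_DIR");
+    if (confDir == null) {
+      System.err.println("HADOOP_CONF_DIR is not set");
+      return;
+    }
+    System.out.println("Loading site configuration from " + confDir);
+  }
+}
+```
+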
#### Policy
-Currently, there is NO policy on when the environment variables can change. Developers try to limit changes to major releases.
+The environment variables consumed by Hadoop and the environment variables made
+accessible to applications through YARN SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Evolving](./InterfaceClassification.html#Evolving).
+The developer community SHOULD limit changes to major releases.
### Build artifacts
-Hadoop uses maven for project management and changing the artifacts can affect existing user workflows.
+Hadoop uses Maven for project management. Changes to the contents of
+generated artifacts can impact existing user applications.
#### Policy
-* Test artifacts: The test jars generated are strictly for internal use and are not expected to be used outside of Hadoop, similar to APIs annotated @Private, @Unstable.
-* Built artifacts: The hadoop-client artifact (maven groupId:artifactId) stays compatible within a major release, while the other artifacts can change in incompatible ways.
+The contents of Hadoop test artifacts SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable). Test artifacts include
+all JAR files generated from test source code and all JAR files that include
+"tests" in the file name.
+
+The Hadoop client artifacts SHALL be considered
+[Public](./InterfaceClassification.html#Public) and
+[Stable](./InterfaceClassification.html#Stable). Client artifacts are the
+following:
+
+* hadoop-client
+* hadoop-client-api
+* hadoop-client-minicluster
+* hadoop-client-runtime
+* hadoop-hdfs-client
+* hadoop-hdfs-native-client
+* hadoop-mapreduce-client-app
+* hadoop-mapreduce-client-common
+* hadoop-mapreduce-client-core
+* hadoop-mapreduce-client-hs
+* hadoop-mapreduce-client-hs-plugins
+* hadoop-mapreduce-client-jobclient
+* hadoop-mapreduce-client-nativetask
+* hadoop-mapreduce-client-shuffle
+* hadoop-yarn-client
+
+All other build artifacts SHALL be considered
+[Private](./InterfaceClassification.html#Private) and
+[Unstable](./InterfaceClassification.html#Unstable).
### Hardware/Software Requirements
-To keep up with the latest advances in hardware, operating systems, JVMs, and other software, new Hadoop releases or some of their features might require higher versions of the same. For a specific environment, upgrading Hadoop might require upgrading other dependent software components.
+To keep up with the latest advances in hardware, operating systems, JVMs, and
+other software, new Hadoop releases may include features that require
+newer hardware, operating system releases, or JVM versions than previous
+Hadoop releases. For a specific environment, upgrading Hadoop might require
+upgrading other dependent software components.
#### Policies
* Hardware
* Architecture: The community has no plans to restrict Hadoop to specific architectures, but can have family-specific optimizations.
- * Minimum resources: While there are no guarantees on the minimum resources required by Hadoop daemons, the community attempts to not increase requirements within a minor release.
-* Operating Systems: The community will attempt to maintain the same OS requirements (OS kernel versions) within a minor release. Currently GNU/Linux and Microsoft Windows are the OSes officially supported by the community while Apache Hadoop is known to work reasonably well on other OSes such as Apple MacOSX, Solaris etc.
-* The JVM requirements will not change across point releases within the same minor release except if the JVM version under question becomes unsupported. Minor/major releases might require later versions of JVM for some/all of the supported operating systems.
-* Other software: The community tries to maintain the minimum versions of additional software required by Hadoop. For example, ssh, kerberos etc.
+ * Minimum resources: While there are no guarantees on the minimum resources required by Hadoop daemons, the developer community SHOULD avoid increasing requirements within a minor release.
+* Operating Systems: The community SHOULD maintain the same minimum OS requirements (OS kernel versions) within a minor release. Currently GNU/Linux and Microsoft Windows are the OSes officially supported by the community, while Apache Hadoop is known to work reasonably well on other OSes such as Apple MacOSX, Solaris, etc.
+* The JVM requirements SHALL NOT change across minor releases within the same major release unless the JVM version in question becomes unsupported. The JVM version requirement MAY be different for different operating systems or even operating system releases.
+* File systems supported by Hadoop, e.g. through the HDFS FileSystem API, SHOULD NOT become unsupported between minor releases within a major version unless a migration path to an alternate client implementation is available.
References
----------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index c7309ab7714..451f9be3073 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -66,54 +66,103 @@ Hadoop uses the following kinds of audience in order of increasing/wider visibil
#### Private
-The interface is for internal use within the project (such as HDFS or MapReduce)
-and should not be used by applications or by other projects. It is subject to
-change at anytime without notice. Most interfaces of a project are Private (also
-referred to as project-private).
+A Private interface is for internal use within the project (such as HDFS or
+MapReduce) and should not be used by applications or by other projects. Most
+interfaces of a project are Private (also referred to as project-private).
+Unless an interface is intentionally exposed for external consumption, it should
+be marked Private.
#### Limited-Private
-The interface is used by a specified set of projects or systems (typically
-closely related projects). Other projects or systems should not use the
-interface. Changes to the interface will be communicated/negotiated with the
+A Limited-Private interface is used by a specified set of projects or systems
+(typically closely related projects). Other projects or systems should not use
+the interface. Changes to the interface will be communicated/negotiated with the
specified projects. For example, in the Hadoop project, some interfaces are
LimitedPrivate{HDFS, MapReduce} in that they are private to the HDFS and
MapReduce projects.
#### Public
-The interface is for general use by any application.
+A Public interface is for general use by any application.
+
+### Change Compatibility
+
+Changes to an API fall into two broad categories: compatible and incompatible.
+A compatible change is a change that meets the following criteria:
+
+* no existing capabilities are removed,
+* no existing capabilities are modified in a way that prevents their use by clients that were constructed to use the interface prior to the change, and
+* no capabilities are added that require changes to clients that were constructed to use the interface prior to the change.
+
+Any change that does not meet these three criteria is an incompatible change.
+Stated simply, a compatible change will not break existing clients. The
+following are examples of compatible changes (see also the sketch after the
+lists below):
+
+* adding a method to a Java class,
+* adding an optional parameter to a RESTful web service,
+* adding a tag to an XML document, or
+* making the audience annotation of an interface broader (e.g. from Private to Public) or the change compatibility annotation more restrictive (e.g. from Evolving to Stable).
+
+These examples are incompatible changes:
+
+* removing a method from a Java class,
+* adding a method to a Java interface,
+* adding a required parameter to a RESTful web service,
+* renaming a field in a JSON document, or
+* making the audience annotation of an interface narrower (e.g. from Public to Limited-Private) or the change compatibility annotation less restrictive (e.g. from Evolving to Unstable).
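+
+To make the Java cases above concrete, here is a short sketch; the types are
+hypothetical and illustrate the rules only, they are not Hadoop APIs:
+
+```java
+// Compatible: adding a method to a class. Callers and subclasses compiled
+// against the old version keep working.
+class RecordWriter {
+  void write(String record) { /* ... */ }
+  void flush() { }  // newly added; pre-existing callers are unaffected
+}
+
+// Incompatible: adding a method to an interface. Implementations written
+// against the old interface no longer compile (unless a default
+// implementation is supplied).
+interface RecordSink {
+  void write(String record);
+  void flush();     // newly added; pre-existing implementors now break
+}
+```
+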
### Stability
-Stability denotes how stable an interface is, as in when incompatible changes to
-the interface are allowed. Hadoop APIs have the following levels of stability.
+Stability denotes how stable an interface is and when compatible and
+incompatible changes to the interface are allowed. Hadoop APIs have the
+following levels of stability.
#### Stable
-Can evolve while retaining compatibility for minor release boundaries; in other
-words, incompatible changes to APIs marked as Stable are allowed only at major
-releases (i.e. at m.0).
+A Stable interface is exposed as a preferred means of communication. A Stable
+interface is expected not to change incompatibly within a major release and
+hence serves as a safe development target. A Stable interface may evolve
+compatibly between minor releases.
+
+* Incompatible changes allowed: major (X.0.0)
+* Compatible changes allowed: minor (x.Y.0)
#### Evolving
-Evolving, but incompatible changes are allowed at minor releases (i.e. m .x)
+An Evolving interface is typically exposed so that users or external code can
+make use of a feature before it has stabilized. The expectation that an
+interface should "eventually" stabilize and be promoted to Stable, however,
+is not a requirement for the interface to be labeled as Evolving.
+
+Incompatible changes are allowed for an Evolving interface only at minor
+releases.
+
+* Incompatible changes allowed: minor (x.Y.0)
+* Compatible changes allowed: maintenance (x.y.Z)
#### Unstable
-Incompatible changes to Unstable APIs are allowed at any time. This usually makes
-sense for only private interfaces.
+An Unstable interface is one for which no compatibility guarantees are made.
+An interface classified as Unstable is not necessarily subject to constant
+change in practice. An Unstable interface is typically exposed because a user
+or external code needs to access an interface that is not intended for
+external consumption. The interface is exposed as Unstable to state clearly
+that even though the interface is exposed, it is not the preferred access
+path, and no compatibility guarantees are made for it.
-However one may call this out for a supposedly public interface to highlight
-that it should not be used as an interface; for public interfaces, labeling it
-as Not-an-interface is probably more appropriate than "Unstable".
+Unless there is a reason to offer a compatibility guarantee on an interface,
+whether it is exposed or not, it should be labeled as Unstable. Private
+interfaces should also be Unstable in most cases.
-Examples of publicly visible interfaces that are unstable
-(i.e. not-an-interface): GUI, CLIs whose output format will change.
+Incompatible changes to Unstable interfaces are allowed at any time.
+
+* Incompatible changes allowed: maintenance (x.y.Z)
+* Compatible changes allowed: maintenance (x.y.Z)
#### Deprecated
-APIs that could potentially be removed in the future and should not be used.
+A Deprecated interface could potentially be removed in the future and should
+not be used. Even so, a Deprecated interface will continue to function until
+it is removed. When a Deprecated interface can be removed depends on whether
+it is also Stable, Evolving, or Unstable.
How are the Classifications Recorded?
-------------------------------------
@@ -121,95 +170,101 @@ How are the Classifications Recorded?
How will the classification be recorded for Hadoop APIs?
* Each interface or class will have the audience and stability recorded using
- annotations in org.apache.hadoop.classification package.
+ annotations in the org.apache.hadoop.classification package (see the example after this list).
-* The javadoc generated by the maven target javadoc:javadoc lists only the public API.
+* The javadoc generated by the maven target javadoc:javadoc lists only the
+ public API.
* One can derive the audience of java classes and java interfaces by the
audience of the package in which they are contained. Hence it is useful to
declare the audience of each java package as public or private (along with the
private audience variations).
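+
+As a sketch of how the annotations in the first bullet are applied: the class
+names below are hypothetical, while the annotations are the ones provided in
+the org.apache.hadoop.classification package.
+
+```java
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+class PublicStableExample {        // safe development target for user code
+}
+
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+class LimitedPrivateExample {      // only the listed projects should use this
+}
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+class PrivateUnstableExample {     // internal detail; may change at any time
+}
+```
+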
+How will the classification be recorded for other interfaces, such as CLIs?
+
+* See the [Hadoop Compatibility](Compatibility.html) page for details.
+
FAQ
---
* Why aren’t the java scopes (private, package private and public) good enough?
* Java’s scoping is not very complete. One is often forced to make a class
- public in order for other internal components to use it. It does not have
- friends or sub-package-private like C++.
+ public in order for other internal components to use it. It also does not
+ have friends or sub-package-private like C++.
-* But I can easily access a private implementation interface if it is Java public.
- Where is the protection and control?
- * The purpose of this is not providing absolute access control. Its purpose
- is to communicate to users and developers. One can access private
- implementation functions in libc; however if they change the internal
- implementation details, your application will break and you will have
- little sympathy from the folks who are supplying libc. If you use a
- non-public interface you understand the risks.
+* But I can easily access a Private interface if it is Java public. Where is the
+ protection and control?
+ * The purpose of this classification scheme is not to provide absolute
+ access control. Its purpose is to communicate to users and developers.
+ One can access private implementation functions in libc; however if
+ they change the internal implementation details, the application will
+ break and one will receive little sympathy from the folks who are
+ supplying libc. When using a non-public interface, the risks are
+ understood.
-* Why bother declaring the stability of a private interface?
- Aren’t private interfaces always unstable?
- * Private interfaces are not always unstable. In the cases where they are
- stable they capture internal properties of the system and can communicate
+* Why bother declaring the stability of a Private interface? Aren’t Private
+ interfaces always Unstable?
+ * Private interfaces are not always Unstable. In the cases where they are
+ Stable they capture internal properties of the system and can communicate
these properties to its internal users and to developers of the interface.
- * e.g. In HDFS, NN-DN protocol is private but stable and can help
- implement rolling upgrades. It communicates that this interface should
- not be changed in incompatible ways even though it is private.
- * e.g. In HDFS, FSImage stability provides more flexible rollback.
+ * e.g. In HDFS, NN-DN protocol is Private but Stable and can help
+ implement rolling upgrades. The stability annotation communicates that
+ this interface should not be changed in incompatible ways even though
+ it is Private.
+ * e.g. In HDFS, the FSImage's Stable designation provides more flexible
+ rollback.
-* What is the harm in applications using a private interface that is stable? How
- is it different than a public stable interface?
- * While a private interface marked as stable is targeted to change only at
+* What is the harm in applications using a Private interface that is Stable?
+ How is it different from a Public Stable interface?
+ * While a Private interface marked as Stable is targeted to change only at
major releases, it may break at other times if the providers of that
- interface are willing to change the internal users of that
- interface. Further, a public stable interface is less likely to break even
+ interface are also willing to change the internal consumers of that
+ interface. Further, a Public Stable interface is less likely to break even
at major releases (even though it is allowed to break compatibility)
- because the impact of the change is larger. If you use a private interface
+ because the impact of the change is larger. If you use a Private interface
(regardless of its stability) you run the risk of incompatibility.
-* Why bother with Limited-private? Isn’t it giving special treatment to some projects?
- That is not fair.
- * First, most interfaces should be public or private; actually let us state
- it even stronger: make it private unless you really want to expose it to
- public for general use.
- * Limited-private is for interfaces that are not intended for general
+* Why bother with Limited-Private? Isn’t it giving special treatment to some
+ projects? That is not fair.
+ * Most interfaces should be Public or Private. An interface should be
+ Private unless it is explicitly intended for general use.
+ * Limited-Private is for interfaces that are not intended for general
use. They are exposed to related projects that need special hooks. Such a
- classification has a cost to both the supplier and consumer of the limited
+ classification has a cost to both the supplier and consumer of the
interface. Both will have to work together if ever there is a need to
break the interface in the future; for example the supplier and the
consumers will have to work together to get coordinated releases of their
- respective projects. This should not be taken lightly – if you can get
- away with private then do so; if the interface is really for general use
- for all applications then do so. But remember that making an interface
- public has huge responsibility. Sometimes Limited-private is just right.
- * A good example of a limited-private interface is BlockLocations, This is a
- fairly low-level interface that we are willing to expose to MR and perhaps
- HBase. We are likely to change it down the road and at that time we will
- coordinate release effort with the MR team.
- While MR and HDFS are always released in sync today, they may
- change down the road.
- * If you have a limited-private interface with many projects listed then you
- are fooling yourself. It is practically public.
- * It might be worth declaring a special audience classification called
- Hadoop-Private for the Hadoop family.
+ respective projects. This contract should not be taken lightly: use
+ Private if possible; if the interface is really intended for general use
+ by all applications then use Public. Always remember that making an
+ interface Public comes with a large burden of responsibility. Sometimes
+ Limited-Private is just right.
+ * A good example of a Limited-Private interface is BlockLocations. This
+ interface is a fairly low-level interface that is exposed to MapReduce
+ and HBase. The interface is likely to change down the road, and at that
+ time the release effort will have to be coordinated with the
+ MapReduce development team. While MapReduce and HDFS are always released
+ in sync today, that policy may change in the future.
+ * If you have a Limited-Private interface with many projects listed then
+ the interface is probably a good candidate to be made Public.
-* Lets treat all private interfaces as Hadoop-private. What is the harm in
- projects in the Hadoop family have access to private classes?
- * Do we want MR accessing class files that are implementation details inside
- HDFS. There used to be many such layer violations in the code that we have
- been cleaning up over the last few years. We don’t want such layer
- violations to creep back in by no separating between the major components
- like HDFS and MR.
+* Let's treat all Private interfaces as Limited-Private for all of Hadoop. What
+ is the harm if projects in the Hadoop family have access to private classes?
+ * There used to be many cases in the code where one project depended on the
+ internal implementation details of another. A significant effort went
+ into cleaning up those issues. Opening up all interfaces as
+ Limited-Private for all of Hadoop would open the door to reintroducing
+ such coupling issues.
-* Aren't all public interfaces stable?
- * One may mark a public interface as evolving in its early days. Here one is
+* Aren't all Public interfaces Stable?
+ * One may mark a Public interface as Evolving in its early days. Here one is
promising to make an effort to make compatible changes but may need to
break it at minor releases.
- * One example of a public interface that is unstable is where one is
+ * One example of a Public interface that is Unstable is where one is
providing an implementation of a standards-body based interface that is
still under development. For example, many companies, in an attempt to be
first to market, have provided implementations of a new NFS protocol even
when the protocol was not fully completed by IETF. The implementor cannot
- evolve the interface in a fashion that causes least distruption because
+ evolve the interface in a fashion that causes the least disruption because
the stability is controlled by the standards body. Hence it is appropriate
- to label the interface as unstable.
+ to label the interface as Unstable.
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index 1e522c7782c..e67cbe32d42 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -605,7 +605,7 @@ The result is `FSDataOutputStream`, which through its operations may generate ne
clients creating files with `overwrite==true` to fail if the file is created
by another client between the two tests.
-* S3N, S3A, Swift and potentially other Object Stores do not currently change the FS state
+* S3A, Swift and potentially other Object Stores do not currently change the FS state
until the output stream `close()` operation is completed.
This MAY be a bug, as it allows >1 client to create a file with `overwrite==false`,
and potentially confuse file/directory logic
@@ -961,7 +961,7 @@ The outcome is no change to FileSystem state, with a return value of false.
FS' = FS; result = False
-*Local Filesystem, S3N*
+*Local Filesystem*
The outcome is as a normal rename, with the additional (implicit) feature
that the parent directories of the destination also exist.
@@ -1262,4 +1262,4 @@ It currently supports to query:
* `StreamCapabilties.HFLUSH` ("*hflush*"): the capability to flush out the data
in client's buffer.
* `StreamCapabilities.HSYNC` ("*hsync*"): capability to flush out the data in
- client's buffer and the disk device.
\ No newline at end of file
+ client's buffer and the disk device.
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
index 12a796717df..37191a5b2a6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
@@ -29,11 +29,10 @@ return codes of Unix filesystem actions as a reference. Even so, there
are places where HDFS diverges from the expected behaviour of a POSIX
filesystem.
-The behaviour of other Hadoop filesystems are not as rigorously tested.
-The bundled S3N and S3A FileSystem clients make Amazon's S3 Object Store ("blobstore")
+The bundled S3A FileSystem client makes Amazon's S3 Object Store ("blobstore")
accessible through the FileSystem API. The Swift FileSystem driver provides similar
-functionality for the OpenStack Swift blobstore. The Azure object storage
-FileSystem talks to Microsoft's Azure equivalent. All of these
+functionality for the OpenStack Swift blobstore. The Azure WASB and ADL object
+storage FileSystems talk to Microsoft's Azure storage. All of these
bind to object stores, which do have different behaviors, especially regarding
consistency guarantees, and atomicity of operations.
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md
index 6823e0c6a05..4c6fa3ff0f6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/testing.md
@@ -195,21 +195,21 @@ equivalent. Furthermore, the build MUST be configured to never bundle this file
In addition, `src/test/resources/auth-keys.xml` will need to be created. It can be a copy of `contract-test-options.xml`.
The `AbstractFSContract` class automatically loads this resource file if present; specific keys for specific test cases can be added.
-As an example, here are what S3N test keys look like:
+As an example, here are what S3A test keys look like:
- <name>fs.contract.test.fs.s3n</name>
- <value>s3n://tests3contract</value>
+ <name>fs.contract.test.fs.s3a</name>
+ <value>s3a://tests3contract</value>
- <name>fs.s3n.awsAccessKeyId</name>
+ <name>fs.s3a.access.key</name>
 <value>DONOTPCOMMITTHISKEYTOSCM</value>
- <name>fs.s3n.awsSecretAccessKey</name>
+ <name>fs.s3a.secret.key</name>
 <value>DONOTEVERSHARETHISSECRETKEY!</value>
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 33248864f2e..864c10ce207 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -94,14 +94,10 @@ public void initializeMemberVariables() {
xmlPropsToSkipCompare.add("hadoop.tmp.dir");
xmlPropsToSkipCompare.add("nfs3.mountd.port");
xmlPropsToSkipCompare.add("nfs3.server.port");
- xmlPropsToSkipCompare.add("test.fs.s3n.name");
xmlPropsToSkipCompare.add("fs.viewfs.rename.strategy");
- // S3N/S3A properties are in a different subtree.
- // - org.apache.hadoop.fs.s3native.S3NativeFileSystemConfigKeys
+ // S3A properties are in a different subtree.
xmlPrefixToSkipCompare.add("fs.s3a.");
- xmlPrefixToSkipCompare.add("fs.s3n.");
- xmlPrefixToSkipCompare.add("s3native.");
// WASB properties are in a different subtree.
// - org.apache.hadoop.fs.azure.NativeAzureFileSystem
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigRedactor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigRedactor.java
index 4790f7c6d39..313394293c0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigRedactor.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigRedactor.java
@@ -54,7 +54,6 @@ private void testRedact(Configuration conf) throws Exception {
"fs.s3a.bucket.BUCKET.secret.key",
"fs.s3a.server-side-encryption.key",
"fs.s3a.bucket.engineering.server-side-encryption.key",
- "fs.s3n.awsSecretKey",
"fs.azure.account.key.abcdefg.blob.core.windows.net",
"fs.adl.oauth2.refresh.token",
"fs.adl.oauth2.credential",
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index 2c19722ba89..d14dd59c773 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -23,12 +23,9 @@
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.verify;
import java.io.IOException;
-import java.net.NoRouteToHostException;
import java.net.URI;
-import java.net.UnknownHostException;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
@@ -36,9 +33,6 @@
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.net.ConnectTimeoutException;
-import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.junit.Test;
@@ -53,17 +47,14 @@ public void testCreation() throws Exception {
Configuration conf = new Configuration();
KeyProvider kp = new KMSClientProvider.Factory().createProvider(new URI(
"kms://http@host1/kms/foo"), conf);
- assertTrue(kp instanceof LoadBalancingKMSClientProvider);
- KMSClientProvider[] providers =
- ((LoadBalancingKMSClientProvider) kp).getProviders();
- assertEquals(1, providers.length);
- assertEquals(Sets.newHashSet("http://host1/kms/foo/v1/"),
- Sets.newHashSet(providers[0].getKMSUrl()));
+ assertTrue(kp instanceof KMSClientProvider);
+ assertEquals("http://host1/kms/foo/v1/",
+ ((KMSClientProvider) kp).getKMSUrl());
kp = new KMSClientProvider.Factory().createProvider(new URI(
"kms://http@host1;host2;host3/kms/foo"), conf);
assertTrue(kp instanceof LoadBalancingKMSClientProvider);
- providers =
+ KMSClientProvider[] providers =
((LoadBalancingKMSClientProvider) kp).getProviders();
assertEquals(3, providers.length);
assertEquals(Sets.newHashSet("http://host1/kms/foo/v1/",
@@ -131,7 +122,7 @@ public void testLoadBalancingWithFailure() throws Exception {
// This should be retried
KMSClientProvider p4 = mock(KMSClientProvider.class);
when(p4.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new ConnectTimeoutException("p4"));
+ .thenThrow(new IOException("p4"));
when(p4.getKMSUrl()).thenReturn("p4");
KeyProvider kp = new LoadBalancingKMSClientProvider(
new KMSClientProvider[] { p1, p2, p3, p4 }, 0, conf);
@@ -329,298 +320,4 @@ public void testWarmUpEncryptedKeysWhenOneProviderSucceeds()
Mockito.verify(p1, Mockito.times(1)).warmUpEncryptedKeys(keyName);
Mockito.verify(p2, Mockito.times(1)).warmUpEncryptedKeys(keyName);
}
-
- /**
- * Tests whether retryPolicy fails immediately on encountering IOException
- * which is not SocketException.
- * @throws Exception
- */
- @Test
- public void testClientRetriesWithIOException() throws Exception {
- Configuration conf = new Configuration();
- // Setting total failover attempts to .
- conf.setInt(
- CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 10);
- KMSClientProvider p1 = mock(KMSClientProvider.class);
- when(p1.getMetadata(Mockito.anyString()))
- .thenThrow(new IOException("p1"));
- KMSClientProvider p2 = mock(KMSClientProvider.class);
- when(p2.getMetadata(Mockito.anyString()))
- .thenThrow(new IOException("p2"));
- KMSClientProvider p3 = mock(KMSClientProvider.class);
- when(p3.getMetadata(Mockito.anyString()))
- .thenThrow(new IOException("p3"));
-
- when(p1.getKMSUrl()).thenReturn("p1");
- when(p2.getKMSUrl()).thenReturn("p2");
- when(p3.getKMSUrl()).thenReturn("p3");
- LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
- new KMSClientProvider[] {p1, p2, p3}, 0, conf);
- try {
- kp.getMetadata("test3");
- fail("Should fail since all providers threw an IOException");
- } catch (Exception e) {
- assertTrue(e instanceof IOException);
- }
- verify(kp.getProviders()[0], Mockito.times(1))
- .getMetadata(Mockito.eq("test3"));
- verify(kp.getProviders()[1], Mockito.never())
- .getMetadata(Mockito.eq("test3"));
- verify(kp.getProviders()[2], Mockito.never())
- .getMetadata(Mockito.eq("test3"));
- }
-
- /**
- * Tests that client doesn't retry once it encounters AccessControlException
- * from first provider.
- * This assumes all the kms servers are configured with identical access to
- * keys.
- * @throws Exception
- */
- @Test
- public void testClientRetriesWithAccessControlException() throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(
- CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
- KMSClientProvider p1 = mock(KMSClientProvider.class);
- when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new AccessControlException("p1"));
- KMSClientProvider p2 = mock(KMSClientProvider.class);
- when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new IOException("p2"));
- KMSClientProvider p3 = mock(KMSClientProvider.class);
- when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new IOException("p3"));
-
- when(p1.getKMSUrl()).thenReturn("p1");
- when(p2.getKMSUrl()).thenReturn("p2");
- when(p3.getKMSUrl()).thenReturn("p3");
- LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
- new KMSClientProvider[] {p1, p2, p3}, 0, conf);
- try {
- kp.createKey("test3", new Options(conf));
- fail("Should fail because provider p1 threw an AccessControlException");
- } catch (Exception e) {
- assertTrue(e instanceof AccessControlException);
- }
- verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p2, Mockito.never()).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p3, Mockito.never()).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- }
-
- /**
- * Tests that client doesn't retry once it encounters RunTimeException
- * from first provider.
- * This assumes all the kms servers are configured with identical access to
- * keys.
- * @throws Exception
- */
- @Test
- public void testClientRetriesWithRuntimeException() throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(
- CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
- KMSClientProvider p1 = mock(KMSClientProvider.class);
- when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new RuntimeException("p1"));
- KMSClientProvider p2 = mock(KMSClientProvider.class);
- when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new IOException("p2"));
-
- when(p1.getKMSUrl()).thenReturn("p1");
- when(p2.getKMSUrl()).thenReturn("p2");
-
- LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
- new KMSClientProvider[] {p1, p2}, 0, conf);
- try {
- kp.createKey("test3", new Options(conf));
- fail("Should fail since provider p1 threw RuntimeException");
- } catch (Exception e) {
- assertTrue(e instanceof RuntimeException);
- }
- verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p2, Mockito.never()).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- }
-
- /**
- * Tests the client retries until it finds a good provider.
- * @throws Exception
- */
- @Test
- public void testClientRetriesWithTimeoutsException() throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(
- CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 4);
- KMSClientProvider p1 = mock(KMSClientProvider.class);
- when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new ConnectTimeoutException("p1"));
- KMSClientProvider p2 = mock(KMSClientProvider.class);
- when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new UnknownHostException("p2"));
- KMSClientProvider p3 = mock(KMSClientProvider.class);
- when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new NoRouteToHostException("p3"));
- KMSClientProvider p4 = mock(KMSClientProvider.class);
- when(p4.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenReturn(
- new KMSClientProvider.KMSKeyVersion("test3", "v1", new byte[0]));
- when(p1.getKMSUrl()).thenReturn("p1");
- when(p2.getKMSUrl()).thenReturn("p2");
- when(p3.getKMSUrl()).thenReturn("p3");
- when(p4.getKMSUrl()).thenReturn("p4");
- LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
- new KMSClientProvider[] {p1, p2, p3, p4}, 0, conf);
- try {
- kp.createKey("test3", new Options(conf));
- } catch (Exception e) {
- fail("Provider p4 should have answered the request.");
- }
- verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p2, Mockito.times(1)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p3, Mockito.times(1)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p4, Mockito.times(1)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- }
-
- /**
- * Tests the operation succeeds second time after ConnectTimeoutException.
- * @throws Exception
- */
- @Test
- public void testClientRetriesSucceedsSecondTime() throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(
- CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
- KMSClientProvider p1 = mock(KMSClientProvider.class);
- when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new ConnectTimeoutException("p1"))
- .thenReturn(new KMSClientProvider.KMSKeyVersion("test3", "v1",
- new byte[0]));
- KMSClientProvider p2 = mock(KMSClientProvider.class);
- when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new ConnectTimeoutException("p2"));
-
- when(p1.getKMSUrl()).thenReturn("p1");
- when(p2.getKMSUrl()).thenReturn("p2");
-
- LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
- new KMSClientProvider[] {p1, p2}, 0, conf);
- try {
- kp.createKey("test3", new Options(conf));
- } catch (Exception e) {
- fail("Provider p1 should have answered the request second time.");
- }
- verify(p1, Mockito.times(2)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p2, Mockito.times(1)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- }
-
- /**
- * Tests whether retryPolicy retries specified number of times.
- * @throws Exception
- */
- @Test
- public void testClientRetriesSpecifiedNumberOfTimes() throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(
- CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 10);
- KMSClientProvider p1 = mock(KMSClientProvider.class);
- when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new ConnectTimeoutException("p1"));
- KMSClientProvider p2 = mock(KMSClientProvider.class);
- when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new ConnectTimeoutException("p2"));
-
- when(p1.getKMSUrl()).thenReturn("p1");
- when(p2.getKMSUrl()).thenReturn("p2");
-
- LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
- new KMSClientProvider[] {p1, p2}, 0, conf);
- try {
- kp.createKey("test3", new Options(conf));
- fail("Should fail");
- } catch (Exception e) {
- assert (e instanceof ConnectTimeoutException);
- }
- verify(p1, Mockito.times(6)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p2, Mockito.times(5)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- }
-
- /**
- * Tests whether retryPolicy retries number of times equals to number of
- * providers if conf kms.client.failover.max.attempts is not set.
- * @throws Exception
- */
- @Test
- public void testClientRetriesIfMaxAttemptsNotSet() throws Exception {
- Configuration conf = new Configuration();
- KMSClientProvider p1 = mock(KMSClientProvider.class);
- when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new ConnectTimeoutException("p1"));
- KMSClientProvider p2 = mock(KMSClientProvider.class);
- when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new ConnectTimeoutException("p2"));
-
- when(p1.getKMSUrl()).thenReturn("p1");
- when(p2.getKMSUrl()).thenReturn("p2");
-
- LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
- new KMSClientProvider[] {p1, p2}, 0, conf);
- try {
- kp.createKey("test3", new Options(conf));
- fail("Should fail");
- } catch (Exception e) {
- assert (e instanceof ConnectTimeoutException);
- }
- verify(p1, Mockito.times(2)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p2, Mockito.times(1)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- }
-
- /**
- * Tests that client doesn't retry once it encounters AuthenticationException
- * wrapped in an IOException from first provider.
- * @throws Exception
- */
- @Test
- public void testClientRetriesWithAuthenticationExceptionWrappedinIOException()
- throws Exception {
- Configuration conf = new Configuration();
- conf.setInt(
- CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
- KMSClientProvider p1 = mock(KMSClientProvider.class);
- when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new IOException(new AuthenticationException("p1")));
- KMSClientProvider p2 = mock(KMSClientProvider.class);
- when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
- .thenThrow(new ConnectTimeoutException("p2"));
-
- when(p1.getKMSUrl()).thenReturn("p1");
- when(p2.getKMSUrl()).thenReturn("p2");
-
- LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
- new KMSClientProvider[] {p1, p2}, 0, conf);
- try {
- kp.createKey("test3", new Options(conf));
- fail("Should fail since provider p1 threw AuthenticationException");
- } catch (Exception e) {
- assertTrue(e.getCause() instanceof AuthenticationException);
- }
- verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- verify(p2, Mockito.never()).createKey(Mockito.eq("test3"),
- Mockito.any(Options.class));
- }
-}
\ No newline at end of file
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 9d8cd64ca4b..a4ccee3f7f5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -61,7 +61,16 @@ public abstract class FileSystemContractBaseTest {
protected byte[] data = dataset(getBlockSize() * 2, 0, 255);
@Rule
- public Timeout globalTimeout = new Timeout(30000);
+ public Timeout globalTimeout = new Timeout(getGlobalTimeout());
+
+ /**
+ * Get the timeout in milliseconds for each test case.
+ * @return a time in milliseconds.
+ */
+ protected int getGlobalTimeout() {
+ return 30 * 1000;
+ }
+
@Rule
public ExpectedException thrown = ExpectedException.none();
@@ -246,39 +255,18 @@ public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
@Test
public void testMkdirsWithUmask() throws Exception {
- if (!isS3(fs)) {
- Configuration conf = fs.getConf();
- String oldUmask = conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
- try {
- conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
- final Path dir = path("newDir");
- assertTrue(fs.mkdirs(dir, new FsPermission((short) 0777)));
- FileStatus status = fs.getFileStatus(dir);
- assertTrue(status.isDirectory());
- assertEquals((short) 0715, status.getPermission().toShort());
- } finally {
- conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask);
- }
- }
- }
-
- /**
- * Skip permission tests for S3FileSystem until HDFS-1333 is fixed.
- * Classes that do not implement {@link FileSystem#getScheme()} method
- * (e.g {@link RawLocalFileSystem}) will throw an
- * {@link UnsupportedOperationException}.
- * @param fileSystem FileSystem object to determine if it is S3 or not
- * @return true if S3 false in any other case
- */
- private boolean isS3(FileSystem fileSystem) {
+ Configuration conf = fs.getConf();
+ String oldUmask = conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
try {
- if (fileSystem.getScheme().equals("s3n")) {
- return true;
- }
- } catch (UnsupportedOperationException e) {
- LOG.warn("Unable to determine the schema of filesystem.");
+ conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
+ final Path dir = path("newDir");
+ assertTrue(fs.mkdirs(dir, new FsPermission((short) 0777)));
+ FileStatus status = fs.getFileStatus(dir);
+ assertTrue(status.isDirectory());
+ assertEquals((short) 0715, status.getPermission().toShort());
+ } finally {
+ conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask);
}
- return false;
}
@Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
index f9b16f47949..ccf188f1202 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
@@ -122,7 +122,7 @@ public void testOpenFileTwice() throws Throwable {
Path path = path("testopenfiletwice.txt");
byte[] block = dataset(TEST_FILE_LEN, 0, 255);
//this file now has a simple rule: offset => value
- createFile(getFileSystem(), path, false, block);
+ createFile(getFileSystem(), path, true, block);
//open first
FSDataInputStream instream1 = getFileSystem().open(path);
FSDataInputStream instream2 = null;
@@ -150,7 +150,7 @@ public void testSequentialRead() throws Throwable {
int base = 0x40; // 64
byte[] block = dataset(len, base, base + len);
//this file now has a simple rule: offset => (value | 0x40)
- createFile(getFileSystem(), path, false, block);
+ createFile(getFileSystem(), path, true, block);
//open first
instream = getFileSystem().open(path);
assertEquals(base, instream.read());
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
index 3e716820ba0..7af3cb0a525 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
@@ -341,7 +341,7 @@ public void testRandomSeeks() throws Throwable {
int filesize = 10 * 1024;
byte[] buf = dataset(filesize, 0, 255);
Path randomSeekFile = path("testrandomseeks.bin");
- createFile(getFileSystem(), randomSeekFile, false, buf);
+ createFile(getFileSystem(), randomSeekFile, true, buf);
Random r = new Random();
// Record the sequence of seeks and reads which trigger a failure.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
index a22985de505..afddf80a25c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/permission/TestFsPermission.java
@@ -138,6 +138,63 @@ public void testFsPermission() {
}
}
+ @Test
+ public void testFsSymbolicConstructorWithNormalInput() {
+
+ // Test cases for the symbolic constructor.
+
+ // Both the octal and the short representations are asserted; cases with
+ // the sticky bit follow further below.
+
+ assertEquals(777, new FsPermission("+rwx").toOctal());
+ assertEquals(0777, new FsPermission("+rwx").toShort());
+
+ assertEquals(444, new FsPermission("+r").toOctal());
+ assertEquals(0444, new FsPermission("+r").toShort());
+
+ assertEquals(222, new FsPermission("+w").toOctal());
+ assertEquals(0222, new FsPermission("+w").toShort());
+
+ assertEquals(111, new FsPermission("+x").toOctal());
+ assertEquals(0111, new FsPermission("+x").toShort());
+
+ assertEquals(666, new FsPermission("+rw").toOctal());
+ assertEquals(0666, new FsPermission("+rw").toShort());
+
+ assertEquals(333, new FsPermission("+wx").toOctal());
+ assertEquals(0333, new FsPermission("+wx").toShort());
+
+ assertEquals(555, new FsPermission("+rx").toOctal());
+ assertEquals(0555, new FsPermission("+rx").toShort());
+
+
+ // Test with repeated values in the mode string.
+ // Duplicate values in the input are ignored.
+
+ assertEquals(666, new FsPermission("+rwr").toOctal());
+ assertEquals(0666, new FsPermission("+rwr").toShort());
+
+ assertEquals(000, new FsPermission("-rwr").toOctal());
+ assertEquals(0000, new FsPermission("-rwr").toShort());
+
+ assertEquals(1666, new FsPermission("+rwrt").toOctal());
+ assertEquals(01666, new FsPermission("+rwrt").toShort());
+
+ assertEquals(000, new FsPermission("-rwrt").toOctal());
+ assertEquals(0000, new FsPermission("-rwrt").toShort());
+
+ assertEquals(1777, new FsPermission("+rwxt").toOctal());
+ assertEquals(01777, new FsPermission("+rwxt").toShort());
+
+
+ assertEquals(000, new FsPermission("-rt").toOctal());
+ assertEquals(0000, new FsPermission("-rt").toShort());
+
+ assertEquals(000, new FsPermission("-rwx").toOctal());
+ assertEquals(0000, new FsPermission("-rwx").toShort());
+
+ }
+
+
@Test
public void testSymbolicPermission() {
for (int i = 0; i < SYMBOLIC.length; ++i) {
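
As an aside for readers of this patch, the symbolic-constructor behaviour exercised above can be summarised in a few lines. This is an illustrative sketch grounded in the assertions of the new test, not part of the change; the class name FsPermissionSymbolicDemo is invented for the example.

    import org.apache.hadoop.fs.permission.FsPermission;

    public class FsPermissionSymbolicDemo {
      public static void main(String[] args) {
        // "+rwx" grants read, write and execute to user, group and other: 0777.
        System.out.println(new FsPermission("+rwx").toShort() == 0777);   // true
        // A trailing 't' sets the sticky bit: "+rwxt" -> 01777.
        System.out.println(new FsPermission("+rwxt").toShort() == 01777); // true
        // A leading '-' clears the listed bits everywhere: "-rwx" -> 0000.
        System.out.println(new FsPermission("-rwx").toShort() == 0);      // true
      }
    }
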
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
index e8eedd949e0..af48cb6169b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
@@ -34,16 +34,16 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.sshd.SshServer;
+import org.apache.sshd.server.SshServer;
import org.apache.sshd.common.NamedFactory;
import org.apache.sshd.server.Command;
-import org.apache.sshd.server.PasswordAuthenticator;
-import org.apache.sshd.server.UserAuth;
-import org.apache.sshd.server.auth.UserAuthPassword;
+import org.apache.sshd.server.auth.password.PasswordAuthenticator;
+import org.apache.sshd.server.auth.UserAuth;
+import org.apache.sshd.server.auth.password.UserAuthPasswordFactory;
import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider;
import org.apache.sshd.server.session.ServerSession;
-import org.apache.sshd.server.sftp.SftpSubsystem;
+import org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
@@ -76,7 +76,7 @@ private static void startSshdServer() throws IOException {
List<NamedFactory<UserAuth>> userAuthFactories =
new ArrayList<NamedFactory<UserAuth>>();
- userAuthFactories.add(new UserAuthPassword.Factory());
+ userAuthFactories.add(new UserAuthPasswordFactory());
sshd.setUserAuthFactories(userAuthFactories);
@@ -92,7 +92,7 @@ public boolean authenticate(String username, String password,
});
sshd.setSubsystemFactories(
- Arrays.<NamedFactory<Command>>asList(new SftpSubsystem.Factory()));
+ Arrays.<NamedFactory<Command>>asList(new SftpSubsystemFactory()));
sshd.start();
port = sshd.getPort();
@@ -140,7 +140,7 @@ public static void tearDown() {
if (sshd != null) {
try {
sshd.stop(true);
- } catch (InterruptedException e) {
+ } catch (IOException e) {
// ignore
}
}
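
For context, the import changes above track the move from the Apache MINA SSHD 0.x inner factory classes to the 1.x top-level factories. The following is a rough sketch of the updated server wiring, assuming MINA SSHD 1.x on the classpath; the class name and the user-name check are placeholders, not part of the patch.

    import java.util.Collections;
    import org.apache.sshd.common.NamedFactory;
    import org.apache.sshd.server.Command;
    import org.apache.sshd.server.SshServer;
    import org.apache.sshd.server.auth.UserAuth;
    import org.apache.sshd.server.auth.password.UserAuthPasswordFactory;
    import org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider;
    import org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory;

    public class EmbeddedSftpServerSketch {
      public static void main(String[] args) throws Exception {
        SshServer sshd = SshServer.setUpDefaultServer();
        sshd.setPort(0); // pick any free port
        sshd.setKeyPairProvider(new SimpleGeneratorHostKeyProvider());
        // UserAuthPasswordFactory replaces the old UserAuthPassword.Factory.
        sshd.setUserAuthFactories(Collections.<NamedFactory<UserAuth>>singletonList(
            new UserAuthPasswordFactory()));
        sshd.setPasswordAuthenticator(
            (username, password, session) -> "user".equals(username));
        // SftpSubsystemFactory replaces the old SftpSubsystem.Factory.
        sshd.setSubsystemFactories(Collections.<NamedFactory<Command>>singletonList(
            new SftpSubsystemFactory()));
        sshd.start();
        System.out.println("SFTP server listening on port " + sshd.getPort());
        sshd.stop(true); // stop() now declares IOException, not InterruptedException
      }
    }
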
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
index 00cfa44f310..3ea9ab8fce5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
@@ -70,7 +70,7 @@ public interface TimeoutHandler {
* @throws Exception if the handler wishes to raise an exception
* that way.
*/
- Exception evaluate(int timeoutMillis, Exception caught) throws Exception;
+ Throwable evaluate(int timeoutMillis, Throwable caught) throws Throwable;
}
/**
@@ -116,7 +116,7 @@ public static int await(int timeoutMillis,
Preconditions.checkNotNull(timeoutHandler);
long endTime = Time.now() + timeoutMillis;
- Exception ex = null;
+ Throwable ex = null;
boolean running = true;
int iterations = 0;
while (running) {
@@ -128,9 +128,11 @@ public static int await(int timeoutMillis,
// the probe failed but did not raise an exception. Reset any
// exception raised by a previous probe failure.
ex = null;
- } catch (InterruptedException | FailFastException e) {
+ } catch (InterruptedException
+ | FailFastException
+ | VirtualMachineError e) {
throw e;
- } catch (Exception e) {
+ } catch (Throwable e) {
LOG.debug("eventually() iteration {}", iterations, e);
ex = e;
}
@@ -145,15 +147,20 @@ public static int await(int timeoutMillis,
}
}
// timeout
- Exception evaluate = timeoutHandler.evaluate(timeoutMillis, ex);
- if (evaluate == null) {
- // bad timeout handler logic; fall back to GenerateTimeout so the
- // underlying problem isn't lost.
- LOG.error("timeout handler {} did not throw an exception ",
- timeoutHandler);
- evaluate = new GenerateTimeout().evaluate(timeoutMillis, ex);
+ Throwable evaluate;
+ try {
+ evaluate = timeoutHandler.evaluate(timeoutMillis, ex);
+ if (evaluate == null) {
+ // bad timeout handler logic; fall back to GenerateTimeout so the
+ // underlying problem isn't lost.
+ LOG.error("timeout handler {} did not throw an exception ",
+ timeoutHandler);
+ evaluate = new GenerateTimeout().evaluate(timeoutMillis, ex);
+ }
+ } catch (Throwable throwable) {
+ evaluate = throwable;
}
- throw evaluate;
+ return raise(evaluate);
}
/**
@@ -217,6 +224,7 @@ public static int await(int timeoutMillis,
* @throws Exception the last exception thrown before timeout was triggered
* @throws FailFastException if raised -without any retry attempt.
* @throws InterruptedException if interrupted during the sleep operation.
+ * @throws OutOfMemoryError you've run out of memory.
*/
public static <T> T eventually(int timeoutMillis,
Callable<T> eval,
@@ -224,7 +232,7 @@ public static <T> T eventually(int timeoutMillis,
Preconditions.checkArgument(timeoutMillis >= 0,
"timeoutMillis must be >= 0");
long endTime = Time.now() + timeoutMillis;
- Exception ex;
+ Throwable ex;
boolean running;
int sleeptime;
int iterations = 0;
@@ -232,10 +240,12 @@ public static T eventually(int timeoutMillis,
iterations++;
try {
return eval.call();
- } catch (InterruptedException | FailFastException e) {
+ } catch (InterruptedException
+ | FailFastException
+ | VirtualMachineError e) {
// these two exceptions trigger an immediate exit
throw e;
- } catch (Exception e) {
+ } catch (Throwable e) {
LOG.debug("evaluate() iteration {}", iterations, e);
ex = e;
}
@@ -245,7 +255,26 @@ public static T eventually(int timeoutMillis,
}
} while (running);
// timeout. Throw the last exception raised
- throw ex;
+ return raise(ex);
+ }
+
+ /**
+ * Take the throwable and raise it as an exception or an error, depending
+ * upon its type. This allows callers to declare that they only throw
+ * Exception (i.e. can be invoked by Callable) yet still rethrow a
+ * previously caught Throwable.
+ * @param throwable Throwable to rethrow
+ * @param <T> expected return type
+ * @return never
+ * @throws Exception if throwable is an Exception
+ * @throws Error if throwable is not an Exception
+ */
+ private static <T> T raise(Throwable throwable) throws Exception {
+ if (throwable instanceof Exception) {
+ throw (Exception) throwable;
+ } else {
+ throw (Error) throwable;
+ }
}
/**
@@ -365,6 +394,7 @@ public static <E extends Throwable> E intercept(
* @throws Exception any other exception raised
* @throws AssertionError if the evaluation call didn't raise an exception.
*/
+ @SuppressWarnings("unchecked")
public static <E extends Throwable> E intercept(
Class<E> clazz,
VoidCallable eval)
@@ -487,14 +517,14 @@ public GenerateTimeout() {
* @return TimeoutException
*/
@Override
- public Exception evaluate(int timeoutMillis, Exception caught)
- throws Exception {
+ public Throwable evaluate(int timeoutMillis, Throwable caught)
+ throws Throwable {
String s = String.format("%s: after %d millis", message,
timeoutMillis);
String caughtText = caught != null
? ("; " + robustToString(caught)) : "";
- return (TimeoutException) (new TimeoutException(s + caughtText)
+ return (new TimeoutException(s + caughtText)
.initCause(caught));
}
}
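
The net effect of the LambdaTestUtils change is that eventually() and await() now retry through JUnit assertion failures (which are Errors, not Exceptions) while still rethrowing VirtualMachineError immediately, and raise() lets them do so behind a throws Exception signature. A minimal usage sketch, assuming the FixedRetryInterval helper already provided by this class; the demo class name is invented.

    import java.util.concurrent.atomic.AtomicInteger;
    import org.apache.hadoop.test.LambdaTestUtils;
    import static org.junit.Assert.assertTrue;

    public class EventuallyWithAssertionsSketch {
      public static void main(String[] args) throws Exception {
        AtomicInteger attempts = new AtomicInteger();
        // The probe fails its assertion on the first two attempts, then passes;
        // before this change the AssertionError would have escaped immediately.
        String result = LambdaTestUtils.eventually(5000,
            () -> {
              assertTrue("not ready yet", attempts.incrementAndGet() > 2);
              return "ready";
            },
            new LambdaTestUtils.FixedRetryInterval(100));
        System.out.println(result + " after " + attempts.get() + " attempts");
      }
    }
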
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java
index d3d5cb4fde3..c790a180ede 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java
@@ -25,6 +25,7 @@
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
import static org.apache.hadoop.test.LambdaTestUtils.*;
import static org.apache.hadoop.test.GenericTestUtils.*;
@@ -123,6 +124,27 @@ protected void assertMinRetryCount(int minCount) {
minCount <= retry.getInvocationCount());
}
+ /**
+ * Raise an exception.
+ * @param e exception to raise
+ * @return never
+ * @throws Exception passed in exception
+ */
+ private boolean r(Exception e) throws Exception {
+ throw e;
+ }
+
+ /**
+ * Raise an error.
+ * @param e error to raise
+ * @return never
+ * @throws Exception never
+ * @throws Error the passed in error
+ */
+ private boolean r(Error e) throws Exception {
+ throw e;
+ }
+
@Test
public void testAwaitAlwaysTrue() throws Throwable {
await(TIMEOUT,
@@ -140,7 +162,7 @@ public void testAwaitAlwaysFalse() throws Throwable {
TIMEOUT_FAILURE_HANDLER);
fail("should not have got here");
} catch (TimeoutException e) {
- assertTrue(retry.getInvocationCount() > 4);
+ assertMinRetryCount(1);
}
}
@@ -316,9 +338,7 @@ public void testInterceptAwaitLambdaException() throws Throwable {
IOException ioe = intercept(IOException.class,
() -> await(
TIMEOUT,
- () -> {
- throw new IOException("inner " + ++count);
- },
+ () -> r(new IOException("inner " + ++count)),
retry,
(timeout, ex) -> ex));
assertRetryCount(count - 1);
@@ -339,9 +359,7 @@ public void testInterceptAwaitLambdaDiagnostics() throws Throwable {
public void testInterceptAwaitFailFastLambda() throws Throwable {
intercept(FailFastException.class,
() -> await(TIMEOUT,
- () -> {
- throw new FailFastException("ffe");
- },
+ () -> r(new FailFastException("ffe")),
retry,
(timeout, ex) -> ex));
assertRetryCount(0);
@@ -361,14 +379,13 @@ public void testEventuallyLambda() throws Throwable {
assertRetryCount(0);
}
+
@Test
public void testInterceptEventuallyLambdaFailures() throws Throwable {
intercept(IOException.class,
"oops",
() -> eventually(TIMEOUT,
- () -> {
- throw new IOException("oops");
- },
+ () -> r(new IOException("oops")),
retry));
assertMinRetryCount(1);
}
@@ -385,11 +402,95 @@ public void testInterceptEventuallyLambdaFailFast() throws Throwable {
intercept(FailFastException.class, "oops",
() -> eventually(
TIMEOUT,
- () -> {
- throw new FailFastException("oops");
- },
+ () -> r(new FailFastException("oops")),
retry));
assertRetryCount(0);
}
+ /**
+ * Verify that assertions trigger catch and retry.
+ * @throws Throwable if the code is broken
+ */
+ @Test
+ public void testEventuallySpinsOnAssertions() throws Throwable {
+ AtomicInteger counter = new AtomicInteger(0);
+ eventually(TIMEOUT,
+ () -> {
+ while (counter.incrementAndGet() < 5) {
+ fail("if you see this, we are in trouble");
+ }
+ },
+ retry);
+ assertMinRetryCount(4);
+ }
+
+ /**
+ * Verify that VirtualMachineError errors are immediately rethrown.
+ * @throws Throwable if the code is broken
+ */
+ @Test
+ public void testInterceptEventuallyThrowsVMErrors() throws Throwable {
+ intercept(OutOfMemoryError.class, "OOM",
+ () -> eventually(
+ TIMEOUT,
+ () -> r(new OutOfMemoryError("OOM")),
+ retry));
+ assertRetryCount(0);
+ }
+
+ /**
+ * Verify that you can declare that an intercept will intercept Errors.
+ * @throws Throwable if the code is broken
+ */
+ @Test
+ public void testInterceptHandlesErrors() throws Throwable {
+ intercept(OutOfMemoryError.class, "OOM",
+ () -> r(new OutOfMemoryError("OOM")));
+ }
+
+ /**
+ * Verify that if a raised Error is not the one being intercepted,
+ * it gets rethrown.
+ * @throws Throwable if the code is broken
+ */
+ @Test
+ public void testInterceptRethrowsVMErrors() throws Throwable {
+ intercept(StackOverflowError.class, "",
+ () -> intercept(OutOfMemoryError.class, "",
+ () -> r(new StackOverflowError())));
+ }
+
+ @Test
+ public void testAwaitHandlesAssertions() throws Throwable {
+ // await a state which is never reached, expect a timeout exception
+ // with the text "failure" in it
+ TimeoutException ex = intercept(TimeoutException.class,
+ "failure",
+ () -> await(TIMEOUT,
+ () -> r(new AssertionError("failure")),
+ retry,
+ TIMEOUT_FAILURE_HANDLER));
+
+ // the retry handler must have been invoked
+ assertMinRetryCount(1);
+ // and the nested cause is the raised assertion
+ if (!(ex.getCause() instanceof AssertionError)) {
+ throw ex;
+ }
+ }
+
+ @Test
+ public void testAwaitRethrowsVMErrors() throws Throwable {
+ // the raised StackOverflowError must be rethrown immediately,
+ // rather than being wrapped in a timeout exception
+ intercept(StackOverflowError.class,
+ () -> await(TIMEOUT,
+ () -> r(new StackOverflowError()),
+ retry,
+ TIMEOUT_FAILURE_HANDLER));
+
+ // the retry handler must not have been invoked
+ assertMinRetryCount(0);
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCpuTimeTracker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCpuTimeTracker.java
new file mode 100644
index 00000000000..6246672f0eb
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCpuTimeTracker.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.junit.Test;
+import java.math.BigInteger;
+import static org.junit.Assert.assertTrue;
+
+public class TestCpuTimeTracker {
+ @Test
+ public void test() throws InterruptedException {
+ CpuTimeTracker tracker = new CpuTimeTracker(10);
+ tracker.updateElapsedJiffies(
+ BigInteger.valueOf(100),
+ System.currentTimeMillis());
+ float val1 = tracker.getCpuTrackerUsagePercent();
+ assertTrue(
+ "Not invalid CPU usage",
+ val1 == -1.0);
+ Thread.sleep(1000);
+ tracker.updateElapsedJiffies(
+ BigInteger.valueOf(200),
+ System.currentTimeMillis());
+ float val2 = tracker.getCpuTrackerUsagePercent();
+ assertTrue(
+ "Not positive CPU usage",
+ val2 > 0);
+ Thread.sleep(1000);
+ tracker.updateElapsedJiffies(
+ BigInteger.valueOf(0),
+ System.currentTimeMillis());
+ float val3 = tracker.getCpuTrackerUsagePercent();
+ assertTrue(
+ "Not positive CPU usage",
+ val3 == 0.0);
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml b/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
index d85472cd402..d9144ebb1a9 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
@@ -45,12 +45,6 @@
This is required by FTPFileSystem
- <property>
- <name>test.fs.s3n.name</name>
- <value>s3n:///</value>
- <description>The name of the s3n file system for testing.</description>
- </property>
-
hadoop.security.authentication
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/jets3t.properties b/hadoop-common-project/hadoop-common/src/test/resources/jets3t.properties
deleted file mode 100644
index 09cc46396ab..00000000000
--- a/hadoop-common-project/hadoop-common/src/test/resources/jets3t.properties
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Speed up the s3native jets3t test
-
-s3service.max-thread-count=10
-threaded-service.max-thread-count=10
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
index cd870ca5aee..01381953457 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -115,6 +115,14 @@ public boolean delete(Path f, boolean recursive)
return dfs.delete(getUriPath(f), recursive);
}
+ /**
+ * The returned BlockLocation will have different formats for replicated
+ * and erasure coded files.
+ *
+ * Please refer to
+ * {@link FileContext#getFileBlockLocations(Path, long, long)}
+ * for more details.
+ */
@Override
public BlockLocation[] getFileBlockLocations(Path p, long start, long len)
throws IOException, UnresolvedLinkException {
@@ -165,6 +173,13 @@ public FsServerDefaults getServerDefaults(final Path f) throws IOException {
return dfs.getServerDefaults();
}
+ /**
+ * The BlockLocation of the returned LocatedFileStatus will have different
+ * formats for replicated and erasure coded files.
+ * Please refer to
+ * {@link FileContext#getFileBlockLocations(Path, long, long)} for
+ * more details.
+ */
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(
final Path p)
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 9239df39c59..772049d35d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -72,6 +72,7 @@
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
@@ -866,6 +867,10 @@ boolean recoverLease(String src) throws IOException {
* data-placement when performing operations. For example, the
* MapReduce system tries to schedule tasks on the same machines
* as the data-block the task processes.
+ *
+ * Please refer to
+ * {@link FileSystem#getFileBlockLocations(FileStatus, long, long)}
+ * for more details.
*/
public BlockLocation[] getBlockLocations(String src, long start,
long length) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 7f053380f8c..44db3a68245 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -260,6 +260,7 @@ private void flipDataBuffers() {
private final Coordinator coordinator;
private final CellBuffers cellBuffers;
+ private final ErasureCodingPolicy ecPolicy;
private final RawErasureEncoder encoder;
private final List streamers;
private final DFSPacket[] currentPackets; // current Packet of each streamer
@@ -286,7 +287,7 @@ private void flipDataBuffers() {
LOG.debug("Creating DFSStripedOutputStream for " + src);
}
- final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
+ ecPolicy = stat.getErasureCodingPolicy();
final int numParityBlocks = ecPolicy.getNumParityUnits();
cellSize = ecPolicy.getCellSize();
numDataBlocks = ecPolicy.getNumDataUnits();
@@ -478,11 +479,6 @@ private void allocateNewBlock() throws IOException {
final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
currentBlockGroup, fileId, favoredNodes, getAddBlockFlags());
assert lb.isStriped();
- if (lb.getLocations().length < numDataBlocks) {
- throw new IOException("Failed to get " + numDataBlocks
- + " nodes from namenode: blockGroupSize= " + numAllBlocks
- + ", blocks.length= " + lb.getLocations().length);
- }
// assign the new block to the current block group
currentBlockGroup = lb.getBlock();
blockGroupIndex++;
@@ -494,11 +490,16 @@ private void allocateNewBlock() throws IOException {
StripedDataStreamer si = getStripedDataStreamer(i);
assert si.isHealthy();
if (blocks[i] == null) {
+ // allocBlock() should guarantee that all data blocks are successfully
+ // allocated.
+ assert i >= numDataBlocks;
// Set exception and close streamer as there is no block locations
// found for the parity block.
- LOG.warn("Failed to get block location for parity block, index=" + i);
+ LOG.warn("Cannot allocate parity block(index={}, policy={}). " +
+ "Not enough datanodes? Exclude nodes={}", i, ecPolicy.getName(),
+ excludedNodes);
si.getLastException().set(
- new IOException("Failed to get following block, i=" + i));
+ new IOException("Failed to get parity block, index=" + i));
si.getErrorState().setInternalError();
si.close(true);
} else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 44caed60d60..f6331cf90d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -240,6 +240,13 @@ public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
return getFileBlockLocations(file.getPath(), start, len);
}
+ /**
+ * The returned BlockLocation will have different formats for replicated
+ * and erasure coded files.
+ * Please refer to
+ * {@link FileSystem#getFileBlockLocations(FileStatus, long, long)}
+ * for more details.
+ */
@Override
public BlockLocation[] getFileBlockLocations(Path p,
final long start, final long len) throws IOException {
@@ -1040,6 +1047,13 @@ public FileStatus[] next(final FileSystem fs, final Path p)
}.resolve(this, absF);
}
+ /**
+ * The BlockLocation of the returned LocatedFileStatus will have different
+ * formats for replicated and erasure coded files.
+ * Please refer to
+ * {@link FileSystem#getFileBlockLocations(FileStatus, long, long)} for
+ * more details.
+ */
@Override
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path p,
final PathFilter filter)
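
To make the javadoc additions concrete, here is a small client-side sketch of reading block locations through the FileSystem API; the path and class name are placeholders. For an erasure coded file each BlockLocation covers a whole block group (data plus parity units), whereas for a replicated file it covers a single block, as described in FileSystem#getFileBlockLocations.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockLocationSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.dat"));
        for (BlockLocation loc : fs.getFileBlockLocations(status, 0, status.getLen())) {
          // Offset, length and the host list; for EC files these span the block group.
          System.out.println(loc.getOffset() + "+" + loc.getLength()
              + " on " + String.join(",", loc.getHosts()));
        }
      }
    }
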
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlocksStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlocksStats.java
deleted file mode 100644
index 7eb30ca7f49..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/BlocksStats.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Get statistics pertaining to blocks of type {@link BlockType#CONTIGUOUS}
- * in the filesystem.
- *
- * @see ClientProtocol#getBlocksStats()
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public final class BlocksStats {
- private final long lowRedundancyBlocksStat;
- private final long corruptBlocksStat;
- private final long missingBlocksStat;
- private final long missingReplicationOneBlocksStat;
- private final long bytesInFutureBlocksStat;
- private final long pendingDeletionBlocksStat;
-
- public BlocksStats(long lowRedundancyBlocksStat,
- long corruptBlocksStat, long missingBlocksStat,
- long missingReplicationOneBlocksStat, long bytesInFutureBlocksStat,
- long pendingDeletionBlocksStat) {
- this.lowRedundancyBlocksStat = lowRedundancyBlocksStat;
- this.corruptBlocksStat = corruptBlocksStat;
- this.missingBlocksStat = missingBlocksStat;
- this.missingReplicationOneBlocksStat = missingReplicationOneBlocksStat;
- this.bytesInFutureBlocksStat = bytesInFutureBlocksStat;
- this.pendingDeletionBlocksStat = pendingDeletionBlocksStat;
- }
-
- public long getLowRedundancyBlocksStat() {
- return lowRedundancyBlocksStat;
- }
-
- public long getCorruptBlocksStat() {
- return corruptBlocksStat;
- }
-
- public long getMissingReplicaBlocksStat() {
- return missingBlocksStat;
- }
-
- public long getMissingReplicationOneBlocksStat() {
- return missingReplicationOneBlocksStat;
- }
-
- public long getBytesInFutureBlocksStat() {
- return bytesInFutureBlocksStat;
- }
-
- public long getPendingDeletionBlocksStat() {
- return pendingDeletionBlocksStat;
- }
-
- @Override
- public String toString() {
- StringBuilder statsBuilder = new StringBuilder();
- statsBuilder.append("ReplicatedBlocksStats=[")
- .append("LowRedundancyBlocks=").append(getLowRedundancyBlocksStat())
- .append(", CorruptBlocks=").append(getCorruptBlocksStat())
- .append(", MissingReplicaBlocks=").append(getMissingReplicaBlocksStat())
- .append(", MissingReplicationOneBlocks=").append(
- getMissingReplicationOneBlocksStat())
- .append(", BytesInFutureBlocks=").append(getBytesInFutureBlocksStat())
- .append(", PendingDeletionBlocks=").append(
- getPendingDeletionBlocksStat())
- .append("]");
- return statsBuilder.toString();
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index b550467dd89..8d5503f9abb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -778,14 +778,14 @@ SnapshottableDirectoryStatus[] getSnapshottableDirListing()
* in the filesystem.
*/
@Idempotent
- BlocksStats getBlocksStats() throws IOException;
+ ReplicatedBlockStats getReplicatedBlockStats() throws IOException;
/**
* Get statistics pertaining to blocks of type {@link BlockType#STRIPED}
* in the filesystem.
*/
@Idempotent
- ECBlockGroupsStats getECBlockGroupsStats() throws IOException;
+ ECBlockGroupStats getECBlockGroupStats() throws IOException;
/**
* Get a report on the system's current datanodes.
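
A hedged sketch of how a caller uses the renamed protocol methods; obtaining the ClientProtocol proxy is elided and the namenode parameter is assumed to be already wired up.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
    import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;

    public class BlockStatsSketch {
      static void printStats(ClientProtocol namenode) throws IOException {
        // Formerly getBlocksStats() and getECBlockGroupsStats().
        ReplicatedBlockStats replicated = namenode.getReplicatedBlockStats();
        ECBlockGroupStats ecGroups = namenode.getECBlockGroupStats();
        System.out.println(replicated); // ReplicatedBlockStats=[LowRedundancyBlocks=..., ...]
        System.out.println(ecGroups);   // ECBlockGroupStats=[LowRedundancyBlockGroups=..., ...]
      }
    }
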
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
new file mode 100644
index 00000000000..9a8ad8cdb13
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Get statistics pertaining to blocks of type {@link BlockType#STRIPED}
+ * in the filesystem.
+ *
+ * @see ClientProtocol#getECBlockGroupStats()
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class ECBlockGroupStats {
+ private final long lowRedundancyBlockGroups;
+ private final long corruptBlockGroups;
+ private final long missingBlockGroups;
+ private final long bytesInFutureBlockGroups;
+ private final long pendingDeletionBlocks;
+
+ public ECBlockGroupStats(long lowRedundancyBlockGroups,
+ long corruptBlockGroups, long missingBlockGroups,
+ long bytesInFutureBlockGroups, long pendingDeletionBlocks) {
+ this.lowRedundancyBlockGroups = lowRedundancyBlockGroups;
+ this.corruptBlockGroups = corruptBlockGroups;
+ this.missingBlockGroups = missingBlockGroups;
+ this.bytesInFutureBlockGroups = bytesInFutureBlockGroups;
+ this.pendingDeletionBlocks = pendingDeletionBlocks;
+ }
+
+ public long getBytesInFutureBlockGroups() {
+ return bytesInFutureBlockGroups;
+ }
+
+ public long getCorruptBlockGroups() {
+ return corruptBlockGroups;
+ }
+
+ public long getLowRedundancyBlockGroups() {
+ return lowRedundancyBlockGroups;
+ }
+
+ public long getMissingBlockGroups() {
+ return missingBlockGroups;
+ }
+
+ public long getPendingDeletionBlocks() {
+ return pendingDeletionBlocks;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder statsBuilder = new StringBuilder();
+ statsBuilder.append("ECBlockGroupStats=[")
+ .append("LowRedundancyBlockGroups=").append(
+ getLowRedundancyBlockGroups())
+ .append(", CorruptBlockGroups=").append(getCorruptBlockGroups())
+ .append(", MissingBlockGroups=").append(getMissingBlockGroups())
+ .append(", BytesInFutureBlockGroups=").append(
+ getBytesInFutureBlockGroups())
+ .append(", PendingDeletionBlocks=").append(
+ getPendingDeletionBlocks())
+ .append("]");
+ return statsBuilder.toString();
+ }
+}
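
A minimal construction sketch for the new class; the counts are made up for illustration and the class name is invented.

    import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;

    public class ECBlockGroupStatsSketch {
      public static void main(String[] args) {
        // Constructor argument order matches the class above.
        ECBlockGroupStats stats = new ECBlockGroupStats(
            3L,  // lowRedundancyBlockGroups
            1L,  // corruptBlockGroups
            0L,  // missingBlockGroups
            0L,  // bytesInFutureBlockGroups
            2L); // pendingDeletionBlocks
        System.out.println(stats.getLowRedundancyBlockGroups()); // 3
        System.out.println(stats); // ECBlockGroupStats=[LowRedundancyBlockGroups=3, ...]
      }
    }
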
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupsStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupsStats.java
deleted file mode 100644
index 80cf262d8af..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupsStats.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Get statistics pertaining to blocks of type {@link BlockType#STRIPED}
- * in the filesystem.
- *
- * @see ClientProtocol#getECBlockGroupsStats()
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public final class ECBlockGroupsStats {
- private final long lowRedundancyBlockGroupsStat;
- private final long corruptBlockGroupsStat;
- private final long missingBlockGroupsStat;
- private final long bytesInFutureBlockGroupsStat;
- private final long pendingDeletionBlockGroupsStat;
-
- public ECBlockGroupsStats(long lowRedundancyBlockGroupsStat, long
- corruptBlockGroupsStat, long missingBlockGroupsStat, long
- bytesInFutureBlockGroupsStat, long pendingDeletionBlockGroupsStat) {
- this.lowRedundancyBlockGroupsStat = lowRedundancyBlockGroupsStat;
- this.corruptBlockGroupsStat = corruptBlockGroupsStat;
- this.missingBlockGroupsStat = missingBlockGroupsStat;
- this.bytesInFutureBlockGroupsStat = bytesInFutureBlockGroupsStat;
- this.pendingDeletionBlockGroupsStat = pendingDeletionBlockGroupsStat;
- }
-
- public long getBytesInFutureBlockGroupsStat() {
- return bytesInFutureBlockGroupsStat;
- }
-
- public long getCorruptBlockGroupsStat() {
- return corruptBlockGroupsStat;
- }
-
- public long getLowRedundancyBlockGroupsStat() {
- return lowRedundancyBlockGroupsStat;
- }
-
- public long getMissingBlockGroupsStat() {
- return missingBlockGroupsStat;
- }
-
- public long getPendingDeletionBlockGroupsStat() {
- return pendingDeletionBlockGroupsStat;
- }
-
- @Override
- public String toString() {
- StringBuilder statsBuilder = new StringBuilder();
- statsBuilder.append("ECBlockGroupsStats=[")
- .append("LowRedundancyBlockGroups=").append(
- getLowRedundancyBlockGroupsStat())
- .append(", CorruptBlockGroups=").append(getCorruptBlockGroupsStat())
- .append(", MissingBlockGroups=").append(getMissingBlockGroupsStat())
- .append(", BytesInFutureBlockGroups=").append(
- getBytesInFutureBlockGroupsStat())
- .append(", PendingDeletionBlockGroups=").append(
- getPendingDeletionBlockGroupsStat())
- .append("]");
- return statsBuilder.toString();
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index b82a860cf4a..193aae25a25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -78,6 +78,17 @@ public LocatedBlocks getBlockLocations() {
return locations;
}
+ /**
+ * This function is used to transform the underlying HDFS LocatedBlocks to
+ * BlockLocations.
+ *
+ * The returned BlockLocation will have different formats for replicated
+ * and erasure coded files.
+ * Please refer to
+ * {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations
+ * (FileStatus, long, long)}
+ * for examples.
+ */
public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
Path path) {
makeQualified(defaultUri, path);
@@ -96,5 +107,4 @@ public int hashCode() {
// satisfy findbugs
return super.hashCode();
}
-
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java
new file mode 100644
index 00000000000..49aadedcdec
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Get statistics pertaining to blocks of type {@link BlockType#CONTIGUOUS}
+ * in the filesystem.
+ *
+ * @see ClientProtocol#getReplicatedBlockStats()
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class ReplicatedBlockStats {
+ private final long lowRedundancyBlocks;
+ private final long corruptBlocks;
+ private final long missingBlocks;
+ private final long missingReplicationOneBlocks;
+ private final long bytesInFutureBlocks;
+ private final long pendingDeletionBlocks;
+
+ public ReplicatedBlockStats(long lowRedundancyBlocks,
+ long corruptBlocks, long missingBlocks,
+ long missingReplicationOneBlocks, long bytesInFutureBlocks,
+ long pendingDeletionBlocks) {
+ this.lowRedundancyBlocks = lowRedundancyBlocks;
+ this.corruptBlocks = corruptBlocks;
+ this.missingBlocks = missingBlocks;
+ this.missingReplicationOneBlocks = missingReplicationOneBlocks;
+ this.bytesInFutureBlocks = bytesInFutureBlocks;
+ this.pendingDeletionBlocks = pendingDeletionBlocks;
+ }
+
+ public long getLowRedundancyBlocks() {
+ return lowRedundancyBlocks;
+ }
+
+ public long getCorruptBlocks() {
+ return corruptBlocks;
+ }
+
+ public long getMissingReplicaBlocks() {
+ return missingBlocks;
+ }
+
+ public long getMissingReplicationOneBlocks() {
+ return missingReplicationOneBlocks;
+ }
+
+ public long getBytesInFutureBlocks() {
+ return bytesInFutureBlocks;
+ }
+
+ public long getPendingDeletionBlocks() {
+ return pendingDeletionBlocks;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder statsBuilder = new StringBuilder();
+ statsBuilder.append("ReplicatedBlockStats=[")
+ .append("LowRedundancyBlocks=").append(getLowRedundancyBlocks())
+ .append(", CorruptBlocks=").append(getCorruptBlocks())
+ .append(", MissingReplicaBlocks=").append(getMissingReplicaBlocks())
+ .append(", MissingReplicationOneBlocks=").append(
+ getMissingReplicationOneBlocks())
+ .append(", BytesInFutureBlocks=").append(getBytesInFutureBlocks())
+ .append(", PendingDeletionBlocks=").append(
+ getPendingDeletionBlocks())
+ .append("]");
+ return statsBuilder.toString();
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index ec7d93f689c..209eee7b501 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -61,7 +61,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -73,7 +73,7 @@
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.BlocksStats;
+import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -120,8 +120,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
@@ -246,13 +246,13 @@ public class ClientNamenodeProtocolTranslatorPB implements
private final static GetFsStatusRequestProto VOID_GET_FSSTATUS_REQUEST =
GetFsStatusRequestProto.newBuilder().build();
- private final static GetFsBlocksStatsRequestProto
- VOID_GET_FS_REPLICABLOCKS_STATS_REQUEST =
- GetFsBlocksStatsRequestProto.newBuilder().build();
+ private final static GetFsReplicatedBlockStatsRequestProto
+ VOID_GET_FS_REPLICATED_BLOCK_STATS_REQUEST =
+ GetFsReplicatedBlockStatsRequestProto.newBuilder().build();
- private final static GetFsECBlockGroupsStatsRequestProto
- VOID_GET_FS_ECBLOCKGROUPS_STATS_REQUEST =
- GetFsECBlockGroupsStatsRequestProto.newBuilder().build();
+ private final static GetFsECBlockGroupStatsRequestProto
+ VOID_GET_FS_ECBLOCKGROUP_STATS_REQUEST =
+ GetFsECBlockGroupStatsRequestProto.newBuilder().build();
private final static RollEditsRequestProto VOID_ROLLEDITS_REQUEST =
RollEditsRequestProto.getDefaultInstance();
@@ -695,20 +695,20 @@ public long[] getStats() throws IOException {
}
@Override
- public BlocksStats getBlocksStats() throws IOException {
+ public ReplicatedBlockStats getReplicatedBlockStats() throws IOException {
try {
- return PBHelperClient.convert(rpcProxy.getFsBlocksStats(null,
- VOID_GET_FS_REPLICABLOCKS_STATS_REQUEST));
+ return PBHelperClient.convert(rpcProxy.getFsReplicatedBlockStats(null,
+ VOID_GET_FS_REPLICATED_BLOCK_STATS_REQUEST));
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
- public ECBlockGroupsStats getECBlockGroupsStats() throws IOException {
+ public ECBlockGroupStats getECBlockGroupStats() throws IOException {
try {
- return PBHelperClient.convert(rpcProxy.getFsECBlockGroupsStats(null,
- VOID_GET_FS_ECBLOCKGROUPS_STATS_REQUEST));
+ return PBHelperClient.convert(rpcProxy.getFsECBlockGroupStats(null,
+ VOID_GET_FS_ECBLOCKGROUP_STATS_REQUEST));
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 63a4271ea00..6dd65b1d230 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -76,7 +76,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
@@ -92,7 +92,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
-import org.apache.hadoop.hdfs.protocol.BlocksStats;
+import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
@@ -122,8 +122,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
@@ -1810,17 +1810,17 @@ public static long[] convert(GetFsStatsResponseProto res) {
return result;
}
- public static BlocksStats convert(
- GetFsBlocksStatsResponseProto res) {
- return new BlocksStats(res.getLowRedundancy(),
+ public static ReplicatedBlockStats convert(
+ GetFsReplicatedBlockStatsResponseProto res) {
+ return new ReplicatedBlockStats(res.getLowRedundancy(),
res.getCorruptBlocks(), res.getMissingBlocks(),
res.getMissingReplOneBlocks(), res.getBlocksInFuture(),
res.getPendingDeletionBlocks());
}
- public static ECBlockGroupsStats convert(
- GetFsECBlockGroupsStatsResponseProto res) {
- return new ECBlockGroupsStats(res.getLowRedundancy(),
+ public static ECBlockGroupStats convert(
+ GetFsECBlockGroupStatsResponseProto res) {
+ return new ECBlockGroupStats(res.getLowRedundancy(),
res.getCorruptBlocks(), res.getMissingBlocks(),
res.getBlocksInFuture(), res.getPendingDeletionBlocks());
}
@@ -2236,37 +2236,37 @@ public static GetFsStatsResponseProto convert(long[] fsStats) {
return result.build();
}
- public static GetFsBlocksStatsResponseProto convert(
- BlocksStats blocksStats) {
- GetFsBlocksStatsResponseProto.Builder result =
- GetFsBlocksStatsResponseProto.newBuilder();
+ public static GetFsReplicatedBlockStatsResponseProto convert(
+ ReplicatedBlockStats replicatedBlockStats) {
+ GetFsReplicatedBlockStatsResponseProto.Builder result =
+ GetFsReplicatedBlockStatsResponseProto.newBuilder();
result.setLowRedundancy(
- blocksStats.getLowRedundancyBlocksStat());
+ replicatedBlockStats.getLowRedundancyBlocks());
result.setCorruptBlocks(
- blocksStats.getCorruptBlocksStat());
+ replicatedBlockStats.getCorruptBlocks());
result.setMissingBlocks(
- blocksStats.getMissingReplicaBlocksStat());
+ replicatedBlockStats.getMissingReplicaBlocks());
result.setMissingReplOneBlocks(
- blocksStats.getMissingReplicationOneBlocksStat());
+ replicatedBlockStats.getMissingReplicationOneBlocks());
result.setBlocksInFuture(
- blocksStats.getBytesInFutureBlocksStat());
+ replicatedBlockStats.getBytesInFutureBlocks());
result.setPendingDeletionBlocks(
- blocksStats.getPendingDeletionBlocksStat());
+ replicatedBlockStats.getPendingDeletionBlocks());
return result.build();
}
- public static GetFsECBlockGroupsStatsResponseProto convert(
- ECBlockGroupsStats ecBlockGroupsStats) {
- GetFsECBlockGroupsStatsResponseProto.Builder result =
- GetFsECBlockGroupsStatsResponseProto.newBuilder();
+ public static GetFsECBlockGroupStatsResponseProto convert(
+ ECBlockGroupStats ecBlockGroupStats) {
+ GetFsECBlockGroupStatsResponseProto.Builder result =
+ GetFsECBlockGroupStatsResponseProto.newBuilder();
result.setLowRedundancy(
- ecBlockGroupsStats.getLowRedundancyBlockGroupsStat());
- result.setCorruptBlocks(ecBlockGroupsStats.getCorruptBlockGroupsStat());
- result.setMissingBlocks(ecBlockGroupsStats.getMissingBlockGroupsStat());
+ ecBlockGroupStats.getLowRedundancyBlockGroups());
+ result.setCorruptBlocks(ecBlockGroupStats.getCorruptBlockGroups());
+ result.setMissingBlocks(ecBlockGroupStats.getMissingBlockGroups());
result.setBlocksInFuture(
- ecBlockGroupsStats.getBytesInFutureBlockGroupsStat());
+ ecBlockGroupStats.getBytesInFutureBlockGroups());
result.setPendingDeletionBlocks(
- ecBlockGroupsStats.getPendingDeletionBlockGroupsStat());
+ ecBlockGroupStats.getPendingDeletionBlocks());
return result.build();
}
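
The two converter pairs above are symmetric; a sketch of the round trip for the replicated-block case, with illustrative values only and an invented class name.

    import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

    public class ReplicatedBlockStatsRoundTrip {
      public static void main(String[] args) {
        // Argument order: lowRedundancy, corrupt, missing, missingReplicationOne,
        // bytesInFuture, pendingDeletion.
        ReplicatedBlockStats stats = new ReplicatedBlockStats(5L, 1L, 0L, 0L, 0L, 3L);
        GetFsReplicatedBlockStatsResponseProto proto = PBHelperClient.convert(stats);
        ReplicatedBlockStats back = PBHelperClient.convert(proto);
        System.out.println(back.getPendingDeletionBlocks()); // 3
      }
    }
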
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 7ec5fe5c15f..dcd73bfc7eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -22,7 +22,6 @@
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.ContentSummary.Builder;
import org.apache.hadoop.fs.FileChecksum;
@@ -645,56 +644,4 @@ private static StorageType[] toStorageTypes(List<?> list) {
}
}
- static BlockLocation[] toBlockLocationArray(Map<?, ?> json)
- throws IOException{
- final Map<?, ?> rootmap =
- (Map<?, ?>)json.get(BlockLocation.class.getSimpleName() + "s");
- final List<?> array = JsonUtilClient.getList(rootmap,
- BlockLocation.class.getSimpleName());
-
- Preconditions.checkNotNull(array);
- final BlockLocation[] locations = new BlockLocation[array.size()];
- int i = 0;
- for (Object object : array) {
- final Map<?, ?> m = (Map<?, ?>) object;
- locations[i++] = JsonUtilClient.toBlockLocation(m);
- }
- return locations;
- }
-
- /** Convert a Json map to BlockLocation. **/
- static BlockLocation toBlockLocation(Map<?, ?> m)
- throws IOException{
- if(m == null) {
- return null;
- }
-
- long length = ((Number) m.get("length")).longValue();
- long offset = ((Number) m.get("offset")).longValue();
- boolean corrupt = Boolean.
- getBoolean(m.get("corrupt").toString());
- String[] storageIds = toStringArray(getList(m, "storageIds"));
- String[] cachedHosts = toStringArray(getList(m, "cachedHosts"));
- String[] hosts = toStringArray(getList(m, "hosts"));
- String[] names = toStringArray(getList(m, "names"));
- String[] topologyPaths = toStringArray(getList(m, "topologyPaths"));
- StorageType[] storageTypes = toStorageTypeArray(
- getList(m, "storageTypes"));
- return new BlockLocation(names, hosts, cachedHosts,
- topologyPaths, storageIds, storageTypes,
- offset, length, corrupt);
- }
-
- static String[] toStringArray(List<?> list) {
- if (list == null) {
- return null;
- } else {
- final String[] array = new String[list.size()];
- int i = 0;
- for (Object object : list) {
- array[i++] = object.toString();
- }
- return array;
- }
- }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 1159e50de7f..ee8d5c1c325 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1616,68 +1616,14 @@ public BlockLocation[] getFileBlockLocations(final Path p,
final long offset, final long length) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
- BlockLocation[] locations = null;
- try {
- locations = getFileBlockLocations(
- GetOpParam.Op.GETFILEBLOCKLOCATIONS,
- p, offset, length);
- } catch (RemoteException e) {
- // See the error message from ExceptionHandle
- if(e.getMessage() != null &&
- e.getMessage().contains(
- "Invalid value for webhdfs parameter") &&
- e.getMessage().contains(
- GetOpParam.Op.GETFILEBLOCKLOCATIONS.toString())) {
- // Old webhdfs server doesn't support GETFILEBLOCKLOCATIONS
- // operation, fall back to query again using old API
- // GET_BLOCK_LOCATIONS.
- LOG.info("Invalid webhdfs operation parameter "
- + GetOpParam.Op.GETFILEBLOCKLOCATIONS + ". Fallback to use "
- + GetOpParam.Op.GET_BLOCK_LOCATIONS + " instead.");
- locations = getFileBlockLocations(
- GetOpParam.Op.GET_BLOCK_LOCATIONS,
- p, offset, length);
- }
- }
- return locations;
- }
- /**
- * Get file block locations implementation. Provide a operation
- * parameter to determine how to get block locations from a webhdfs
- * server. Older server only supports GET_BLOCK_LOCATIONS but
- * not GETFILEBLOCKLOCATIONS.
- *
- * @param path path to the file
- * @param offset start offset in the given file
- * @param length of the file to get locations for
- * @param operation
- * Valid operation is either
- * {@link org.apache.hadoop.hdfs.web.resources.GetOpParam.Op
- * #GET_BLOCK_LOCATIONS} or
- * {@link org.apache.hadoop.hdfs.web.resources.GetOpParam.Op
- * #GET_BLOCK_LOCATIONS}
- * @throws IOException
- * Http connection error, decoding error or given
- * operation is not valid
- */
- @VisibleForTesting
- protected BlockLocation[] getFileBlockLocations(
- GetOpParam.Op operation, final Path path,
- final long offset, final long length) throws IOException {
- return new FsPathResponseRunner<BlockLocation[]>(operation, path,
+ final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
+ return new FsPathResponseRunner<BlockLocation[]>(op, p,
new OffsetParam(offset), new LengthParam(length)) {
@Override
BlockLocation[] decodeResponse(Map<?, ?> json) throws IOException {
- switch(operation) {
- case GETFILEBLOCKLOCATIONS:
- return JsonUtilClient.toBlockLocationArray(json);
- case GET_BLOCK_LOCATIONS:
- return DFSUtilClient.locatedBlocks2Locations(
- JsonUtilClient.toLocatedBlocks(json));
- default :
- throw new IOException("Unknown operation " + operation.name());
- }
+ return DFSUtilClient.locatedBlocks2Locations(
+ JsonUtilClient.toLocatedBlocks(json));
}
}.run();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index d32af330ae7..6c2c674ad15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -33,18 +33,8 @@ public enum Op implements HttpOpParam.Op {
GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
- /**
- * GET_BLOCK_LOCATIONS is a private/stable API op. It returns a
- * {@link org.apache.hadoop.hdfs.protocol.LocatedBlocks}
- * json object.
- */
+ /** GET_BLOCK_LOCATIONS is a private unstable op. */
GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
- /**
- * GETFILEBLOCKLOCATIONS is the public op that complies with
- * {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations}
- * interface.
- */
- GETFILEBLOCKLOCATIONS(false, HttpURLConnection.HTTP_OK),
GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
GETXATTRS(false, HttpURLConnection.HTTP_OK),
GETTRASHROOT(false, HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 3f108fa718b..6db6ad0804c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -327,10 +327,10 @@ message GetFsStatsResponseProto {
optional uint64 pending_deletion_blocks = 9;
}
-message GetFsBlocksStatsRequestProto { // no input paramters
+message GetFsReplicatedBlockStatsRequestProto { // no input parameters
}
-message GetFsBlocksStatsResponseProto {
+message GetFsReplicatedBlockStatsResponseProto {
required uint64 low_redundancy = 1;
required uint64 corrupt_blocks = 2;
required uint64 missing_blocks = 3;
@@ -339,10 +339,10 @@ message GetFsBlocksStatsResponseProto {
required uint64 pending_deletion_blocks = 6;
}
-message GetFsECBlockGroupsStatsRequestProto { // no input paramters
+message GetFsECBlockGroupStatsRequestProto { // no input parameters
}
-message GetFsECBlockGroupsStatsResponseProto {
+message GetFsECBlockGroupStatsResponseProto {
required uint64 low_redundancy = 1;
required uint64 corrupt_blocks = 2;
required uint64 missing_blocks = 3;
@@ -831,10 +831,10 @@ service ClientNamenodeProtocol {
rpc recoverLease(RecoverLeaseRequestProto)
returns(RecoverLeaseResponseProto);
rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
- rpc getFsBlocksStats(GetFsBlocksStatsRequestProto)
- returns (GetFsBlocksStatsResponseProto);
- rpc getFsECBlockGroupsStats(GetFsECBlockGroupsStatsRequestProto)
- returns (GetFsECBlockGroupsStatsResponseProto);
+ rpc getFsReplicatedBlockStats(GetFsReplicatedBlockStatsRequestProto)
+ returns (GetFsReplicatedBlockStatsResponseProto);
+ rpc getFsECBlockGroupStats(GetFsECBlockGroupStatsRequestProto)
+ returns (GetFsECBlockGroupStatsResponseProto);
rpc getDatanodeReport(GetDatanodeReportRequestProto)
returns(GetDatanodeReportResponseProto);
rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)
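
A minimal client-side sketch of how the two renamed stats RPCs above might be exercised. It assumes the default FileSystem is HDFS and that DFSClient#getNamenode() exposes the ClientProtocol proxy (both assumptions are outside this patch); the getter names mirror the ones used in DFSTestUtil#verifyClientStats later in this patch.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;

public class BlockStatsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // ClientProtocol proxy obtained via the private-audience DFSClient.
      ClientProtocol nn = dfs.getClient().getNamenode();
      ReplicatedBlockStats replicated = nn.getReplicatedBlockStats();
      ECBlockGroupStats ecGroups = nn.getECBlockGroupStats();
      System.out.println("Low-redundancy replicated blocks: "
          + replicated.getLowRedundancyBlocks());
      System.out.println("Low-redundancy EC block groups: "
          + ecGroups.getLowRedundancyBlockGroups());
    }
  }
}
```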
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 1059a02f127..b5880e95bf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -23,12 +23,9 @@
import java.util.EnumSet;
import java.util.List;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.type.MapType;
import com.google.common.base.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -122,8 +119,6 @@ public class HttpFSFileSystem extends FileSystem
public static final String NEW_LENGTH_PARAM = "newlength";
public static final String START_AFTER_PARAM = "startAfter";
public static final String POLICY_NAME_PARAM = "storagepolicy";
- public static final String OFFSET_PARAM = "offset";
- public static final String LENGTH_PARAM = "length";
public static final String SNAPSHOT_NAME_PARAM = "snapshotname";
public static final String OLD_SNAPSHOT_NAME_PARAM = "oldsnapshotname";
@@ -210,7 +205,6 @@ public static FILE_TYPE getType(FileStatus fileStatus) {
public static final String STORAGE_POLICIES_JSON = "BlockStoragePolicies";
public static final String STORAGE_POLICY_JSON = "BlockStoragePolicy";
- public static final String BLOCK_LOCATIONS_JSON = "BlockLocations";
public static final int HTTP_TEMPORARY_REDIRECT = 307;
@@ -1359,42 +1353,6 @@ public BlockStoragePolicy getStoragePolicy(Path src) throws IOException {
return createStoragePolicy((JSONObject) json.get(STORAGE_POLICY_JSON));
}
- @Override
- public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
- long len) throws IOException {
- Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, Operation.GETFILEBLOCKLOCATIONS.toString());
- params.put(OFFSET_PARAM, Long.toString(start));
- params.put(LENGTH_PARAM, Long.toString(len));
- HttpURLConnection conn =
- getConnection(Operation.GETFILEBLOCKLOCATIONS.getMethod(), params,
- file.getPath(), true);
- HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
- JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
- return toBlockLocations(json);
- }
-
- private BlockLocation[] toBlockLocations(JSONObject json)
- throws IOException {
- ObjectMapper mapper = new ObjectMapper();
- MapType subType = mapper.getTypeFactory().constructMapType(
- Map.class,
- String.class,
- BlockLocation[].class);
- MapType rootType = mapper.getTypeFactory().constructMapType(
- Map.class,
- mapper.constructType(String.class),
- mapper.constructType(subType));
-
- Map<String, Map<String, BlockLocation[]>> jsonMap = mapper
- .readValue(json.toJSONString(), rootType);
- Map<String, BlockLocation[]> locationMap = jsonMap
- .get(BLOCK_LOCATIONS_JSON);
- BlockLocation[] locationArray = locationMap.get(
- BlockLocation.class.getSimpleName());
- return locationArray;
- }
-
private BlockStoragePolicy createStoragePolicy(JSONObject policyJson)
throws IOException {
byte id = ((Number) policyJson.get("id")).byteValue();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 4b5918abf50..a08bc54b0b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileChecksum;
@@ -36,7 +35,6 @@
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.util.StringUtils;
@@ -1458,41 +1456,6 @@ public Void execute(FileSystem fs) throws IOException {
}
}
- /**
- * Executor that performs a getFileBlockLocations FileSystemAccess
- * file system operation.
- */
- @InterfaceAudience.Private
- @SuppressWarnings("rawtypes")
- public static class FSFileBlockLocations implements
- FileSystemAccess.FileSystemExecutor<Map> {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
-<property>
-  <name>dfs.namenode.ec.policies.enabled</name>
-  <value></value>
-  <description>Comma-delimited list of enabled erasure coding policies.
-    The NameNode will enforce this when setting an erasure coding policy
-    on a directory. By default, none of the built-in erasure coding
-    policies are enabled.
-  </description>
-</property>
-
 <property>
   <name>dfs.namenode.ec.system.default.policy</name>
   <value>RS-6-3-1024k</value>
@@ -3049,15 +3039,6 @@
-<property>
-  <name>dfs.datanode.ec.reconstruction.stripedread.threads</name>
-  <value>20</value>
-  <description>
-    Number of threads used by the Datanode to read striped block
-    during background reconstruction work.
-  </description>
-</property>
-
 <property>
   <name>dfs.datanode.ec.reconstruction.stripedread.buffer.size</name>
   <value>65536</value>
@@ -3066,7 +3047,7 @@
-  <name>dfs.datanode.ec.reconstruction.stripedblock.threads.size</name>
+  <name>dfs.datanode.ec.reconstruction.threads</name>
   <value>8</value>
   <description>Number of threads used by the Datanode for background
@@ -4160,11 +4141,11 @@
-  <name>dfs.namenode.authorization.provider.bypass.users</name>
+  <name>dfs.namenode.inode.attributes.provider.bypass.users</name>
A list of user principals (in secure cluster) or user names (in insecure
- cluster) for whom the external attribute provider will be bypassed for all
+ cluster) for whom the external attributes provider will be bypassed for all
operations. This means file attributes stored in HDFS instead of the
external provider will be used for permission checking and be returned when
requested.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index b785274f4df..de62622e3d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -331,12 +331,12 @@
'columns': [
{ 'orderDataType': 'ng-value', 'searchable': true },
{ 'orderDataType': 'ng-value', 'searchable': true },
- { 'orderDataType': 'ng-value', 'type': 'numeric' },
- { 'orderDataType': 'ng-value', 'type': 'numeric' },
- { 'orderDataType': 'ng-value', 'type': 'numeric' },
- { 'orderData': 3, 'type': 'numeric' },
- { 'orderDataType': 'ng-value', 'type': 'numeric'},
- { 'orderData': 5 }
+ { 'orderDataType': 'ng-value', 'type': 'num' },
+ { 'orderDataType': 'ng-value', 'type': 'num' },
+ { 'orderDataType': 'ng-value', 'type': 'num' },
+ { 'type': 'num' },
+ { 'orderDataType': 'ng-value', 'type': 'num'},
+ { 'type': 'string' }
]});
renderHistogram(data);
$('#ui-tabs a[href="#tab-datanode"]').tab('show');
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Federation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Federation.md
index ddf474ac492..be36cc2de50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Federation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Federation.md
@@ -113,7 +113,7 @@ Here is an example configuration with two Namenodes:
      <value>nn-host1:http-port</value>
-     <name>dfs.namenode.secondaryhttp-address.ns1</name>
+     <name>dfs.namenode.secondary.http-address.ns1</name>
      <value>snn-host1:http-port</value>
@@ -125,7 +125,7 @@ Here is an example configuration with two Namenodes:
      <value>nn-host2:http-port</value>
-     <name>dfs.namenode.secondaryhttp-address.ns2</name>
+     <name>dfs.namenode.secondary.http-address.ns2</name>
      <value>snn-host2:http-port</value>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 5bd7c6d29ae..c8ef6c7e6c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -69,7 +69,7 @@ Architecture
`REPLICATION` is a special policy. It can only be set on directory, to force the directory to adopt 3x replication scheme, instead of inheriting its ancestor's erasure coding policy. This policy makes it possible to interleave 3x replication scheme directory with erasure coding directory.
- `REPLICATION` policy is always enabled. For other built-in policies, unless they are configured in `dfs.namenode.ec.policies.enabled` property, otherwise they are disabled by default.
+ `REPLICATION` policy is always enabled. All other built-in policies are disabled by default.
Similar to HDFS storage policies, erasure coding policies are set on a directory. When a file is created, it inherits the EC policy of its nearest ancestor directory.
@@ -110,11 +110,8 @@ Deployment
### Configuration keys
- The set of enabled erasure coding policies can be configured on the NameNode via `dfs.namenode.ec.policies.enabled` configuration. This restricts
- what EC policies can be set by clients. It does not affect the behavior of already set file or directory-level EC policies.
-
- By default, all built-in erasure coding policies are disabled. Typically, the cluster administrator will enable set of policies by including them
- in the `dfs.namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance,
+ By default, all built-in erasure coding policies are disabled, except the policy defined in `dfs.namenode.ec.system.default.policy`, which is enabled.
+ The cluster administrator can enable a set of policies through the `hdfs ec [-enablePolicy -policy <policyName>]` command, based on the size of the cluster and the desired fault-tolerance properties. For instance,
for a cluster with 9 racks, a policy like `RS-10-4-1024k` will not preserve rack-level fault-tolerance, and `RS-6-3-1024k` or `RS-3-2-1024k` might
be more appropriate. If the administrator only cares about node-level fault-tolerance, `RS-10-4-1024k` would still be appropriate as long as
there are at least 14 DataNodes in the cluster.
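
A minimal sketch of the enable-then-set flow described above, using the DistributedFileSystem calls this patch switches the tests to (enableErasureCodingPolicy and setErasureCodingPolicy); the policy name and directory path are illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class EnableEcPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Enable a built-in policy; the programmatic equivalent of
      // `hdfs ec -enablePolicy`.
      dfs.enableErasureCodingPolicy("RS-6-3-1024k");
      // Apply it to a directory; new files under it inherit the policy.
      Path ecDir = new Path("/data/ec");           // illustrative path
      dfs.mkdirs(ecDir);
      dfs.setErasureCodingPolicy(ecDir, "RS-6-3-1024k");
    }
  }
}
```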
@@ -137,9 +134,8 @@ Deployment
Erasure coding background recovery work on the DataNodes can also be tuned via the following configuration parameters:
1. `dfs.datanode.ec.reconstruction.stripedread.timeout.millis` - Timeout for striped reads. Default value is 5000 ms.
- 1. `dfs.datanode.ec.reconstruction.stripedread.threads` - Number of concurrent reader threads. Default value is 20 threads.
1. `dfs.datanode.ec.reconstruction.stripedread.buffer.size` - Buffer size for reader service. Default value is 64KB.
- 1. `dfs.datanode.ec.reconstruction.stripedblock.threads.size` - Number of threads used by the Datanode for background reconstruction work. Default value is 8 threads.
+ 1. `dfs.datanode.ec.reconstruction.threads` - Number of threads used by the Datanode for background reconstruction work. Default value is 8 threads.
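
A small sketch of setting the reconstruction keys listed above programmatically; in practice these normally live in hdfs-site.xml on the DataNodes, and the values here are illustrative, not recommendations.

```java
import org.apache.hadoop.conf.Configuration;

public class EcReconstructionTuning {
  public static Configuration tuned() {
    Configuration conf = new Configuration();
    // Illustrative values for the keys documented above.
    conf.setInt("dfs.datanode.ec.reconstruction.stripedread.timeout.millis", 10000);
    conf.setInt("dfs.datanode.ec.reconstruction.stripedread.buffer.size", 128 * 1024);
    conf.setInt("dfs.datanode.ec.reconstruction.threads", 16);
    return conf;
  }
}
```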
### Enable Intel ISA-L
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsEditsViewer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsEditsViewer.md
index 4ab07ce2143..ce798b72186 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsEditsViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsEditsViewer.md
@@ -108,6 +108,10 @@ The output result of this processor should be like the following output:
...some output omitted...
OP_APPEND ( 47): 1
OP_SET_QUOTA_BY_STORAGETYPE ( 48): 1
+ OP_ADD_ERASURE_CODING_POLICY ( 49): 0
+ OP_ENABLE_ERASURE_CODING_POLICY ( 50): 1
+ OP_DISABLE_ERASURE_CODING_POLICY ( 51): 0
+ OP_REMOVE_ERASURE_CODING_POLICY ( 52): 0
OP_INVALID ( -1): 0
The output is formatted as a colon-separated two-column table: OpCode and OpCodeCount. Each OpCode corresponds to a specific operation (or operations) in the NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 03834ebf07d..84e8a576b00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -50,7 +50,6 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`CHECKACCESS`](#Check_access) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).access)
* [`GETALLSTORAGEPOLICY`](#Get_all_Storage_Policies) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getAllStoragePolicies)
* [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
- * [`GETFILEBLOCKLOCATIONS`](#Get_File_Block_Locations) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileBlockLocations)
* HTTP PUT
* [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
* [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1069,7 +1068,7 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).unsetStor
{
"BlockStoragePolicy": {
"copyOnCreateFile": false,
- "creationFallbacks": [],
+ "creationFallbacks": [],
"id":7,
"name":"HOT",
"replicationFallbacks":["ARCHIVE"],
@@ -1079,51 +1078,6 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).unsetStor
See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy
-### Get File Block Locations
-
-* Submit a HTTP GET request.
-
- curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETFILEBLOCKLOCATIONS"
-
- The client receives a response with a [`BlockLocations` JSON Object](#Block_Locations_JSON_Schema):
-
- HTTP/1.1 200 OK
- Content-Type: application/json
- Transfer-Encoding: chunked
-
- {
- "BlockLocations" :
- {
- "BlockLocation":
- [
- {
- "cachedHosts" : [],
- "corrupt" : false,
- "hosts" : ["host"],
- "length" : 134217728, // length of this block
- "names" : ["host:ip"],
- "offset" : 0, // offset of the block in the file
- "storageIds" : ["storageid"],
- "storageTypes" : ["DISK"], // enum {RAM_DISK, SSD, DISK, ARCHIVE}
- "topologyPaths" : ["/default-rack/hostname:ip"]
- }, {
- "cachedHosts" : [],
- "corrupt" : false,
- "hosts" : ["host"],
- "length" : 62599364,
- "names" : ["host:ip"],
- "offset" : 134217728,
- "storageIds" : ["storageid"],
- "storageTypes" : ["DISK"],
- "topologyPaths" : ["/default-rack/hostname:ip"]
- },
- ...
- ]
- }
- }
-
-See also: [`offset`](#Offset), [`length`](#Length), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileBlockLocations
-
Extended Attributes(XAttrs) Operations
--------------------------------------
@@ -2082,146 +2036,6 @@ A `BlockStoragePolicies` JSON object represents an array of `BlockStoragePolicy`
}
```
-#### BlockLocations JSON Schema
-
-A `BlockLocations` JSON object represents an array of `BlockLocation` JSON objects.
-
-```json
-{
- "name" : "BlockLocations",
- "properties":
- {
- "BlockLocations":
- {
- "type" : "object",
- "properties":
- {
- "BlockLocation":
- {
- "description": "An array of BlockLocation",
- "type" : "array",
- "items" : blockLocationProperties //See BlockLocation Properties
- }
- }
- }
- }
-}
-```
-
-See also [`BlockLocation` Properties](#BlockLocation_Properties), [`GETFILEBLOCKLOCATIONS`](#Get_File_Block_Locations), [BlockLocation](../../api/org/apache/hadoop/fs/BlockLocation.html)
-
-### BlockLocation JSON Schema
-
-```json
-{
- "name" : "BlockLocation",
- "properties":
- {
- "BlockLocation": blockLocationProperties //See BlockLocation Properties
- }
-}
-```
-
-See also [`BlockLocation` Properties](#BlockLocation_Properties), [`GETFILEBLOCKLOCATIONS`](#Get_File_Block_Locations), [BlockLocation](../../api/org/apache/hadoop/fs/BlockLocation.html)
-
-#### BlockLocation Properties
-
-JavaScript syntax is used to define `blockLocationProperties` so that it can be referred in both `BlockLocation` and `BlockLocations` JSON schemas.
-
-```javascript
-var blockLocationProperties =
-{
- "type" : "object",
- "properties":
- {
- "cachedHosts":
- {
- "description": "Datanode hostnames with a cached replica",
- "type" : "array",
- "required" : "true",
- "items" :
- {
- "description": "A datanode hostname",
- "type" : "string"
- }
- },
- "corrupt":
- {
- "description": "True if the block is corrupted",
- "type" : "boolean",
- "required" : "true"
- },
- "hosts":
- {
- "description": "Datanode hostnames store the block",
- "type" : "array",
- "required" : "true",
- "items" :
- {
- "description": "A datanode hostname",
- "type" : "string"
- }
- },
- "length":
- {
- "description": "Length of the block",
- "type" : "integer",
- "required" : "true"
- },
- "names":
- {
- "description": "Datanode IP:xferPort for accessing the block",
- "type" : "array",
- "required" : "true",
- "items" :
- {
- "description": "DatanodeIP:xferPort",
- "type" : "string"
- }
- },
- "offset":
- {
- "description": "Offset of the block in the file",
- "type" : "integer",
- "required" : "true"
- },
- "storageIds":
- {
- "description": "Storage ID of each replica",
- "type" : "array",
- "required" : "true",
- "items" :
- {
- "description": "Storage ID",
- "type" : "string"
- }
- },
- "storageTypes":
- {
- "description": "Storage type of each replica",
- "type" : "array",
- "required" : "true",
- "items" :
- {
- "description": "Storage type",
- "enum" : ["RAM_DISK", "SSD", "DISK", "ARCHIVE"]
- }
- },
- "topologyPaths":
- {
- "description": "Datanode addresses in network topology",
- "type" : "array",
- "required" : "true",
- "items" :
- {
- "description": "/rack/host:ip",
- "type" : "string"
- }
- }
- }
-};
-```
-
HTTP Query Parameter Dictionary
-------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
index 60f4f561a12..566755db996 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CLICommandErasureCodingCli;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;
@@ -34,7 +34,7 @@
public class TestErasureCodingCLI extends CLITestHelper {
private final int NUM_OF_DATANODES = 3;
private MiniDFSCluster dfsCluster = null;
- private FileSystem fs = null;
+ private DistributedFileSystem fs = null;
private String namenode = null;
@Rule
@@ -44,10 +44,6 @@ public class TestErasureCodingCLI extends CLITestHelper {
@Override
public void setUp() throws Exception {
super.setUp();
-
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- "RS-6-3-1024k,RS-3-2-1024k,XOR-2-1-1024k");
-
dfsCluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_OF_DATANODES).build();
dfsCluster.waitClusterUp();
@@ -56,6 +52,9 @@ public void setUp() throws Exception {
username = System.getProperty("user.name");
fs = dfsCluster.getFileSystem();
+ fs.enableErasureCodingPolicy("RS-6-3-1024k");
+ fs.enableErasureCodingPolicy("RS-3-2-1024k");
+ fs.enableErasureCodingPolicy("XOR-2-1-1024k");
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
index e25a7548852..b112e306c82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
@@ -27,12 +27,18 @@
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.io.IOUtils;
import org.junit.Assert;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.mockito.Mockito;
public class TestUnbuffer {
private static final Log LOG =
LogFactory.getLog(TestUnbuffer.class.getName());
+ @Rule
+ public ExpectedException exception = ExpectedException.none();
+
/**
* Test that calling Unbuffer closes sockets.
*/
@@ -123,4 +129,19 @@ public void testOpenManyFilesViaTcp() throws Exception {
}
}
}
+
+ /**
+ * Test unbuffer method which throws an Exception with class name included.
+ */
+ @Test
+ public void testUnbufferException() {
+ FSInputStream in = Mockito.mock(FSInputStream.class);
+ FSDataInputStream fs = new FSDataInputStream(in);
+
+ exception.expect(UnsupportedOperationException.class);
+ exception.expectMessage("this stream " + in.getClass().getName()
+ + " does not support unbuffering");
+
+ fs.unbuffer();
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index f3572ff1b7c..c6fe1a23b71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -71,7 +71,6 @@
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.Collectors;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
@@ -109,6 +108,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
@@ -117,8 +117,8 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
-import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
-import org.apache.hadoop.hdfs.protocol.BlocksStats;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
+import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -164,6 +164,8 @@
import org.apache.hadoop.hdfs.tools.JMXGet;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.unix.DomainSocket;
@@ -289,12 +291,13 @@ public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) {
Whitebox.setInternalState(fsn.getFSDirectory(), "editLog", newLog);
}
- public static void enableAllECPolicies(Configuration conf) {
- // Enable all the available EC policies
- String policies = SystemErasureCodingPolicies.getPolicies().stream()
- .map(ErasureCodingPolicy::getName)
- .collect(Collectors.joining(","));
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, policies);
+ public static void enableAllECPolicies(DistributedFileSystem fs)
+ throws IOException {
+ // Enable all available EC policies
+ for (ErasureCodingPolicy ecPolicy :
+ SystemErasureCodingPolicies.getPolicies()) {
+ fs.enableErasureCodingPolicy(ecPolicy.getName());
+ }
}
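
A minimal sketch of the new test-setup order this helper establishes: start the MiniDFSCluster first, then enable the policies on the live file system rather than through Configuration. MiniDFSCluster and DFSTestUtil are test-scope classes, and the DataNode count is an illustrative assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class EnableAllPoliciesInTestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Illustrative DN count; real tests size this from the EC policy.
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      // New pattern: enable policies on the live FS, not via Configuration.
      DFSTestUtil.enableAllECPolicies(fs);
    } finally {
      cluster.shutdown();
    }
  }
}
```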
/** class MyFile contains enough information to recreate the contents of
@@ -1464,6 +1467,33 @@ public static void runOperations(MiniDFSCluster cluster,
new byte[]{0x37, 0x38, 0x39});
// OP_REMOVE_XATTR
filesystem.removeXAttr(pathConcatTarget, "user.a2");
+
+ // OP_ADD_ERASURE_CODING_POLICY
+ ErasureCodingPolicy newPolicy1 =
+ new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA, 8 * 1024);
+ ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[] {newPolicy1};
+ AddECPolicyResponse[] responses =
+ filesystem.addErasureCodingPolicies(policyArray);
+ newPolicy1 = responses[0].getPolicy();
+
+ // OP_ADD_ERASURE_CODING_POLICY - policy with extra options
+ Map<String, String> extraOptions = new HashMap<>();
+ extraOptions.put("dummyKey", "dummyValue");
+ ECSchema schema =
+ new ECSchema(ErasureCodeConstants.RS_CODEC_NAME, 6, 10, extraOptions);
+ ErasureCodingPolicy newPolicy2 = new ErasureCodingPolicy(schema, 4 * 1024);
+ policyArray = new ErasureCodingPolicy[] {newPolicy2};
+ responses = filesystem.addErasureCodingPolicies(policyArray);
+ newPolicy2 = responses[0].getPolicy();
+ // OP_ENABLE_ERASURE_CODING_POLICY
+ filesystem.enableErasureCodingPolicy(newPolicy1.getName());
+ filesystem.enableErasureCodingPolicy(newPolicy2.getName());
+ // OP_DISABLE_ERASURE_CODING_POLICY
+ filesystem.disableErasureCodingPolicy(newPolicy1.getName());
+ filesystem.disableErasureCodingPolicy(newPolicy2.getName());
+ // OP_REMOVE_ERASURE_CODING_POLICY
+ filesystem.removeErasureCodingPolicy(newPolicy1.getName());
+ filesystem.removeErasureCodingPolicy(newPolicy2.getName());
}
public static void abortStream(DFSOutputStream out) throws IOException {
@@ -1657,8 +1687,8 @@ public static boolean verifyFileReplicasOnStorageType(FileSystem fs,
/**
* Verify the aggregated {@link ClientProtocol#getStats()} block counts equal
- * the sum of {@link ClientProtocol#getBlocksStats()} and
- * {@link ClientProtocol#getECBlockGroupsStats()}.
+ * the sum of {@link ClientProtocol#getReplicatedBlockStats()} and
+ * {@link ClientProtocol#getECBlockGroupStats()}.
* @throws Exception
*/
public static void verifyClientStats(Configuration conf,
@@ -1667,36 +1697,36 @@ public static void verifyClientStats(Configuration conf,
cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
long[] aggregatedStats = cluster.getNameNode().getRpcServer().getStats();
- BlocksStats blocksStats =
- client.getBlocksStats();
- ECBlockGroupsStats ecBlockGroupsStats = client.getECBlockGroupsStats();
+ ReplicatedBlockStats replicatedBlockStats =
+ client.getReplicatedBlockStats();
+ ECBlockGroupStats ecBlockGroupStats = client.getECBlockGroupStats();
assertEquals("Under replicated stats not matching!",
aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
assertEquals("Low redundancy stats not matching!",
aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
- blocksStats.getLowRedundancyBlocksStat() +
- ecBlockGroupsStats.getLowRedundancyBlockGroupsStat());
+ replicatedBlockStats.getLowRedundancyBlocks() +
+ ecBlockGroupStats.getLowRedundancyBlockGroups());
assertEquals("Corrupt blocks stats not matching!",
aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
- blocksStats.getCorruptBlocksStat() +
- ecBlockGroupsStats.getCorruptBlockGroupsStat());
+ replicatedBlockStats.getCorruptBlocks() +
+ ecBlockGroupStats.getCorruptBlockGroups());
assertEquals("Missing blocks stats not matching!",
aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
- blocksStats.getMissingReplicaBlocksStat() +
- ecBlockGroupsStats.getMissingBlockGroupsStat());
+ replicatedBlockStats.getMissingReplicaBlocks() +
+ ecBlockGroupStats.getMissingBlockGroups());
assertEquals("Missing blocks with replication factor one not matching!",
aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
- blocksStats.getMissingReplicationOneBlocksStat());
+ replicatedBlockStats.getMissingReplicationOneBlocks());
assertEquals("Bytes in future blocks stats not matching!",
aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
- blocksStats.getBytesInFutureBlocksStat() +
- ecBlockGroupsStats.getBytesInFutureBlockGroupsStat());
+ replicatedBlockStats.getBytesInFutureBlocks() +
+ ecBlockGroupStats.getBytesInFutureBlockGroups());
assertEquals("Pending deletion blocks stats not matching!",
aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
- blocksStats.getPendingDeletionBlocksStat() +
- ecBlockGroupsStats.getPendingDeletionBlockGroupsStat());
+ replicatedBlockStats.getPendingDeletionBlocks() +
+ ecBlockGroupStats.getPendingDeletionBlocks());
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
index 4202969ee43..7057010663b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
@@ -81,11 +81,11 @@ public static MiniDFSCluster initializeCluster() throws IOException {
0);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
MiniDFSCluster myCluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATANODES)
.build();
+ myCluster.getFileSystem().enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
myCluster.getFileSystem().getClient().setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
return myCluster;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
index 97f34f29417..1b462a9a8ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
@@ -72,7 +72,7 @@ private static long checkTxid(EventBatch batch, long prevTxid){
*/
@Test
public void testOpcodeCount() {
- Assert.assertEquals(50, FSEditLogOpCodes.values().length);
+ Assert.assertEquals(54, FSEditLogOpCodes.values().length);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index 4f67a0a1a29..f94b7abeee2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -93,8 +93,6 @@ public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- getEcPolicy().getName());
if (ErasureCodeNative.isNativeCodeLoaded()) {
conf.set(
CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
@@ -108,6 +106,7 @@ public void setup() throws IOException {
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
}
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(getEcPolicy().getName());
fs.mkdirs(dirPath);
fs.getClient()
.setErasureCodingPolicy(dirPath.toString(), ecPolicy.getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index c0cfea22007..3714542411d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -94,11 +94,10 @@ public void setup() throws IOException {
CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
NativeRSRawErasureCoderFactory.CODER_NAME);
}
- DFSTestUtil.enableAllECPolicies(conf);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
- cluster.getFileSystem().getClient().setErasureCodingPolicy("/", ecPolicy
- .getName());
fs = cluster.getFileSystem();
+ DFSTestUtil.enableAllECPolicies(fs);
+ fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
}
@After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index f63a3538150..57da4399491 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -42,6 +42,7 @@
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
@@ -216,10 +217,10 @@ private void setup(Configuration conf) throws IOException {
CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
NativeRSRawErasureCoderFactory.CODER_NAME);
}
- DFSTestUtil.enableAllECPolicies(conf);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
+ DFSTestUtil.enableAllECPolicies(dfs);
dfs.mkdirs(dir);
dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
}
@@ -282,7 +283,7 @@ public void testBlockTokenExpired() throws Exception {
@Test(timeout = 90000)
public void testAddBlockWhenNoSufficientDataBlockNumOfNodes()
- throws IOException {
+ throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
try {
@@ -301,20 +302,18 @@ public void testAddBlockWhenNoSufficientDataBlockNumOfNodes()
DatanodeReportType.LIVE);
assertEquals("Mismatches number of live Dns ", numDatanodes, info.length);
final Path dirFile = new Path(dir, "ecfile");
- FSDataOutputStream out;
- try {
- out = dfs.create(dirFile, true);
- out.write("something".getBytes());
- out.flush();
- out.close();
- Assert.fail("Failed to validate available dns against blkGroupSize");
- } catch (IOException ioe) {
- // expected
- GenericTestUtils.assertExceptionContains("Failed to get " +
- dataBlocks + " nodes from namenode: blockGroupSize= " +
- (dataBlocks + parityBlocks) + ", blocks.length= " +
- numDatanodes, ioe);
- }
+ LambdaTestUtils.intercept(
+ IOException.class,
+ "File " + dirFile + " could only be written to " +
+ numDatanodes + " of the " + dataBlocks + " required nodes for " +
+ getEcPolicy().getName(),
+ () -> {
+ try (FSDataOutputStream out = dfs.create(dirFile, true)) {
+ out.write("something".getBytes());
+ out.flush();
+ }
+ return 0;
+ });
} finally {
tearDown();
}
@@ -493,8 +492,8 @@ private void runTest(final int length, final int[] killPos,
final BlockManager bm = nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
- // set a short token lifetime (1 second)
- SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
+ // set a short token lifetime (6 seconds)
+ SecurityTestUtil.setBlockTokenLifetime(sm, 6000L);
}
final AtomicInteger pos = new AtomicInteger();
@@ -631,6 +630,8 @@ int getBase() {
private void run(int offset) {
int base = getBase();
+ // TODO: Fix and re-enable these flaky tests. See HDFS-12417.
+ assumeTrue("Test has been temporarily disabled. See HDFS-12417.", false);
assumeTrue(base >= 0);
final int i = offset + base;
final Integer length = getLength(i);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index bb394feb7cd..7bd85b4989c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -131,8 +131,6 @@ public void setup() throws IOException {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
numDNs = dataBlocks + parityBlocks + 2;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
@@ -142,6 +140,8 @@ public void setup() throws IOException {
bm = fsn.getBlockManager();
client = getDfsClient(cluster.getNameNode(0), conf);
+ dfs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
dfs.mkdirs(ecDir);
dfs.setErasureCodingPolicy(ecDir,
StripedFileTestUtil.getDefaultECPolicy().getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 8e54e5f833b..987992e5a38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -50,7 +50,6 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicReference;
-import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
new file mode 100644
index 00000000000..d4e01b72d4e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Testing correctness of FileSystem.getFileBlockLocations and
+ * FileSystem.listFiles for erasure coded files.
+ */
+public class TestDistributedFileSystemWithECFile {
+ private final ErasureCodingPolicy ecPolicy =
+ StripedFileTestUtil.getDefaultECPolicy();
+ private final int cellSize = ecPolicy.getCellSize();
+ private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+ private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+ private final int numDNs = dataBlocks + parityBlocks;
+ private final int stripesPerBlock = 4;
+ private final int blockSize = stripesPerBlock * cellSize;
+ private final int blockGroupSize = blockSize * dataBlocks;
+
+ private MiniDFSCluster cluster;
+ private FileContext fileContext;
+ private DistributedFileSystem fs;
+ private Configuration conf = new HdfsConfiguration();
+
+ @Before
+ public void setup() throws IOException {
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+ false);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
+ fileContext = FileContext.getFileContext(cluster.getURI(0), conf);
+ fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
+ fs.mkdirs(new Path("/ec"));
+ cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec",
+ StripedFileTestUtil.getDefaultECPolicy().getName());
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+
+ private void createFile(String path, int size) throws Exception {
+ byte[] expected = StripedFileTestUtil.generateBytes(size);
+ Path src = new Path(path);
+ DFSTestUtil.writeFile(fs, src, new String(expected));
+ StripedFileTestUtil.waitBlockGroupsReported(fs, src.toString());
+ StripedFileTestUtil.verifyLength(fs, src, size);
+ }
+
+ @Test(timeout=60000)
+ public void testListECFilesSmallerThanOneCell() throws Exception {
+ createFile("/ec/smallcell", 1);
+ final List<LocatedFileStatus> retVal = new ArrayList<>();
+ final RemoteIterator<LocatedFileStatus> iter =
+ cluster.getFileSystem().listFiles(new Path("/ec"), true);
+ while (iter.hasNext()) {
+ retVal.add(iter.next());
+ }
+ assertTrue(retVal.size() == 1);
+ LocatedFileStatus fileStatus = retVal.get(0);
+ assertSmallerThanOneCell(fileStatus.getBlockLocations());
+
+ BlockLocation[] locations = cluster.getFileSystem().getFileBlockLocations(
+ fileStatus, 0, fileStatus.getLen());
+ assertSmallerThanOneCell(locations);
+
+ //Test FileContext
+ fileStatus = fileContext.listLocatedStatus(new Path("/ec")).next();
+ assertSmallerThanOneCell(fileStatus.getBlockLocations());
+ locations = fileContext.getFileBlockLocations(new Path("/ec/smallcell"),
+ 0, fileStatus.getLen());
+ assertSmallerThanOneCell(locations);
+ }
+
+ private void assertSmallerThanOneCell(BlockLocation[] locations)
+ throws IOException {
+ assertTrue(locations.length == 1);
+ BlockLocation blockLocation = locations[0];
+ assertTrue(blockLocation.getOffset() == 0);
+ assertTrue(blockLocation.getLength() == 1);
+ assertTrue(blockLocation.getHosts().length == 1 + parityBlocks);
+ }
+
+ @Test(timeout=60000)
+ public void testListECFilesSmallerThanOneStripe() throws Exception {
+ int dataBlocksNum = 3;
+ createFile("/ec/smallstripe", cellSize * dataBlocksNum);
+ RemoteIterator<LocatedFileStatus> iter =
+ cluster.getFileSystem().listFiles(new Path("/ec"), true);
+ LocatedFileStatus fileStatus = iter.next();
+ assertSmallerThanOneStripe(fileStatus.getBlockLocations(), dataBlocksNum);
+
+ BlockLocation[] locations = cluster.getFileSystem().getFileBlockLocations(
+ fileStatus, 0, fileStatus.getLen());
+ assertSmallerThanOneStripe(locations, dataBlocksNum);
+
+ //Test FileContext
+ fileStatus = fileContext.listLocatedStatus(new Path("/ec")).next();
+ assertSmallerThanOneStripe(fileStatus.getBlockLocations(), dataBlocksNum);
+ locations = fileContext.getFileBlockLocations(new Path("/ec/smallstripe"),
+ 0, fileStatus.getLen());
+ assertSmallerThanOneStripe(locations, dataBlocksNum);
+ }
+
+ private void assertSmallerThanOneStripe(BlockLocation[] locations,
+ int dataBlocksNum) throws IOException {
+ int expectedHostNum = dataBlocksNum + parityBlocks;
+ assertTrue(locations.length == 1);
+ BlockLocation blockLocation = locations[0];
+ assertTrue(blockLocation.getHosts().length == expectedHostNum);
+ assertTrue(blockLocation.getOffset() == 0);
+ assertTrue(blockLocation.getLength() == dataBlocksNum * cellSize);
+ }
+
+ @Test(timeout=60000)
+ public void testListECFilesMoreThanOneBlockGroup() throws Exception {
+ createFile("/ec/group", blockGroupSize + 123);
+ RemoteIterator<LocatedFileStatus> iter =
+ cluster.getFileSystem().listFiles(new Path("/ec"), true);
+ LocatedFileStatus fileStatus = iter.next();
+ assertMoreThanOneBlockGroup(fileStatus.getBlockLocations(), 123);
+
+ BlockLocation[] locations = cluster.getFileSystem().getFileBlockLocations(
+ fileStatus, 0, fileStatus.getLen());
+ assertMoreThanOneBlockGroup(locations, 123);
+
+ //Test FileContext
+ iter = fileContext.listLocatedStatus(new Path("/ec"));
+ fileStatus = iter.next();
+ assertMoreThanOneBlockGroup(fileStatus.getBlockLocations(), 123);
+ locations = fileContext.getFileBlockLocations(new Path("/ec/group"),
+ 0, fileStatus.getLen());
+ assertMoreThanOneBlockGroup(locations, 123);
+ }
+
+ private void assertMoreThanOneBlockGroup(BlockLocation[] locations,
+ int lastBlockSize) throws IOException {
+ assertTrue(locations.length == 2);
+ BlockLocation firstBlockGroup = locations[0];
+ assertTrue(firstBlockGroup.getHosts().length == numDNs);
+ assertTrue(firstBlockGroup.getOffset() == 0);
+ assertTrue(firstBlockGroup.getLength() == blockGroupSize);
+ BlockLocation lastBlock = locations[1];
+ assertTrue(lastBlock.getHosts().length == 1 + parityBlocks);
+ assertTrue(lastBlock.getOffset() == blockGroupSize);
+ assertTrue(lastBlock.getLength() == lastBlockSize);
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
index 6f533625cc7..959e724b58f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZonesWithKMS.java
@@ -21,7 +21,6 @@
import com.google.common.base.Supplier;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
-import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
@@ -70,21 +69,14 @@ public void teardown() {
protected void setProvider() {
}
- private KMSClientProvider getKMSClientProvider() {
- LoadBalancingKMSClientProvider lbkmscp =
- (LoadBalancingKMSClientProvider) Whitebox
- .getInternalState(cluster.getNamesystem().getProvider(), "extension");
- assert lbkmscp.getProviders().length == 1;
- return lbkmscp.getProviders()[0];
- }
-
@Test(timeout = 120000)
public void testCreateEZPopulatesEDEKCache() throws Exception {
final Path zonePath = new Path("/TestEncryptionZone");
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY, NO_TRASH);
@SuppressWarnings("unchecked")
- KMSClientProvider kcp = getKMSClientProvider();
+ KMSClientProvider kcp = (KMSClientProvider) Whitebox
+ .getInternalState(cluster.getNamesystem().getProvider(), "extension");
assertTrue(kcp.getEncKeyQueueSize(TEST_KEY) > 0);
}
@@ -118,7 +110,8 @@ public void testWarmupEDEKCacheOnStartup() throws Exception {
dfsAdmin.createEncryptionZone(zonePath, anotherKey, NO_TRASH);
@SuppressWarnings("unchecked")
- KMSClientProvider spy = getKMSClientProvider();
+ KMSClientProvider spy = (KMSClientProvider) Whitebox
+ .getInternalState(cluster.getNamesystem().getProvider(), "extension");
assertTrue("key queue is empty after creating encryption zone",
spy.getEncKeyQueueSize(TEST_KEY) > 0);
@@ -129,7 +122,9 @@ public void testWarmupEDEKCacheOnStartup() throws Exception {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
- final KMSClientProvider kspy = getKMSClientProvider();
+ final KMSClientProvider kspy = (KMSClientProvider) Whitebox
+ .getInternalState(cluster.getNamesystem().getProvider(),
+ "extension");
return kspy.getEncKeyQueueSize(TEST_KEY) > 0;
}
}, 1000, 60000);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java
index be962dc4cd8..da3407d2fc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java
@@ -48,11 +48,11 @@ public static void setup() throws IOException {
conf = new HdfsConfiguration();
int numDN = ErasureCodeBenchmarkThroughput.getEcPolicy().getNumDataUnits() +
ErasureCodeBenchmarkThroughput.getEcPolicy().getNumParityUnits();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ErasureCodeBenchmarkThroughput.getEcPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDN).build();
cluster.waitActive();
fs = cluster.getFileSystem();
+ ((DistributedFileSystem)fs).enableErasureCodingPolicy(
+ ErasureCodeBenchmarkThroughput.getEcPolicy().getName());
}
@AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index e095602c6ea..4f2040b60f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -77,13 +77,13 @@ public void setupCluster() throws IOException {
ecPolicy = getEcPolicy();
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
- DFSTestUtil.enableAllECPolicies(conf);
cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()).
build();
cluster.waitActive();
fs = cluster.getFileSystem();
namesystem = cluster.getNamesystem();
+ DFSTestUtil.enableAllECPolicies(fs);
}
@After
@@ -206,16 +206,9 @@ public void testBasicSetECPolicy()
// Verify that policies are successfully loaded even when policies
// are disabled
- cluster.getConfiguration(0).set(
- DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, "");
cluster.restartNameNodes();
cluster.waitActive();
- // Only default policy should be enabled after restart
- Assert.assertEquals("Only default policy should be enabled after restart",
- 1,
- ErasureCodingPolicyManager.getInstance().getEnabledPolicies().length);
-
// Already set directory-level policies should still be in effect
Path disabledPolicy = new Path(dir1, "afterDisabled");
Assert.assertEquals("Dir does not have policy set",
@@ -725,7 +718,7 @@ public void testAddErasureCodingPolicies() throws Exception {
policyArray = new ErasureCodingPolicy[]{policy0};
responses = fs.addErasureCodingPolicies(policyArray);
assertEquals(1, responses.length);
- assertFalse(responses[0].isSucceed());
+ assertTrue(responses[0].isSucceed());
// Test add policy successfully
newPolicy =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
index fbeada67dc7..6ab018bbea8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
@@ -53,11 +53,10 @@ public void setupCluster() throws IOException {
groupSize = (short) (ecPolicy.getNumDataUnits()
+ ecPolicy.getNumParityUnits());
conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
cluster.waitActive();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(ecPolicy.getName());
}
@After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
index b804523b3c0..9d6687c6c46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
@@ -77,8 +77,6 @@ public void setup() throws IOException {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
Path ecPath = new Path(ecDir);
cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault());
@@ -86,7 +84,8 @@ public void setup() throws IOException {
StripedFileTestUtil.getDefaultECPolicy().getName());
fs = cluster.getFileSystem();
client = fs.getClient();
-
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
bytesPerCRC = conf.getInt(
HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
index e04f9573256..077cf3a115f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -45,13 +45,13 @@ public class TestFileStatusWithECPolicy {
@Before
public void before() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
client = fs.getClient();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 86b1aadf6ea..2846dbf7f00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -88,12 +88,11 @@ public void setup() throws IOException {
false);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
final int numDNs = dataBlocks + parityBlocks;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
+ dfs.enableErasureCodingPolicy(ecPolicy.getName());
dfs.mkdirs(dir);
dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
index 34cba92ad21..f3b8dd84f82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
@@ -58,12 +58,11 @@ public class TestReadStripedFileWithMissingBlocks {
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().setErasureCodingPolicy(
"/", ecPolicy.getName());
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(ecPolicy.getName());
}
public void tearDown() throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 7cd34c2acd7..72b14129484 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -105,12 +105,12 @@ public void setup() throws IOException {
CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
NativeRSRawErasureCoderFactory.CODER_NAME);
}
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNum).build();
cluster.waitActive();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
fs.getClient().setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
@@ -447,7 +447,7 @@ private void testNNSendsErasureCodingTasks(int deadDN) throws Exception {
conf.setInt(
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 10);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 20);
- conf.setInt(DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_BLK_THREADS_KEY,
+ conf.setInt(DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_KEY,
2);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDataNodes).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
index edecbf27a6a..3d3ec9c6c66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
@@ -61,12 +61,12 @@ public void setup() throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
cluster.waitActive();
+ cluster.getFileSystem().enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
index 50d7b2756f9..497d450de25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
@@ -110,13 +110,13 @@ public void testSetRepWithStoragePolicyOnEmptyFile() throws Exception {
public void testSetRepOnECFile() throws Exception {
ClientProtocol client;
Configuration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
cluster.waitActive();
client = NameNodeProxies.createProxy(conf,
cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+ client.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
client.setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
index 5371e205ac3..529a110c0ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
@@ -70,11 +70,11 @@ public void setup() throws IOException {
CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
NativeRSRawErasureCoderFactory.CODER_NAME);
}
- DFSTestUtil.enableAllECPolicies(conf);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
dataBlocks + parityBlocks).build();
cluster.waitActive();
fs = cluster.getFileSystem();
+ DFSTestUtil.enableAllECPolicies(fs);
}
@After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index 9b14df14c34..f27c9786db6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -76,10 +76,10 @@ public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
fs.mkdirs(new Path("/ec"));
cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec",
StripedFileTestUtil.getDefaultECPolicy().getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index 8555e5d0ad9..2fe0a1c2957 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -118,7 +118,7 @@ public void testClientAndServerDoNotHaveCommonQop() throws Exception {
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
exception.expect(IOException.class);
- exception.expectMessage("could only be replicated to 0 nodes");
+ exception.expectMessage("could only be written to 0");
doTest(clientConf);
}
@@ -140,7 +140,7 @@ public void testServerSaslNoClientSasl() throws Exception {
"configured or not supported in client");
} catch (IOException e) {
GenericTestUtils.assertMatches(e.getMessage(),
- "could only be replicated to 0 nodes");
+ "could only be written to 0");
} finally {
logs.stopCapturing();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
index 506497e6ae4..97cf2f3c068 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
@@ -23,7 +23,7 @@
import java.util.TreeMap;
import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.hdfs.qjournal.client.QuorumCall;
+import org.apache.hadoop.util.FakeTimer;
import org.junit.Test;
import com.google.common.base.Joiner;
@@ -83,4 +83,33 @@ public void testQuorumFailsWithoutResponse() throws Exception {
}
}
+ @Test(timeout=10000)
+ public void testQuorumSucceedsWithLongPause() throws Exception {
+ final Map<String, SettableFuture<String>> futures = ImmutableMap.of(
+ "f1", SettableFuture.<String>create());
+
+ FakeTimer timer = new FakeTimer() {
+ private int callCount = 0;
+ @Override
+ public long monotonicNowNanos() {
+ callCount++;
+ if (callCount == 1) {
+ long old = super.monotonicNowNanos();
+ advance(1000000);
+ return old;
+ } else if (callCount == 10) {
+ futures.get("f1").set("first future");
+ return super.monotonicNowNanos();
+ } else {
+ return super.monotonicNowNanos();
+ }
+ }
+ };
+
+ QuorumCall<String, String> q = QuorumCall.create(futures, timer);
+ assertEquals(0, q.countResponses());
+
+ q.waitFor(1, 0, 0, 3000, "test"); // wait for 1 response
+ }
+
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 28ec7082537..77b50a178eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -55,6 +55,7 @@
import com.google.common.base.Charsets;
import com.google.common.primitives.Bytes;
import com.google.common.primitives.Ints;
+import org.mockito.Mockito;
public class TestJournalNode {
@@ -342,4 +343,24 @@ private void doPerfTest(int editsSize, int numEdits) throws Exception {
System.err.println("Time per batch: " + avgRtt + "ms");
System.err.println("Throughput: " + throughput + " bytes/sec");
}
+
+ /**
+ * Test case to check that the JournalNode exits cleanly when the HTTP server
+ * or RPC server fails to start. The call to JournalNode start should fail
+ * with a bind exception, as the port is in use by the JN started in the
+ * @Before routine.
+ */
+ @Test
+ public void testJournalNodeStartupFailsCleanly() {
+ JournalNode jNode = Mockito.spy(new JournalNode());
+ try {
+ jNode.setConf(conf);
+ jNode.start();
+ fail("Should throw bind exception");
+ } catch (Exception e) {
+ GenericTestUtils
+ .assertExceptionContains("java.net.BindException: Port in use", e);
+ }
+ Mockito.verify(jNode).stop(1);
+ }
+
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index ec9c39a622a..a900ad191da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -1981,8 +1981,6 @@ private void doTestBalancerWithStripedFile(Configuration conf) throws Exception
for (int i = 0; i < numOfDatanodes; i++) {
racks[i] = "/rack" + (i % numOfRacks);
}
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numOfDatanodes)
.racks(racks)
@@ -1993,6 +1991,8 @@ private void doTestBalancerWithStripedFile(Configuration conf) throws Exception
cluster.waitActive();
client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
ClientProtocol.class).getProxy();
+ client.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
client.setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 4092e5ef33a..10289ed0e9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1030,8 +1030,7 @@ public void testStorageWithRemainingCapacity() throws Exception {
0x1BAD5EED);
}
catch (RemoteException re) {
- GenericTestUtils.assertExceptionContains("nodes instead of "
- + "minReplication", re);
+ GenericTestUtils.assertExceptionContains("of the 1 minReplication", re);
}
}
finally {
@@ -1368,8 +1367,6 @@ public void testPlacementPolicySatisfied() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
@@ -1382,6 +1379,8 @@ public void testPlacementPolicySatisfied() throws Exception {
final Path ecDir = new Path("/ec");
final Path testFileUnsatisfied = new Path(ecDir, "test1");
final Path testFileSatisfied = new Path(ecDir, "test2");
+ dfs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
cluster.getFileSystem().getClient().mkdirs(ecDir.toString(), null, true);
cluster.getFileSystem().getClient()
.setErasureCodingPolicy(ecDir.toString(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index 701928d2ff4..5b03d8e9799 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -41,8 +41,10 @@
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.After;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
import org.eclipse.jetty.util.ajax.JSON;
+import org.junit.rules.Timeout;
/**
* Class for testing {@link BlockStatsMXBean} implementation
@@ -51,6 +53,9 @@ public class TestBlockStatsMXBean {
private MiniDFSCluster cluster;
+ @Rule
+ public Timeout globalTimeout = new Timeout(300000);
+
@Before
public void setup() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
@@ -181,7 +186,7 @@ public void testStorageTypeStatsWhenStorageFailed() throws Exception {
fail("Should throw exception, becuase no DISK storage available");
} catch (Exception e) {
assertTrue(e.getMessage().contains(
- "could only be replicated to 0 nodes instead"));
+ "could only be written to 0 of the 1 minReplication"));
}
// wait for heartbeat
Thread.sleep(6000);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
index 54f28053f64..7627cf5c6a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
@@ -55,8 +55,6 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
private Configuration getConf() {
Configuration conf = super.getConf(numDNs);
conf.setInt("io.bytes.per.checksum", cellSize);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
return conf;
}
@@ -85,6 +83,8 @@ public void testRead() throws Exception {
.nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
.numDataNodes(numDNs)
.build();
+ cluster.getFileSystem().enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
try {
@@ -116,8 +116,6 @@ public void testAppend() throws Exception {
public void testEnd2End() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
new TestBalancer().integrationTestWithStripedFile(conf);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
index 241391821fb..cf4299b5015 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
@@ -24,7 +24,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -70,8 +69,6 @@ public void setup() throws Exception {
ecPolicy = SystemErasureCodingPolicies.getByID(
SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES)
.build();
cluster.waitActive();
@@ -84,6 +81,7 @@ public void setup() throws Exception {
// Create a striped file
Path ecDir = new Path("/ec");
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(ecPolicy.getName());
fs.mkdirs(ecDir);
fs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
ecFile = new Path(ecDir, "ec-file");
@@ -268,9 +266,9 @@ public void testDatanodeReRegistration() throws Exception {
"Striped BlockGroups!",
(long) expected, invalidateBlocks.numBlocks());
assertEquals("Unexpected invalidate count for replicas!",
- totalReplicas, invalidateBlocks.getBlocksStat());
+ totalReplicas, invalidateBlocks.getBlocks());
assertEquals("Unexpected invalidate count for striped block groups!",
- totalStripedDataBlocks, invalidateBlocks.getECBlockGroupsStat());
+ totalStripedDataBlocks, invalidateBlocks.getECBlocks());
} finally {
namesystem.writeUnlock();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 3f8a5cd4845..3510bc3d769 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -78,10 +78,10 @@ private void verifyCorruptBlocksCount(CorruptReplicasMap corruptReplicasMap,
assertEquals("Unexpected total corrupt blocks count!",
totalExpectedCorruptBlocks, corruptReplicasMap.size());
assertEquals("Unexpected replica blocks count!",
- expectedReplicaCount, corruptReplicasMap.getCorruptBlocksStat());
+ expectedReplicaCount, corruptReplicasMap.getCorruptBlocks());
assertEquals("Unexpected striped blocks count!",
expectedStripedBlockCount,
- corruptReplicasMap.getCorruptECBlockGroupsStat());
+ corruptReplicasMap.getCorruptECBlockGroups());
}
@Test
@@ -93,9 +93,9 @@ public void testCorruptReplicaInfo()
assertEquals("Total number of corrupt blocks must initially be 0!",
0, crm.size());
assertEquals("Number of corrupt replicas must initially be 0!",
- 0, crm.getCorruptBlocksStat());
+ 0, crm.getCorruptBlocks());
assertEquals("Number of corrupt striped block groups must initially be 0!",
- 0, crm.getCorruptECBlockGroupsStat());
+ 0, crm.getCorruptECBlockGroups());
assertNull("Param n cannot be less than 0",
crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, -1, null));
assertNull("Param n cannot be greater than 100",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
index c65fc6495f8..2b28f1ef3ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java
@@ -50,16 +50,16 @@ private void verifyBlockStats(LowRedundancyBlocks queues,
int corruptReplicationOneCount, int lowRedundancyStripedCount,
int corruptStripedCount) {
assertEquals("Low redundancy replica count incorrect!",
- lowRedundancyReplicaCount, queues.getLowRedundancyBlocksStat());
+ lowRedundancyReplicaCount, queues.getLowRedundancyBlocks());
assertEquals("Corrupt replica count incorrect!",
- corruptReplicaCount, queues.getCorruptBlocksStat());
+ corruptReplicaCount, queues.getCorruptBlocks());
assertEquals("Corrupt replica one count incorrect!",
corruptReplicationOneCount,
- queues.getCorruptReplicationOneBlocksStat());
+ queues.getCorruptReplicationOneBlocks());
assertEquals("Low redundancy striped blocks count incorrect!",
- lowRedundancyStripedCount, queues.getLowRedundancyECBlockGroupsStat());
+ lowRedundancyStripedCount, queues.getLowRedundancyECBlockGroups());
assertEquals("Corrupt striped blocks count incorrect!",
- corruptStripedCount, queues.getCorruptECBlockGroupsStat());
+ corruptStripedCount, queues.getCorruptECBlockGroups());
assertEquals("Low Redundancy count incorrect!",
lowRedundancyReplicaCount + lowRedundancyStripedCount,
queues.getLowRedundancyBlockCount());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index aaa48997eac..7d16017c0d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -145,13 +145,12 @@ private DataNode getDataNode(String host) {
public void testReconstructForNotEnoughRacks() throws Exception {
LOG.info("cluster hosts: {}, racks: {}", Arrays.asList(hosts),
Arrays.asList(racks));
-
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
.numDataNodes(hosts.length).build();
cluster.waitActive();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
fs.setErasureCodingPolicy(new Path("/"),
StripedFileTestUtil.getDefaultECPolicy().getName());
FSNamesystem fsn = cluster.getNamesystem();
@@ -219,12 +218,12 @@ public void testReconstructForNotEnoughRacks() throws Exception {
@Test
public void testChooseExcessReplicasToDelete() throws Exception {
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
.numDataNodes(hosts.length).build();
cluster.waitActive();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
fs.setErasureCodingPolicy(new Path("/"),
StripedFileTestUtil.getDefaultECPolicy().getName());
@@ -271,8 +270,6 @@ public void testChooseExcessReplicasToDelete() throws Exception {
*/
@Test
public void testReconstructionWithDecommission() throws Exception {
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
final String[] rackNames = getRacks(dataBlocks + parityBlocks + 2,
dataBlocks);
final String[] hostNames = getHosts(dataBlocks + parityBlocks + 2);
@@ -281,6 +278,8 @@ public void testReconstructionWithDecommission() throws Exception {
.numDataNodes(hostNames.length).build();
cluster.waitActive();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
fs.setErasureCodingPolicy(new Path("/"),
StripedFileTestUtil.getDefaultECPolicy().getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
index c5066a04a2a..241c2dcf991 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
@@ -31,10 +31,10 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
@@ -72,7 +72,7 @@ public class TestSequentialBlockGroupId {
private final int fileLen = blockSize * dataBlocks * blockGrpCount;
private MiniDFSCluster cluster;
- private FileSystem fs;
+ private DistributedFileSystem fs;
private SequentialBlockGroupIdGenerator blockGrpIdGenerator;
private Path ecDir = new Path("/ecDir");
@@ -81,12 +81,12 @@ public void setup() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
blockGrpIdGenerator = cluster.getNamesystem().getBlockManager()
.getBlockIdManager().getBlockGroupIdGenerator();
fs.mkdirs(ecDir);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
index ee2afbbd8b5..7194385090e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
@@ -72,13 +72,13 @@ public void setup() throws IOException {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After
@@ -90,6 +90,10 @@ public void tearDown() {
@Test(timeout = 120000)
public void testFullBlock() throws Exception {
+ Assert.assertEquals(0, getLongMetric("EcReconstructionReadTimeMillis"));
+ Assert.assertEquals(0, getLongMetric("EcReconstructionDecodingTimeMillis"));
+ Assert.assertEquals(0, getLongMetric("EcReconstructionWriteTimeMillis"));
+
doTest("/testEcMetrics", blockGroupSize, 0);
Assert.assertEquals("EcReconstructionTasks should be ",
@@ -103,6 +107,9 @@ public void testFullBlock() throws Exception {
blockSize, getLongMetric("EcReconstructionBytesWritten"));
Assert.assertEquals("EcReconstructionRemoteBytesRead should be ",
0, getLongMetricWithoutCheck("EcReconstructionRemoteBytesRead"));
+ Assert.assertTrue(getLongMetric("EcReconstructionReadTimeMillis") > 0);
+ Assert.assertTrue(getLongMetric("EcReconstructionDecodingTimeMillis") > 0);
+ Assert.assertTrue(getLongMetric("EcReconstructionWriteTimeMillis") > 0);
}
// A partial block, reconstruct the partial block
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index a08f071ec90..eca102ed192 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -478,9 +478,12 @@ public void testPlanNode() throws Exception {
public void testPlanJsonNode() throws Exception {
final String planArg = String.format("-%s %s", PLAN,
"a87654a9-54c7-4693-8dd9-c9c7021dc340");
+ final Path testPath = new Path(
+ PathUtils.getTestPath(getClass()),
+ GenericTestUtils.getMethodName());
final String cmdLine = String
.format(
- "hdfs diskbalancer %s", planArg);
+ "hdfs diskbalancer -out %s %s", testPath, planArg);
runCommand(cmdLine);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 707d46fd3cd..8ff660fb8ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -508,8 +508,6 @@ public void testMoverWithStripedFile() throws Exception {
capacities[i][j]=capacity;
}
}
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numOfDatanodes)
.storagesPerDatanode(storagesPerDatanode)
@@ -529,6 +527,8 @@ public void testMoverWithStripedFile() throws Exception {
try {
cluster.waitActive();
+ cluster.getFileSystem().enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
// set "/bar" directory with HOT storage policy.
ClientProtocol client = NameNodeProxies.createProxy(conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
index ecbf99d8042..aad8e9b96a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
@@ -76,12 +76,11 @@ public void setup() throws IOException {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
SimulatedFSDataset.setFactory(conf);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(ecPolicy.getName());
fs.mkdirs(dirPath);
fs.getClient().setErasureCodingPolicy(dirPath.toString(),
ecPolicy.getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
index a4f470b34d8..45e98ea30e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
@@ -20,7 +20,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -62,11 +61,11 @@ public class TestAddStripedBlockInFBR {
@Before
public void setup() throws IOException {
Configuration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
+ dfs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 623c444f714..ec13b448e23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -19,7 +19,6 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSStripedOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -85,11 +84,10 @@ public class TestAddStripedBlocks {
@Before
public void setup() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
cluster.waitActive();
dfs = cluster.getFileSystem();
+ dfs.enableErasureCodingPolicy(ecPolicy.getName());
dfs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 74be90cfb5a..b6c13188c23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -175,8 +175,8 @@ public void testDeadNodeAsBlockTarget() throws Exception {
// choose the targets, but local node should not get selected as this is not
// part of the cluster anymore
DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
- clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7,
- BlockType.CONTIGUOUS, null);
+ clientNode, new HashSet<>(), 256 * 1024 * 1024L, null, (byte) 7,
+ BlockType.CONTIGUOUS, null, null);
for (DatanodeStorageInfo datanodeStorageInfo : results) {
assertFalse("Dead node should not be choosen", datanodeStorageInfo
.getDatanodeDescriptor().equals(clientNode));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index 133a18e72d5..a13574fbdb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -20,11 +20,13 @@
import java.io.FileNotFoundException;
import java.util.AbstractMap;
import java.util.ArrayList;
+import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -49,16 +51,21 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.junit.Assert;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.Timeout;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY;
/**
* Test race between delete and other operations. For now only addBlock()
@@ -71,6 +78,9 @@ public class TestDeleteRace {
private static final Configuration conf = new HdfsConfiguration();
private MiniDFSCluster cluster;
+ @Rule
+ public Timeout timeout = new Timeout(60000 * 3);
+
@Test
public void testDeleteAddBlockRace() throws Exception {
testDeleteAddBlockRace(false);
@@ -358,4 +368,78 @@ public void testDeleteAndCommitBlockSynchronizationRaceHasSnapshot()
throws Exception {
testDeleteAndCommitBlockSynchronizationRace(true);
}
+
+
+ /**
+ * Test the sequence of deleting a file that has a snapshot,
+ * followed by the lease manager's hard-limit recovery.
+ */
+ @Test
+ public void testDeleteAndLeaseRecoveryHardLimitSnapshot() throws Exception {
+ final Path rootPath = new Path("/");
+ final Configuration config = new Configuration();
+ // Disable permissions so that another user can recover the lease.
+ config.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+ config.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+ FSDataOutputStream stm = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(config).numDataNodes(3).build();
+ cluster.waitActive();
+
+ final DistributedFileSystem fs = cluster.getFileSystem();
+ final Path testPath = new Path("/testfile");
+ stm = fs.create(testPath);
+ LOG.info("test on " + testPath);
+
+ // write a half block
+ AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
+ stm.hflush();
+
+ // create a snapshot, so delete does not release the file's inode.
+ SnapshotTestHelper.createSnapshot(fs, rootPath, "snap");
+
+ // delete the file without closing it.
+ fs.delete(testPath, false);
+
+ // write enough bytes to trigger an addBlock, which would fail in
+ // the streamer.
+ AppendTestUtil.write(stm, 0, BLOCK_SIZE);
+
+ // Mock a scenario in which the lease has reached the hard limit.
+ final LeaseManager lm = (LeaseManager) Whitebox
+ .getInternalState(cluster.getNameNode().getNamesystem(),
+ "leaseManager");
+ final TreeSet<Lease> leases =
+ (TreeSet<Lease>) Whitebox.getInternalState(lm, "sortedLeases");
+ final TreeSet<Lease> spyLeases = new TreeSet<>(new Comparator<Lease>() {
+ @Override
+ public int compare(Lease o1, Lease o2) {
+ return Long.signum(o1.getLastUpdate() - o2.getLastUpdate());
+ }
+ });
+ while (!leases.isEmpty()) {
+ final Lease lease = leases.first();
+ final Lease spyLease = Mockito.spy(lease);
+ Mockito.doReturn(true).when(spyLease).expiredHardLimit();
+ spyLeases.add(spyLease);
+ leases.remove(lease);
+ }
+ Whitebox.setInternalState(lm, "sortedLeases", spyLeases);
+
+ // wait for lease manager's background 'Monitor' class to check leases.
+ Thread.sleep(2 * conf.getLong(DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_KEY,
+ DFS_NAMENODE_LEASE_RECHECK_INTERVAL_MS_DEFAULT));
+
+ LOG.info("Now check we can restart");
+ cluster.restartNameNodes();
+ LOG.info("Restart finished");
+ } finally {
+ if (stm != null) {
+ IOUtils.closeStream(stm);
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java
index d769f8bc6b7..63bfa27b4c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java
@@ -28,10 +28,8 @@
import org.junit.Test;
import org.junit.rules.Timeout;
-import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
-import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@@ -47,7 +45,7 @@ public class TestEnabledECPolicies {
private void expectInvalidPolicy(String value) {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+ conf.set(DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
value);
try {
ErasureCodingPolicyManager.getInstance().init(conf);
@@ -60,11 +58,10 @@ private void expectInvalidPolicy(String value) {
private void expectValidPolicy(String value, final int numEnabled) throws
Exception {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- value);
ErasureCodingPolicyManager manager =
ErasureCodingPolicyManager.getInstance();
manager.init(conf);
+ manager.enablePolicy(value);
assertEquals("Incorrect number of enabled policies",
numEnabled, manager.getEnabledPolicies().length);
}
@@ -73,8 +70,8 @@ private void expectValidPolicy(String value, final int numEnabled) throws
public void testDefaultPolicy() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
String defaultECPolicies = conf.get(
- DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT);
+ DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
+ DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
expectValidPolicy(defaultECPolicies, 1);
}
@@ -97,11 +94,6 @@ public void testInvalid() throws Exception {
public void testValid() throws Exception {
String ecPolicyName = StripedFileTestUtil.getDefaultECPolicy().getName();
expectValidPolicy(ecPolicyName, 1);
- expectValidPolicy(ecPolicyName + ", ", 1);
- expectValidPolicy(",", 1);
- expectValidPolicy(", " + ecPolicyName, 1);
- expectValidPolicy(" ", 1);
- expectValidPolicy(" , ", 1);
}
@Test
@@ -128,13 +120,12 @@ public void testGetPolicies() throws Exception {
private void testGetPolicies(ErasureCodingPolicy[] enabledPolicies)
throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- Arrays.asList(enabledPolicies).stream()
- .map(ErasureCodingPolicy::getName)
- .collect(Collectors.joining(", ")));
ErasureCodingPolicyManager manager =
ErasureCodingPolicyManager.getInstance();
manager.init(conf);
+ for (ErasureCodingPolicy p : enabledPolicies) {
+ manager.enablePolicy(p.getName());
+ }
// Check that returned values are unique
Set<String> found = new HashSet<>();
for (ErasureCodingPolicy p : manager.getEnabledPolicies()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 4467dc1068e..ec80bff3e0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -47,8 +47,10 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
@@ -57,6 +59,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
@@ -458,8 +461,6 @@ public void testFSEditLogOpCodes() throws IOException {
public void testAddNewStripedBlock() throws IOException{
// start a cluster
Configuration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- testECPolicy.getName());
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
@@ -467,6 +468,7 @@ public void testAddNewStripedBlock() throws IOException{
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
FSNamesystem fns = cluster.getNamesystem();
+ fs.enableErasureCodingPolicy(testECPolicy.getName());
String testDir = "/ec";
String testFile = "testfile_001";
@@ -533,8 +535,6 @@ public void testAddNewStripedBlock() throws IOException{
public void testUpdateStripedBlocks() throws IOException{
// start a cluster
Configuration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- testECPolicy.getName());
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
@@ -542,6 +542,7 @@ public void testUpdateStripedBlocks() throws IOException{
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
FSNamesystem fns = cluster.getNamesystem();
+ fs.enableErasureCodingPolicy(testECPolicy.getName());
String testDir = "/ec";
String testFile = "testfile_002";
@@ -714,4 +715,84 @@ public void testHasNonEcBlockUsingStripedIDForUpdateBlocks()
}
}
}
+
+ @Test
+ public void testErasureCodingPolicyOperations() throws IOException {
+ // start a cluster
+ Configuration conf = new HdfsConfiguration();
+ final int blockSize = 16 * 1024;
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
+ .build();
+ cluster.waitActive();
+ DistributedFileSystem fs = cluster.getFileSystem();
+
+ // 1. add new policy
+ ECSchema schema = new ECSchema("rs", 5, 3);
+ int cellSize = 2 * 1024;
+ ErasureCodingPolicy newPolicy =
+ new ErasureCodingPolicy(schema, cellSize, (byte) 0);
+ ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[]{newPolicy};
+ AddECPolicyResponse[] responses =
+ fs.addErasureCodingPolicies(policyArray);
+ assertEquals(1, responses.length);
+ assertTrue(responses[0].isSucceed());
+ newPolicy = responses[0].getPolicy();
+
+ // Restart NameNode without saving namespace
+ cluster.restartNameNodes();
+ cluster.waitActive();
+
+ // check if new policy is reapplied through edit log
+ ErasureCodingPolicy ecPolicy =
+ ErasureCodingPolicyManager.getInstance().getByID(newPolicy.getId());
+ assertEquals(ErasureCodingPolicyState.DISABLED, ecPolicy.getState());
+
+ // 2. enable policy
+ fs.enableErasureCodingPolicy(newPolicy.getName());
+ cluster.restartNameNodes();
+ cluster.waitActive();
+ ecPolicy =
+ ErasureCodingPolicyManager.getInstance().getByID(newPolicy.getId());
+ assertEquals(ErasureCodingPolicyState.ENABLED, ecPolicy.getState());
+
+ // create a new file, use the policy
+ final Path dirPath = new Path("/striped");
+ final Path filePath = new Path(dirPath, "file");
+ final int fileLength = blockSize * newPolicy.getNumDataUnits();
+ fs.mkdirs(dirPath);
+ fs.setErasureCodingPolicy(dirPath, newPolicy.getName());
+ final byte[] bytes = StripedFileTestUtil.generateBytes(fileLength);
+ DFSTestUtil.writeFile(fs, filePath, bytes);
+
+ // 3. disable policy
+ fs.disableErasureCodingPolicy(newPolicy.getName());
+ cluster.restartNameNodes();
+ cluster.waitActive();
+ ecPolicy =
+ ErasureCodingPolicyManager.getInstance().getByID(newPolicy.getId());
+ assertEquals(ErasureCodingPolicyState.DISABLED, ecPolicy.getState());
+ // read file
+ DFSTestUtil.readFileAsBytes(fs, filePath);
+
+ // 4. remove policy
+ fs.removeErasureCodingPolicy(newPolicy.getName());
+ cluster.restartNameNodes();
+ cluster.waitActive();
+ ecPolicy =
+ ErasureCodingPolicyManager.getInstance().getByID(newPolicy.getId());
+ assertEquals(ErasureCodingPolicyState.REMOVED, ecPolicy.getState());
+ // read file
+ DFSTestUtil.readFileAsBytes(fs, filePath);
+
+ cluster.shutdown();
+ cluster = null;
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
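The new edit-log test above exercises the full policy lifecycle through the DistributedFileSystem API. The condensed sketch below restates that lifecycle using only calls that appear in this patch; the helper class, the rs(5,3) schema, and the 2 KB cell size are illustrative assumptions, not part of the change.

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;

// Condensed sketch (not part of the patch) of the policy lifecycle the new
// tests exercise; the rs(5,3) schema and 2 KB cell size are illustrative.
public final class EcPolicyLifecycleSketch {
  public static ErasureCodingPolicy runLifecycle(DistributedFileSystem fs)
      throws IOException {
    ECSchema schema = new ECSchema("rs", 5, 3);
    ErasureCodingPolicy candidate =
        new ErasureCodingPolicy(schema, 2 * 1024, (byte) 0);
    // 1. Add: the policy is registered but starts out DISABLED.
    AddECPolicyResponse[] responses =
        fs.addErasureCodingPolicies(new ErasureCodingPolicy[]{candidate});
    ErasureCodingPolicy added = responses[0].getPolicy();
    // 2. Enable: only enabled policies can be attached to directories.
    fs.enableErasureCodingPolicy(added.getName());
    // 3. Disable: existing striped files remain readable.
    fs.disableErasureCodingPolicy(added.getName());
    // 4. Remove: the policy moves to the REMOVED state.
    fs.removeErasureCodingPolicy(added.getName());
    return added;
  }
}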
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 9256056b4e1..c9d3255a310 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
@@ -34,7 +35,9 @@
import java.util.EnumSet;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -43,6 +46,8 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.ipc.RemoteException;
import org.junit.Assert;
import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -241,11 +246,11 @@ private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration con
@Test
public void testSaveAndLoadStripedINodeFile() throws IOException{
Configuration conf = new Configuration();
- DFSTestUtil.enableAllECPolicies(conf);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
+ DFSTestUtil.enableAllECPolicies(cluster.getFileSystem());
testSaveAndLoadStripedINodeFile(cluster.getNamesystem(), conf, false);
} finally {
if (cluster != null) {
@@ -262,11 +267,11 @@ public void testSaveAndLoadStripedINodeFile() throws IOException{
public void testSaveAndLoadStripedINodeFileUC() throws IOException {
// construct a INode with StripedBlock for saving and loading
Configuration conf = new Configuration();
- DFSTestUtil.enableAllECPolicies(conf);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
+ DFSTestUtil.enableAllECPolicies(cluster.getFileSystem());
testSaveAndLoadStripedINodeFile(cluster.getNamesystem(), conf, true);
} finally {
if (cluster != null) {
@@ -462,13 +467,13 @@ public void testSupportBlockGroup() throws Exception {
final int BLOCK_SIZE = 8 * 1024 * 1024;
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
- DFSTestUtil.enableAllECPolicies(conf);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE)
.build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
+ DFSTestUtil.enableAllECPolicies(fs);
Path parentDir = new Path("/ec-10-4");
Path childDir = new Path(parentDir, "ec-3-2");
ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies
@@ -732,13 +737,13 @@ public void testBlockTypeProtoDefaultsToContiguous() throws Exception {
public void testSaveAndLoadFileUnderReplicationPolicyDir()
throws IOException {
Configuration conf = new Configuration();
- DFSTestUtil.enableAllECPolicies(conf);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNamesystem();
DistributedFileSystem fs = cluster.getFileSystem();
+ DFSTestUtil.enableAllECPolicies(fs);
ErasureCodingPolicy replicaPolicy =
SystemErasureCodingPolicies.getReplicationPolicy();
ErasureCodingPolicy defaultEcPolicy =
@@ -810,4 +815,150 @@ public void testSaveAndLoadFileUnderReplicationPolicyDir()
}
}
}
+
+ /**
+ * Test persist and load erasure coding policies.
+ */
+ @Test
+ public void testSaveAndLoadErasureCodingPolicies() throws IOException{
+ Configuration conf = new Configuration();
+ final int blockSize = 16 * 1024 * 1024;
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+ try (MiniDFSCluster cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(10).build()) {
+ cluster.waitActive();
+ DistributedFileSystem fs = cluster.getFileSystem();
+ DFSTestUtil.enableAllECPolicies(fs);
+
+ // Save namespace and restart NameNode
+ fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ fs.saveNamespace();
+ fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+ cluster.restartNameNodes();
+ cluster.waitActive();
+
+ assertEquals("Erasure coding policy number should match",
+ SystemErasureCodingPolicies.getPolicies().size(),
+ ErasureCodingPolicyManager.getInstance().getPolicies().length);
+
+ // Add new erasure coding policy
+ ECSchema newSchema = new ECSchema("rs", 5, 4);
+ ErasureCodingPolicy newPolicy =
+ new ErasureCodingPolicy(newSchema, 2 * 1024, (byte) 254);
+ ErasureCodingPolicy[] policies = new ErasureCodingPolicy[]{newPolicy};
+ AddECPolicyResponse[] ret = fs.addErasureCodingPolicies(policies);
+ assertEquals(1, ret.length);
+ assertEquals(true, ret[0].isSucceed());
+ newPolicy = ret[0].getPolicy();
+
+ // Save namespace and restart NameNode
+ fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ fs.saveNamespace();
+ fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+ cluster.restartNameNodes();
+ cluster.waitActive();
+
+ assertEquals("Erasure coding policy number should match",
+ SystemErasureCodingPolicies.getPolicies().size() + 1,
+ ErasureCodingPolicyManager.getInstance().getPolicies().length);
+ ErasureCodingPolicy ecPolicy =
+ ErasureCodingPolicyManager.getInstance().getByID(newPolicy.getId());
+ assertEquals("Newly added erasure coding policy is not found",
+ newPolicy, ecPolicy);
+ assertEquals(
+ "Newly added erasure coding policy should be of disabled state",
+ ErasureCodingPolicyState.DISABLED, ecPolicy.getState());
+
+ // Test enable/disable/remove user customized erasure coding policy
+ testChangeErasureCodingPolicyState(cluster, blockSize, newPolicy);
+ // Test enable/disable built-in erasure coding policy
+ testChangeErasureCodingPolicyState(cluster, blockSize,
+ SystemErasureCodingPolicies.getByID((byte) 1));
+ }
+ }
+
+
+ private void testChangeErasureCodingPolicyState(MiniDFSCluster cluster,
+ int blockSize, ErasureCodingPolicy targetPolicy) throws IOException {
+ DistributedFileSystem fs = cluster.getFileSystem();
+
+ // 1. Enable an erasure coding policy
+ fs.enableErasureCodingPolicy(targetPolicy.getName());
+ targetPolicy.setState(ErasureCodingPolicyState.ENABLED);
+ // Create file, using the new policy
+ final Path dirPath = new Path("/striped");
+ final Path filePath = new Path(dirPath, "file");
+ final int fileLength = blockSize * targetPolicy.getNumDataUnits();
+ fs.mkdirs(dirPath);
+ fs.setErasureCodingPolicy(dirPath, targetPolicy.getName());
+ final byte[] bytes = StripedFileTestUtil.generateBytes(fileLength);
+ DFSTestUtil.writeFile(fs, filePath, bytes);
+
+
+ // Save namespace and restart NameNode
+ fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ fs.saveNamespace();
+ fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+ cluster.restartNameNodes();
+ cluster.waitActive();
+ ErasureCodingPolicy ecPolicy =
+ ErasureCodingPolicyManager.getInstance().getByID(targetPolicy.getId());
+ assertEquals("The erasure coding policy is not found",
+ targetPolicy, ecPolicy);
+ assertEquals("The erasure coding policy should be of enabled state",
+ ErasureCodingPolicyState.ENABLED, ecPolicy.getState());
+ // Read file regardless of the erasure coding policy state
+ DFSTestUtil.readFileAsBytes(fs, filePath);
+
+ // 2. Disable an erasure coding policy
+ fs.disableErasureCodingPolicy(ecPolicy.getName());
+ targetPolicy.setState(ErasureCodingPolicyState.DISABLED);
+ // Save namespace and restart NameNode
+ fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ fs.saveNamespace();
+ fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+ cluster.restartNameNodes();
+ cluster.waitActive();
+ ecPolicy =
+ ErasureCodingPolicyManager.getInstance().getByID(targetPolicy.getId());
+ assertEquals("The erasure coding policy is not found",
+ targetPolicy, ecPolicy);
+ assertEquals("The erasure coding policy should be of disabled state",
+ ErasureCodingPolicyState.DISABLED, ecPolicy.getState());
+ // Read file regardless of the erasure coding policy state
+ DFSTestUtil.readFileAsBytes(fs, filePath);
+
+ // 3. Remove an erasure coding policy
+ try {
+ fs.removeErasureCodingPolicy(ecPolicy.getName());
+ } catch (RemoteException e) {
+ // a built-in policy cannot be removed
+ assertTrue("Built-in policy cannot be removed",
+ ecPolicy.isSystemPolicy());
+ assertExceptionContains("System erasure coding policy", e);
+ return;
+ }
+
+ targetPolicy.setState(ErasureCodingPolicyState.REMOVED);
+ // Save namespace and restart NameNode
+ fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ fs.saveNamespace();
+ fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+ cluster.restartNameNodes();
+ cluster.waitActive();
+ ecPolicy = ErasureCodingPolicyManager.getInstance().getByID(
+ targetPolicy.getId());
+ assertEquals("The erasure coding policy saved into and loaded from " +
+ "fsImage is bad", targetPolicy, ecPolicy);
+ assertEquals("The erasure coding policy should be of removed state",
+ ErasureCodingPolicyState.REMOVED, ecPolicy.getState());
+ // Read file regardless of the erasure coding policy state
+ DFSTestUtil.readFileAsBytes(fs, filePath);
+ fs.delete(dirPath, true);
+ }
}
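The hunks above all follow the same pattern: instead of whitelisting erasure coding policies through the DFS_NAMENODE_EC_POLICIES_ENABLED_KEY configuration before the cluster starts, the policy is enabled on the running NameNode and its state is persisted across a saveNamespace/restart cycle. A minimal sketch of that flow, assembled only from calls that already appear in the hunks above; the datanode count and the "/striped" path are illustrative:

    Configuration conf = new Configuration();
    try (MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(9).build()) {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      // Enable the policy at runtime instead of via configuration.
      fs.enableErasureCodingPolicy(
          StripedFileTestUtil.getDefaultECPolicy().getName());
      Path dir = new Path("/striped");
      fs.mkdirs(dir);
      fs.setErasureCodingPolicy(dir,
          StripedFileTestUtil.getDefaultECPolicy().getName());
      // Persist the policy state into the fsimage and verify it survives
      // a NameNode restart.
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      fs.saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      cluster.restartNameNodes();
      cluster.waitActive();
    }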
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 7cdbde21d0b..558e3377708 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -685,13 +685,12 @@ public void testFsckOpenECFiles() throws Exception {
final int numAllUnits = dataBlocks + ecPolicy.getNumParityUnits();
int blockSize = 2 * cellSize;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
numAllUnits + 1).build();
String topDir = "/myDir";
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(ecPolicy.getName());
util.createFiles(fs, topDir);
// set topDir to EC when it has replicated files
cluster.getFileSystem().getClient().setErasureCodingPolicy(
@@ -1999,19 +1998,19 @@ public Boolean get() {
@Test
public void testECFsck() throws Exception {
- FileSystem fs = null;
+ DistributedFileSystem fs = null;
final long precision = 1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
int parityBlocks =
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
int totalSize = dataBlocks + parityBlocks;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
// create a contiguous file
Path replDirPath = new Path("/replicated");
@@ -2301,11 +2300,11 @@ public void testFsckCorruptECFile() throws Exception {
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
int totalSize = dataBlocks + parityBlocks;
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(totalSize).build();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
Map<Integer, Integer> dnIndices = new HashMap<>();
ArrayList<DataNode> dnList = cluster.getDataNodes();
for (int i = 0; i < totalSize; i++) {
@@ -2372,11 +2371,11 @@ public void testFsckMissingECFile() throws Exception {
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
int totalSize = dataBlocks + parityBlocks;
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(totalSize).build();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
// create file
Path ecDirPath = new Path("/striped");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 937bb61c7ec..36638e00195 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -726,8 +726,6 @@ public void testVerifyMissingBlockGroupsMetrics() throws Exception {
DistributedFileSystem fs = null;
try {
Configuration conf = new HdfsConfiguration();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
int parityBlocks =
StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
@@ -736,6 +734,8 @@ public void testVerifyMissingBlockGroupsMetrics() throws Exception {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(totalSize).build();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
// create file
Path ecDirPath = new Path("/striped");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index d217813bd5f..42ff6989e1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -436,7 +436,7 @@ public void testRetryCacheRebuild() throws Exception {
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
- assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+ assertEquals("Retry cache size is wrong", 34, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -455,7 +455,7 @@ public void testRetryCacheRebuild() throws Exception {
assertTrue(namesystem.hasRetryCache());
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
.getRetryCache().getCacheSet();
- assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+ assertEquals("Retry cache size is wrong", 34, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index f97492b7e04..9995393e675 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -65,13 +65,12 @@ public class TestQuotaWithStripedBlocks {
public void setUp() throws IOException {
final Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
cluster.waitActive();
dir = cluster.getNamesystem().getFSDirectory();
dfs = cluster.getFileSystem();
+ dfs.enableErasureCodingPolicy(ecPolicy.getName());
dfs.mkdirs(ecDir);
dfs.getClient()
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
index 02075f045d0..46907fd64e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
@@ -110,13 +110,12 @@ private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy)
throws Exception {
Configuration conf = new HdfsConfiguration();
initConf(conf);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1)
.build();
-
try {
cluster.waitActive();
+ cluster.getFileSystem().enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
final int numBlocks = 4;
DFSTestUtil.createStripedFile(cluster, filePath,
dirPath, numBlocks, 1, true);
@@ -203,14 +202,14 @@ public void test2RecoveryTasksForSameBlockGroup() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
1000);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
.build();
try {
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
BlockManager bm = cluster.getNamesystem().getBlockManager();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
fs.getClient().setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
int fileLen = dataBlocks * blockSize;
@@ -280,13 +279,12 @@ public void testCountLiveReplicas() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
.build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
-
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
try {
fs.mkdirs(dirPath);
fs.setErasureCodingPolicy(dirPath,
@@ -383,8 +381,6 @@ public void testReconstructionWork() throws Exception {
ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByID(
SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
- conf.setStrings(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- policy.getName());
Path ecDir = new Path("/ec");
Path ecFilePath = new Path(ecDir, "ec-file");
int blockGroups = 2;
@@ -396,6 +392,7 @@ public void testReconstructionWork() throws Exception {
try {
// create an EC file with 2 block groups
final DistributedFileSystem fs = dfsCluster.getFileSystem();
+ fs.enableErasureCodingPolicy(policy.getName());
fs.mkdirs(ecDir);
fs.setErasureCodingPolicy(ecDir, policy.getName());
DFSTestUtil.createStripedFile(dfsCluster, ecFilePath, ecDir,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
index 5612d6597fe..33c52bf81b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
@@ -91,7 +91,7 @@ public class TestReencryption {
private FileSystemTestHelper fsHelper;
private MiniDFSCluster cluster;
- private HdfsAdmin dfsAdmin;
+ protected HdfsAdmin dfsAdmin;
private DistributedFileSystem fs;
private FSNamesystem fsn;
private File testRootDir;
@@ -199,8 +199,7 @@ public void testReencryptionBasic() throws Exception {
verifyZoneStatus(zone, null, 0);
// test re-encrypt after keyroll
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(2);
FileEncryptionInfo fei1 = getFileEncryptionInfo(encFile1);
@@ -316,8 +315,7 @@ public void testReencryptOrdering() throws Exception {
final Path notReencrypted = new Path(zone, "f0");
final FileEncryptionInfo fei = getFileEncryptionInfo(lastReencryptedFile);
final FileEncryptionInfo feiLast = getFileEncryptionInfo(notReencrypted);
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// mark pause after first checkpoint (5 files)
getEzManager().pauseForTestingAfterNthSubmission(1);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -363,8 +361,7 @@ public void testZoneDeleteDuringReencrypt() throws Exception {
0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// test zone deleted during re-encrypt's checkpointing
getEzManager().pauseForTestingAfterNthSubmission(1);
getEzManager().resetMetricsForTesting();
@@ -409,8 +406,7 @@ public void testRestartAfterReencrypt() throws Exception {
final Path encFile9 = new Path(zone, "9");
final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(1);
@@ -443,8 +439,7 @@ public void testRestartWithRenames() throws Exception {
fsWrapper.rename(new Path(zone, "f"), new Path(zone, "f1"));
// re-encrypt
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(1);
@@ -495,8 +490,7 @@ public void testRestartDuringReencrypt() throws Exception {
final Path encFile9 = new Path(subdir, "9");
final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// mark pause after first checkpoint (5 files)
getEzManager().pauseForTestingAfterNthSubmission(1);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -540,8 +534,7 @@ public void testRestartAfterReencryptAndCheckpoint() throws Exception {
final Path encFile9 = new Path(zone, "9");
final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(1);
@@ -585,8 +578,7 @@ public void testReencryptLoadedFromEdits() throws Exception {
final Path encFile9 = new Path(zone, "9");
final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// disable re-encrypt for testing, and issue a command
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -645,8 +637,7 @@ public void testReencryptLoadedFromFsimage() throws Exception {
final Path encFile9 = new Path(zone, "9");
final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile0);
final FileEncryptionInfo fei9 = getFileEncryptionInfo(encFile9);
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// disable re-encrypt for testing, and issue a command
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -770,8 +761,7 @@ public void testReencryptNestedZones() throws Exception {
0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Disable re-encrypt, send re-encrypt on '/', verify queue
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zoneRoot, ReencryptAction.START);
@@ -816,8 +806,7 @@ public void testRaceCreateHandler() throws Exception {
.createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Issue the command re-encrypt and pause it
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -883,8 +872,7 @@ public void testRaceDeleteHandler() throws Exception {
.createFile(fs, new Path(subdir, "file" + i), len, (short) 1, 0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Issue the command re-encrypt and pause it
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -930,8 +918,7 @@ public void testRaceDeleteUpdater() throws Exception {
.createFile(fs, new Path(subdir, "file" + i), len, (short) 1, 0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Issue the command re-encrypt and pause it
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -984,8 +971,7 @@ public void testRaceDeleteCurrentDirHandler() throws Exception {
0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Issue the command re-encrypt and pause it
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -1029,8 +1015,7 @@ public void testRaceDeleteCurrentDirUpdater() throws Exception {
0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Issue the command re-encrypt and pause it
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -1071,8 +1056,7 @@ public void testRaceDeleteZoneHandler() throws Exception {
.createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Issue the command re-encrypt and pause it
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -1122,8 +1106,7 @@ public void testRaceDeleteCreateHandler() throws Exception {
.createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Issue the command re-encrypt and pause it
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -1162,8 +1145,7 @@ public void testRaceDeleteCreateUpdater() throws Exception {
.createFile(fs, new Path(zone, "file" + i), len, (short) 1, 0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Issue the command re-encrypt and pause it
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -1220,8 +1202,7 @@ public void testReencryptRaceRename() throws Exception {
.createFile(fs, new Path(subdir, "file" + i), len, (short) 1, 0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// Issue the command re-encrypt and pause it
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -1283,8 +1264,7 @@ public void testReencryptSnapshots() throws Exception {
// test re-encrypt on snapshot dir
final Path encFile1 = new Path(zone, "0");
final FileEncryptionInfo fei0 = getFileEncryptionInfo(encFile1);
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
try {
dfsAdmin.reencryptEncryptionZone(zoneSnap, ReencryptAction.START);
fail("Reencrypt command on snapshot path should fail.");
@@ -1423,8 +1403,7 @@ public void testReencryptCancel() throws Exception {
fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// disable, test basic
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -1442,8 +1421,7 @@ public void testReencryptCancel() throws Exception {
assertExceptionContains("not under re-encryption", expected);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// test cancelling half-way
getEzManager().pauseForTestingAfterNthSubmission(1);
getEzManager().resumeReencryptForTesting();
@@ -1537,8 +1515,7 @@ public void reencryptEncryptedKeys() throws IOException {
// re-encrypt 10 files, so 2 callables. Hang 1, pause the updater so the
// callable is taken from the executor but not processed.
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
getEzManager().pauseReencryptForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForQueuedZones(1);
@@ -1593,8 +1570,7 @@ public void testReencryptCancelForUpdater() throws Exception {
fsWrapper.mkdir(subdir, FsPermission.getDirDefault(), true);
DFSTestUtil.createFile(fs, new Path(subdir, "f"), len, (short) 1, 0xFEED);
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// disable, test basic
getEzManager().pauseReencryptUpdaterForTesting();
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -1625,8 +1601,7 @@ public void testReencryptionWithoutProvider() throws Exception {
}
// re-encrypt the zone
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(1);
@@ -1678,8 +1653,7 @@ public void testReencryptionNNSafeMode() throws Exception {
0xFEED);
}
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
+ rollKey(TEST_KEY);
// mark pause after first checkpoint (5 files)
getEzManager().pauseForTestingAfterNthSubmission(1);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
@@ -1736,9 +1710,7 @@ public void reencryptEncryptedKeys() throws IOException {
}
// re-encrypt the zone
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
-
+ rollKey(TEST_KEY);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(1);
assertEquals(0, injector.exceptionCount);
@@ -1790,9 +1762,7 @@ public void reencryptUpdaterProcessOneTask() throws IOException {
}
// re-encrypt the zone
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
-
+ rollKey(TEST_KEY);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(1);
assertEquals(0, injector.exceptionCount);
@@ -1845,9 +1815,7 @@ public void reencryptUpdaterProcessCheckpoint() throws IOException {
}
// re-encrypt the zone
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
-
+ rollKey(TEST_KEY);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(1);
assertEquals(0, injector.exceptionCount);
@@ -1899,9 +1867,7 @@ public void reencryptUpdaterProcessOneTask() throws IOException {
}
// re-encrypt the zone
- fsn.getProvider().rollNewVersion(TEST_KEY);
- fsn.getProvider().flush();
-
+ rollKey(TEST_KEY);
Whitebox.setInternalState(getUpdater(), "faultRetryInterval", 50);
dfsAdmin.reencryptEncryptionZone(zone, ReencryptAction.START);
waitForReencryptedZones(1);
@@ -1929,4 +1895,11 @@ private ReencryptionUpdater getUpdater() {
return (ReencryptionUpdater) Whitebox
.getInternalState(getHandler(), "reencryptionUpdater");
}
+
+ protected void rollKey(final String keyName) throws Exception {
+ dfsAdmin.getKeyProvider().rollNewVersion(keyName);
+ // Flush is needed for the JCEKS provider so that the key version it
+ // returns after a NameNode restart stays consistent.
+ dfsAdmin.getKeyProvider().flush();
+ }
}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionWithKMS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionWithKMS.java
index af9c381ac86..642d5e53707 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionWithKMS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionWithKMS.java
@@ -88,4 +88,9 @@ public void testReencryptionKMSACLs() throws Exception {
KMSWebApp.getACLs().run();
testReencryptionBasic();
}
+
+ @Override
+ protected void rollKey(final String keyName) throws Exception {
+ dfsAdmin.getKeyProvider().rollNewVersion(keyName);
+ }
}
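The rollKey() hook introduced above centralizes key rolling for the re-encryption tests: the base class flushes the provider so a file-backed (JCEKS) keystore persists the rolled key version across a NameNode restart, while the KMS-backed subclass overrides the hook and skips the flush. A sketch of that shape; apart from the explanatory comments, every call mirrors the patch itself:

    protected void rollKey(final String keyName) throws Exception {
      // Generate a new version of the key backing the encryption zone.
      dfsAdmin.getKeyProvider().rollNewVersion(keyName);
      // A JCEKS provider only persists the new version on flush(); without it,
      // the key version returned after a NameNode restart may differ. The
      // KMS-backed subclass overrides rollKey() and omits this step.
      dfsAdmin.getKeyProvider().flush();
    }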
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 94172bbe696..d5f548736f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -577,7 +577,6 @@ public void testCorruptImageFallbackLostECPolicy() throws IOException {
.getDefaultECPolicy();
final String policy = defaultPolicy.getName();
final Path f1 = new Path("/f1");
- config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, policy);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
.numDataNodes(0)
@@ -586,6 +585,7 @@ public void testCorruptImageFallbackLostECPolicy() throws IOException {
try {
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(policy);
// set root directory to use the default ec policy
Path srcECDir = new Path("/");
fs.setErasureCodingPolicy(srcECDir,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index c71d049243b..468e47fd18a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -318,8 +318,6 @@ public void testDeleteOp() throws Exception {
final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() +
testECPolicy.getNumParityUnits());
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE)
.build();
@@ -327,6 +325,8 @@ public void testDeleteOp() throws Exception {
FSNamesystem fsn = cluster.getNamesystem();
dfs = cluster.getFileSystem();
+ dfs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
dfs.mkdirs(ecDir);
// set erasure coding policy
@@ -395,8 +395,6 @@ public void testUnsuitableStoragePoliciesWithECStripedMode()
1L);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
false);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
// start 10 datanodes
int numOfDatanodes = 10;
@@ -426,6 +424,8 @@ public void testUnsuitableStoragePoliciesWithECStripedMode()
try {
cluster.waitActive();
+ cluster.getFileSystem().enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
// set "/foo" directory with ONE_SSD storage policy.
ClientProtocol client = NameNodeProxies.createProxy(conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index b40006be732..1d114d62e4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -166,7 +166,7 @@ public void testRetryCacheOnStandbyNN() throws Exception {
FSNamesystem fsn0 = cluster.getNamesystem(0);
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
- assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+ assertEquals("Retry cache size is wrong", 34, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
@@ -187,7 +187,7 @@ public void testRetryCacheOnStandbyNN() throws Exception {
FSNamesystem fsn1 = cluster.getNamesystem(1);
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
.getRetryCache().getCacheSet();
- assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+ assertEquals("Retry cache size is wrong", 34, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index b983fd16262..077a5f898a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -137,9 +137,6 @@ public class TestNameNodeMetrics {
// Enable stale DataNodes checking
CONF.setBoolean(
DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
- // Enable erasure coding
- CONF.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- EC_POLICY.getName());
GenericTestUtils.setLogLevel(LogFactory.getLog(MetricsAsserts.class),
Level.DEBUG);
}
@@ -166,6 +163,7 @@ public void setUp() throws Exception {
namesystem = cluster.getNamesystem();
bm = namesystem.getBlockManager();
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(EC_POLICY.getName());
ecDir = getTestPath("/ec");
fs.mkdirs(ecDir);
fs.setErasureCodingPolicy(ecDir, EC_POLICY.getName());
@@ -449,7 +447,7 @@ private void verifyAggregatedMetricsTally() throws Exception {
assertEquals("Pending deletion blocks metrics not matching!",
namesystem.getPendingDeletionBlocks(),
namesystem.getPendingDeletionReplicatedBlocks() +
- namesystem.getPendingDeletionECBlockGroups());
+ namesystem.getPendingDeletionECBlocks());
}
/** Corrupt a block and ensure metrics reflects it */
@@ -891,7 +889,7 @@ public void testTransactionAndCheckpointMetrics() throws Exception {
public void testSyncAndBlockReportMetric() throws Exception {
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
// We have one sync when the cluster starts up, just opening the journal
- assertCounter("SyncsNumOps", 3L, rb);
+ assertCounter("SyncsNumOps", 4L, rb);
// Each datanode reports in when the cluster comes up
assertCounter("StorageBlockReportNumOps",
(long) DATANODE_COUNT * cluster.getStoragesPerDatanode(), rb);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 9b8260814a6..ae8f585e2c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -537,8 +537,6 @@ public void testReportCommand() throws Exception {
final Configuration dfsConf = new HdfsConfiguration();
ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(
SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
- dfsConf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
dfsConf.setInt(
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
dfsConf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -568,6 +566,7 @@ public void testReportCommand() throws Exception {
final long fileLength = 512L;
final DistributedFileSystem fs = miniCluster.getFileSystem();
final Path file = new Path(baseDir, "/corrupted");
+ fs.enableErasureCodingPolicy(ecPolicy.getName());
DFSTestUtil.createFile(fs, file, fileLength, replFactor, 12345L);
DFSTestUtil.waitReplication(fs, file, replFactor);
final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
@@ -781,9 +780,9 @@ private void verifyNodesAndCorruptBlocks(
assertEquals(numCorruptBlocks + numCorruptECBlockGroups,
client.getCorruptBlocksCount());
assertEquals(numCorruptBlocks, client.getNamenode()
- .getBlocksStats().getCorruptBlocksStat());
+ .getReplicatedBlockStats().getCorruptBlocks());
assertEquals(numCorruptECBlockGroups, client.getNamenode()
- .getECBlockGroupsStats().getCorruptBlockGroupsStat());
+ .getECBlockGroupStats().getCorruptBlockGroups());
}
@Test
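The updated assertions in TestDFSAdmin reflect the renamed statistics accessors, which split the corrupt-block counts between the replicated and erasure coded paths. A short sketch of reading both counters, assuming an open DistributedFileSystem handle named fs; the getter names are taken directly from the assertions above:

    DFSClient client = fs.getClient();
    long corruptReplicated =
        client.getNamenode().getReplicatedBlockStats().getCorruptBlocks();
    long corruptEcGroups =
        client.getNamenode().getECBlockGroupStats().getCorruptBlockGroups();
    // The client-level count is the sum of the two.
    assertEquals(corruptReplicated + corruptEcGroups,
        client.getCorruptBlocksCount());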
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
index bbad73c0418..9e1fa79a52f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
@@ -182,7 +182,7 @@ public void testStored() throws IOException {
hasAllOpCodes(editsStored));
assertTrue("Reference XML edits and parsed to XML should be same",
FileUtils.contentEqualsIgnoreEOL(new File(editsStoredXml),
- new File(editsStoredParsedXml), "UTF-8"));
+ new File(editsStoredParsedXml), "UTF-8"));
assertTrue(
"Reference edits and reparsed (bin to XML to bin) should be same",
filesEqualIgnoreTrailingZeros(editsStored, editsStoredReparsed));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 46f194107ce..b32b308958b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -137,11 +137,10 @@ public static void createOriginalFSImage() throws IOException {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
"RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- ecPolicy.getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
DistributedFileSystem hdfs = cluster.getFileSystem();
+ hdfs.enableErasureCodingPolicy(ecPolicy.getName());
// Create a reasonable namespace
for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index d04ef99d630..187b297b42d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -61,13 +61,13 @@ public void setup() throws IOException {
int numDNs = dataBlocks + parityBlocks + 2;
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- StripedFileTestUtil.getDefaultECPolicy().getName());
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.waitActive();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
StripedFileTestUtil.getDefaultECPolicy().getName());
fs = cluster.getFileSystem();
+ fs.enableErasureCodingPolicy(
+ StripedFileTestUtil.getDefaultECPolicy().getName());
Path eczone = new Path("/eczone");
fs.mkdirs(eczone);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 60d90fb37b4..de051b3d5d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -29,7 +29,6 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.io.PrintWriter;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.SocketException;
@@ -39,16 +38,8 @@
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
import java.util.Random;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.MediaType;
-
import com.google.common.collect.ImmutableList;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
@@ -80,12 +71,8 @@
import org.apache.hadoop.hdfs.TestFileCreation;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
@@ -96,8 +83,6 @@
import org.apache.hadoop.hdfs.web.resources.NoRedirectParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.Param;
-import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.http.HttpServerFunctionalTest;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
@@ -114,12 +99,8 @@
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.type.MapType;
-
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
-import static org.mockito.Matchers.anyLong;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
@@ -519,13 +500,12 @@ public void testWebHdfsAllowandDisallowSnapshots() throws Exception {
public void testWebHdfsErasureCodingFiles() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
- conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- SystemErasureCodingPolicies.getByID(
- SystemErasureCodingPolicies.XOR_2_1_POLICY_ID).getName());
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
+ dfs.enableErasureCodingPolicy(SystemErasureCodingPolicies.getByID(
+ SystemErasureCodingPolicies.XOR_2_1_POLICY_ID).getName());
final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
@@ -975,76 +955,6 @@ public void testWebHdfsGetBlockLocationsWithStorageType() throws Exception{
Assert.assertTrue(storageTypes != null && storageTypes.length > 0 &&
storageTypes[0] == StorageType.DISK);
}
-
- // Query webhdfs REST API to get block locations
- InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
-
- // Case 1
- // URL without length or offset parameters
- URL url1 = new URL("http", addr.getHostString(), addr.getPort(),
- WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS");
- LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url1);
-
- String response1 = getResponse(url1, "GET");
- LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response1);
- // Parse BlockLocation array from json output using object mapper
- BlockLocation[] locationArray1 = toBlockLocationArray(response1);
-
- // Verify the result from rest call is same as file system api
- verifyEquals(locations, locationArray1);
-
- // Case 2
- // URL contains length and offset parameters
- URL url2 = new URL("http", addr.getHostString(), addr.getPort(),
- WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS"
- + "&length=" + LENGTH + "&offset=" + OFFSET);
- LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url2);
-
- String response2 = getResponse(url2, "GET");
- LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response2);
- BlockLocation[] locationArray2 = toBlockLocationArray(response2);
-
- verifyEquals(locations, locationArray2);
-
- // Case 3
- // URL contains length parameter but without offset parameters
- URL url3 = new URL("http", addr.getHostString(), addr.getPort(),
- WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS"
- + "&length=" + LENGTH);
- LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url3);
-
- String response3 = getResponse(url3, "GET");
- LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response3);
- BlockLocation[] locationArray3 = toBlockLocationArray(response3);
-
- verifyEquals(locations, locationArray3);
-
- // Case 4
- // URL contains offset parameter but without length parameter
- URL url4 = new URL("http", addr.getHostString(), addr.getPort(),
- WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS"
- + "&offset=" + OFFSET);
- LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url4);
-
- String response4 = getResponse(url4, "GET");
- LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response4);
- BlockLocation[] locationArray4 = toBlockLocationArray(response4);
-
- verifyEquals(locations, locationArray4);
-
- // Case 5
- // URL specifies offset exceeds the file length
- URL url5 = new URL("http", addr.getHostString(), addr.getPort(),
- WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS"
- + "&offset=1200");
- LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url5);
-
- String response5 = getResponse(url5, "GET");
- LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response5);
- BlockLocation[] locationArray5 = toBlockLocationArray(response5);
-
- // Expected an empty array of BlockLocation
- verifyEquals(new BlockLocation[] {}, locationArray5);
} finally {
if (cluster != null) {
cluster.shutdown();
@@ -1052,66 +962,6 @@ public void testWebHdfsGetBlockLocationsWithStorageType() throws Exception{
}
}
- private BlockLocation[] toBlockLocationArray(String json)
- throws IOException {
- ObjectMapper mapper = new ObjectMapper();
- MapType subType = mapper.getTypeFactory().constructMapType(
- Map.class,
- String.class,
- BlockLocation[].class);
- MapType rootType = mapper.getTypeFactory().constructMapType(
- Map.class,
- mapper.constructType(String.class),
- mapper.constructType(subType));
-
- Map> jsonMap = mapper
- .readValue(json, rootType);
- Map locationMap = jsonMap
- .get("BlockLocations");
- BlockLocation[] locationArray = locationMap.get(
- BlockLocation.class.getSimpleName());
- return locationArray;
- }
-
- private void verifyEquals(BlockLocation[] locations1,
- BlockLocation[] locations2) throws IOException {
- for(int i=0; i
- * First time call it return a wrapped json response with a
- * IllegalArgumentException
- *
- * Second time call it return a valid GET_BLOCK_LOCATIONS
- * json response
- *
- * Third time call it return a wrapped json response with
- * a random IOException
- *
- */
- public static class MockWebHdfsServlet extends HttpServlet {
-
- private static final long serialVersionUID = 1L;
- private static int respondTimes = 0;
- private static final String RANDOM_EXCEPTION_MSG =
- "This is a random exception";
-
- @Override
- public void doGet(HttpServletRequest request,
- HttpServletResponse response) throws ServletException, IOException {
- response.setHeader("Content-Type",
- MediaType.APPLICATION_JSON);
- String param = request.getParameter("op");
- if(respondTimes == 0) {
- Exception mockException = new IllegalArgumentException(
- "Invalid value for webhdfs parameter \"op\". "
- + "" + "No enum constant " + param);
- sendException(request, response, mockException);
- } else if (respondTimes == 1) {
- sendResponse(request, response);
- } else if (respondTimes == 2) {
- Exception mockException = new IOException(RANDOM_EXCEPTION_MSG);
- sendException(request, response, mockException);
- }
- respondTimes++;
- }
-
- private void sendResponse(HttpServletRequest request,
- HttpServletResponse response) throws IOException {
- response.setStatus(HttpServletResponse.SC_OK);
- // Construct a LocatedBlock for testing
- DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
- DatanodeInfo[] ds = new DatanodeInfo[1];
- ds[0] = d;
- ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 121, 1);
- LocatedBlock l1 = new LocatedBlock(b1, ds);
- l1.setStartOffset(0);
- l1.setCorrupt(false);
- List ls = Arrays.asList(l1);
- LocatedBlocks locatedblocks =
- new LocatedBlocks(10, false, ls, l1,
- true, null, null);
-
- try (PrintWriter pw = response.getWriter()) {
- pw.write(JsonUtil.toJsonString(locatedblocks));
- }
- }
-
- private void sendException(HttpServletRequest request,
- HttpServletResponse response,
- Exception mockException) throws IOException {
- response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
- String errJs = JsonUtil.toJsonString(mockException);
- try (PrintWriter pw = response.getWriter()) {
- pw.write(errJs);
- }
- }
- }
-
- @Test
- public void testGetFileBlockLocationsBackwardsCompatibility()
- throws Exception {
- final Configuration conf = WebHdfsTestUtil.createConf();
- final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
- HttpServer2 http = null;
- try {
- http = HttpServerFunctionalTest.createTestServer(conf);
- http.addServlet("test", pathSpec, MockWebHdfsServlet.class);
- http.start();
-
- // Write the address back to configuration so
- // WebHdfsFileSystem could connect to the mock server
- conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
- "localhost:" + http.getConnectorAddress(0).getPort());
-
- final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(
- conf, WebHdfsConstants.WEBHDFS_SCHEME);
-
- WebHdfsFileSystem spyFs = spy(webFS);
- BlockLocation[] locations = spyFs
- .getFileBlockLocations(new Path("p"), 0, 100);
-
- // Verify result
- assertEquals(1, locations.length);
- assertEquals(121, locations[0].getLength());
-
- // Verify the fall back
- // The function should be called exactly 2 times
- // 1st time handles GETFILEBLOCKLOCATIONS and found it is not supported
- // 2nd time fall back to handle GET_FILE_BLOCK_LOCATIONS
- verify(spyFs, times(2)).getFileBlockLocations(any(),
- any(), anyLong(), anyLong());
-
- // Verify it doesn't erroneously fall back
- // When server returns a different error, it should directly
- // throw an exception.
- try {
- spyFs.getFileBlockLocations(new Path("p"), 0, 100);
- } catch (Exception e) {
- assertTrue(e instanceof IOException);
- assertEquals(e.getMessage(), MockWebHdfsServlet.RANDOM_EXCEPTION_MSG);
- // Totally this function has been called 3 times
- verify(spyFs, times(3)).getFileBlockLocations(any(),
- any(), anyLong(), anyLong());
- }
- } finally {
- if(http != null) {
- http.stop();
- }
- }
- }
}
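The removed backwards-compatibility test exercised block-location lookups over WebHDFS with a mock servlet. The equivalent lookup through the public FileSystem API, shown as a minimal sketch with an illustrative path and configuration:

    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/foo");
    FileStatus status = fs.getFileStatus(file);
    BlockLocation[] locations =
        fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation location : locations) {
      // Offset and length of each block, plus the hosts serving it.
      System.out.println(location.getOffset() + "-"
          + (location.getOffset() + location.getLength()) + ": "
          + Arrays.toString(location.getHosts()));
    }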
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index e271cb574bd..80295750f11 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index f9011450555..0a1c25e4712 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
@@ -1179,23 +1179,107 @@
- OP_ROLLING_UPGRADE_START
+ OP_ADD_ERASURE_CODING_POLICY
89
+ rs
+ 3
+ 2
+ 8192
+ 0
+ 7334ec24-dd6b-4efd-807d-ed0d18625534
+ 84
+
+
+
+ OP_ADD_ERASURE_CODING_POLICY
+
+ 90
+ rs
+ 6
+ 10
+ 4096
+ 1
+
+ dummyKey
+ dummyValue
+
+ 7334ec24-dd6b-4efd-807d-ed0d18625534
+ 85
+
+
+
+ OP_ENABLE_ERASURE_CODING_POLICY
+
+ 91
+ RS-3-2-8k
+ 7334ec24-dd6b-4efd-807d-ed0d18625534
+ 86
+
+
+
+ OP_ENABLE_ERASURE_CODING_POLICY
+
+ 92
+ RS-6-10-4k
+ 7334ec24-dd6b-4efd-807d-ed0d18625534
+ 87
+
+
+
+ OP_DISABLE_ERASURE_CODING_POLICY
+
+ 93
+ RS-3-2-8k
+ 7334ec24-dd6b-4efd-807d-ed0d18625534
+ 88
+
+
+
+ OP_DISABLE_ERASURE_CODING_POLICY
+
+ 94
+ RS-6-10-4k
+ 7334ec24-dd6b-4efd-807d-ed0d18625534
+ 89
+
+
+
+ OP_REMOVE_ERASURE_CODING_POLICY
+
+ 95
+ RS-3-2-8k
+ 7334ec24-dd6b-4efd-807d-ed0d18625534
+ 90
+
+
+
+ OP_REMOVE_ERASURE_CODING_POLICY
+
+ 96
+ RS-6-10-4k
+ 7334ec24-dd6b-4efd-807d-ed0d18625534
+ 91
+
+
+
+ OP_ROLLING_UPGRADE_START
+
+ 97
1422406383706
OP_ROLLING_UPGRADE_FINALIZE
- 90
+ 98
1422406383706
OP_END_LOG_SEGMENT
- 91
+ 99
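The new edit-log records above correspond to the erasure coding policy administration calls exercised elsewhere in this patch, which also shift the transaction ids of the trailing records. A sketch of the client calls that produce them, reusing the policy values from the TestFSImage changes and assuming an open DistributedFileSystem handle named fs:

    ECSchema schema = new ECSchema("rs", 5, 4);
    ErasureCodingPolicy policy =
        new ErasureCodingPolicy(schema, 2 * 1024, (byte) 254);
    // OP_ADD_ERASURE_CODING_POLICY
    AddECPolicyResponse[] responses =
        fs.addErasureCodingPolicies(new ErasureCodingPolicy[]{policy});
    String name = responses[0].getPolicy().getName();
    // OP_ENABLE_ERASURE_CODING_POLICY
    fs.enableErasureCodingPolicy(name);
    // OP_DISABLE_ERASURE_CODING_POLICY
    fs.disableErasureCodingPolicy(name);
    // OP_REMOVE_ERASURE_CODING_POLICY
    fs.removeErasureCodingPolicy(name);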
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index c34f7bd32f8..ce5fdc86111 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -42,6 +42,10 @@
org.apache.hadoop
hadoop-yarn-common