diff --git a/LICENSE.txt b/LICENSE.txt
index 9819dea0464..2e08754fea8 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -765,6 +765,7 @@ hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
Apache HBase - Server which contains JQuery minified javascript library version 1.8.3
+Microsoft JDBC Driver for SQLServer - version 6.2.1.jre7
--------------------------------------------------------------------------------
Copyright 2005, 2012, 2013 jQuery Foundation and other contributors, https://jquery.org/
diff --git a/dev-support/docker/hadoop_env_checks.sh b/dev-support/docker/hadoop_env_checks.sh
index 910c802291a..5cb4b2b3b95 100755
--- a/dev-support/docker/hadoop_env_checks.sh
+++ b/dev-support/docker/hadoop_env_checks.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
diff --git a/dev-support/findHangingTest.sh b/dev-support/findHangingTest.sh
index f7ebe47f093..fcda9ffb8c9 100644
--- a/dev-support/findHangingTest.sh
+++ b/dev-support/findHangingTest.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
diff --git a/dev-support/verify-xml.sh b/dev-support/verify-xml.sh
index abab4e69f2b..9ef456a777d 100755
--- a/dev-support/verify-xml.sh
+++ b/dev-support/verify-xml.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 74ce9bcf768..289061f8add 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -61,6 +61,7 @@
stop-yarn.shstart-yarn.cmdstop-yarn.cmd
+ FederationStateStore**/**0755
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
index e495a69e1ac..2f31fa6df27 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
@@ -46,7 +46,6 @@
org.apache.maven.pluginsmaven-enforcer-plugin
- 1.4org.codehaus.mojo
diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
index 68d1f5b8d35..0e23db939c5 100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
@@ -50,7 +50,6 @@
org.apache.maven.pluginsmaven-enforcer-plugin
- 1.4org.codehaus.mojo
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 93811ad698d..5cf1fad8c8b 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -126,6 +126,10 @@
javax.xml.bindjaxb-api
+
+ xml-apis
+ xml-apis
+ org.apache.avroavro
@@ -624,6 +628,13 @@
**/*.class
+
+ org.apache.hadoop:hadoop-mapreduce-client-jobclient:*
+
+ testjar/*
+ testshell/*
+
+
@@ -646,6 +657,7 @@
org/junit/*org/junit/**/*
+
org/ietf/jgss/*org/omg/**/*org/w3c/dom/*
@@ -654,6 +666,13 @@
org/xml/sax/**/*
+
+ contribs/
+ ${shaded.dependency.prefix}.contribs.
+
+ **/pom.xml
+
+ com/${shaded.dependency.prefix}.com.
@@ -691,6 +710,13 @@
io/serializations
+
+ javassist/
+ ${shaded.dependency.prefix}.javassist.
+
+ **/pom.xml
+
+ javax/el/${shaded.dependency.prefix}.javax.el.
@@ -712,6 +738,13 @@
**/pom.xml
+
+ jersey/
+ ${shaded.dependency.prefix}.jersey.
+
+ **/pom.xml
+
+ net/${shaded.dependency.prefix}.net.
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 2f64152b8b8..24c6b7a8365 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -174,13 +174,6 @@
org/apache/jasper/compiler/Localizer.class
-
-
- xerces:xercesImpl
-
- META-INF/services/*
-
- com.sun.jersey:*
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index de76afbcbc3..4bafd8e0223 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -323,6 +323,10 @@
+
+
+
+
+
+<property>
+  <name>fs.wasb.impl</name>
+  <value>org.apache.hadoop.fs.azure.NativeAzureFileSystem</value>
+  <description>The implementation class of the Native Azure Filesystem</description>
+</property>
+
+<property>
+  <name>fs.wasbs.impl</name>
+  <value>org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure</value>
+  <description>The implementation class of the Secure Native Azure Filesystem</description>
+</property>
+
fs.azure.secure.modefalse
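As a rough illustration of what these two mappings enable, a `wasb://` URI can be resolved directly through `FileSystem.get()` once hadoop-azure and the storage account credentials are available; the account and container names below are placeholders:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class WasbResolveExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // fs.wasb.impl maps the wasb:// scheme to NativeAzureFileSystem, so no
    // explicit impl setting is needed here (container/account are placeholders).
    FileSystem fs = FileSystem.get(
        URI.create("wasb://mycontainer@myaccount.blob.core.windows.net/"), conf);
    System.out.println(fs.getClass().getName());
  }
}
```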
@@ -2574,11 +2586,16 @@
ClientCredential
Defines Azure Active Directory OAuth2 access token provider type.
- Supported types are ClientCredential, RefreshToken, and Custom.
+ Supported types are ClientCredential, RefreshToken, MSI, DeviceCode,
+ and Custom.
The ClientCredential type requires property fs.adl.oauth2.client.id,
fs.adl.oauth2.credential, and fs.adl.oauth2.refresh.url.
The RefreshToken type requires property fs.adl.oauth2.client.id and
fs.adl.oauth2.refresh.token.
+ The MSI type requires properties fs.adl.oauth2.msi.port and
+ fs.adl.oauth2.msi.tenantguid.
+ The DeviceCode type requires property
+ fs.adl.oauth2.devicecode.clientapp.id.
The Custom type requires property fs.adl.oauth2.access.token.provider.
@@ -2615,6 +2632,36 @@
+<property>
+  <name>fs.adl.oauth2.msi.port</name>
+  <value></value>
+  <description>
+    The localhost port for the MSI token service. This is the port specified
+    when creating the Azure VM.
+    Used by MSI token provider.
+  </description>
+</property>
+
+<property>
+  <name>fs.adl.oauth2.msi.tenantguid</name>
+  <value></value>
+  <description>
+    The tenant guid for the Azure AAD tenant under which the azure data lake
+    store account is created.
+    Used by MSI token provider.
+  </description>
+</property>
+
+<property>
+  <name>fs.adl.oauth2.devicecode.clientapp.id</name>
+  <value></value>
+  <description>
+    The app id of the AAD native app in whose context the auth request
+    should be made.
+    Used by DeviceCode token provider.
+  </description>
+</property>
+
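A minimal sketch of wiring the MSI provider together programmatically, assuming the provider type is selected with the `fs.adl.oauth2.access.token.provider.type` key (the port and tenant GUID values are placeholders):

```java
import org.apache.hadoop.conf.Configuration;

public class AdlMsiConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Select the MSI token provider and supply the two properties it requires.
    conf.set("fs.adl.oauth2.access.token.provider.type", "MSI");
    conf.set("fs.adl.oauth2.msi.port", "50342");                   // placeholder port
    conf.set("fs.adl.oauth2.msi.tenantguid",
        "00000000-0000-0000-0000-000000000000");                   // placeholder GUID
    System.out.println(conf.get("fs.adl.oauth2.access.token.provider.type"));
  }
}
```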
@@ -2663,4 +2710,50 @@
This determines the number of open file handles.
+<property>
+  <description>
+    Host:Port of the ZooKeeper server to be used.
+  </description>
+  <name>hadoop.zk.address</name>
+</property>
+
+<property>
+  <description>Number of tries to connect to ZooKeeper.</description>
+  <name>hadoop.zk.num-retries</name>
+  <value>1000</value>
+</property>
+
+<property>
+  <description>
+    Retry interval in milliseconds when connecting to ZooKeeper.
+  </description>
+  <name>hadoop.zk.retry-interval-ms</name>
+  <value>1000</value>
+</property>
+
+<property>
+  <description>
+    ZooKeeper session timeout in milliseconds. Session expiration
+    is managed by the ZooKeeper cluster itself, not by the client. This value is
+    used by the cluster to determine when the client's session expires.
+    Expiration happens when the cluster does not hear from the client within
+    the specified session timeout period (i.e. no heartbeat).
+  </description>
+  <name>hadoop.zk.timeout-ms</name>
+  <value>10000</value>
+</property>
+
+<property>
+  <description>ACLs to be used for ZooKeeper znodes.</description>
+  <name>hadoop.zk.acl</name>
+  <value>world:anyone:rwcda</value>
+</property>
+
+<property>
+  <description>
+    Specify the auths to be used for the ACLs specified in hadoop.zk.acl.
+    This takes a comma-separated list of authentication mechanisms, each of the
+    form 'scheme:auth' (the same syntax used for the 'addAuth' command in
+    the ZK CLI).
+  </description>
+  <name>hadoop.zk.auth</name>
+</property>
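The same keys can also be set programmatically on a `Configuration`; a small sketch with an illustrative ZooKeeper quorum address:

```java
import org.apache.hadoop.conf.Configuration;

public class ZkConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Quorum hosts below are placeholders.
    conf.set("hadoop.zk.address", "zk1.example.com:2181,zk2.example.com:2181");
    conf.setInt("hadoop.zk.num-retries", 1000);
    conf.setInt("hadoop.zk.retry-interval-ms", 1000);
    conf.setInt("hadoop.zk.timeout-ms", 10000);
    conf.set("hadoop.zk.acl", "world:anyone:rwcda");
    System.out.println(conf.get("hadoop.zk.address"));
  }
}
```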
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 0a594abe0c9..71eec75be92 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -676,11 +676,11 @@ stat
Usage: `hadoop fs -stat [format] <path> ...`

-Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner(%u), and modification date (%y, %Y). %y shows UTC date as "yyyy-MM-dd HH:mm:ss" and %Y shows milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.
+Print statistics about the file/directory at \<path\> in the specified format. Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes (%b), type (%F), group name of owner (%g), name (%n), block size (%o), replication (%r), user name of owner (%u), access date (%x, %X), and modification date (%y, %Y). %x and %y show UTC date as "yyyy-MM-dd HH:mm:ss", and %X and %Y show milliseconds since January 1, 1970 UTC. If the format is not specified, %y is used by default.
Example:
-* `hadoop fs -stat "%F %a %u:%g %b %y %n" /file`
+* `hadoop fs -stat "type:%F perm:%a %u:%g size:%b mtime:%y atime:%x name:%n" /file`
Exit Code: Returns 0 on success and -1 on error.
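For reference, the access and modification times surfaced by %x/%X and %y/%Y are also available programmatically from `FileStatus`; a short sketch (the path is hypothetical):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StatTimesExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus st = fs.getFileStatus(new Path("/file"));   // hypothetical path
    // Millisecond epoch values, analogous to %X and %Y above.
    System.out.println("atime=" + st.getAccessTime());
    System.out.println("mtime=" + st.getModificationTime());
  }
}
```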
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 4b89bc2a581..4543facc441 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -145,6 +145,9 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
| `CreateSymlinkOps` | Total number of createSymlink operations |
| `GetLinkTargetOps` | Total number of getLinkTarget operations |
| `FilesInGetListingOps` | Total number of files and directories listed by directory listing operations |
+| `SuccessfulReReplications` | Total number of successful block re-replications |
+| `NumTimesReReplicationNotScheduled` | Total number of times that a block re-replication could not be scheduled |
+| `TimeoutReReplications` | Total number of timed out block re-replications |
| `AllowSnapshotOps` | Total number of allowSnapshot operations |
| `DisallowSnapshotOps` | Total number of disallowSnapshot operations |
| `CreateSnapshotOps` | Total number of createSnapshot operations |
@@ -157,8 +160,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
| `SyncsNumOps` | Total number of Journal syncs |
| `SyncsAvgTime` | Average time of Journal syncs in milliseconds |
| `TransactionsBatchedInSync` | Total number of Journal transactions batched in sync |
-| `BlockReportNumOps` | Total number of processing block reports from DataNode |
-| `BlockReportAvgTime` | Average time of processing block reports in milliseconds |
+| `StorageBlockReportNumOps` | Total number of processing block reports from individual storages in DataNode |
+| `StorageBlockReportAvgTime` | Average time of processing block reports in milliseconds |
| `CacheReportNumOps` | Total number of processing cache reports from DataNode |
| `CacheReportAvgTime` | Average time of processing cache reports in milliseconds |
| `SafeModeTime` | The interval between FSNameSystem starts and the last time safemode leaves in milliseconds. (sometimes not equal to the time in SafeMode, see [HDFS-5156](https://issues.apache.org/jira/browse/HDFS-5156)) |
@@ -176,6 +179,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
| `GenerateEDEKTimeAvgTime` | Average time of generating EDEK in milliseconds |
| `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
| `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
+| `ResourceCheckTime`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default (no intervals are watched); the intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `StorageBlockReport`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default (no intervals are watched); the intervals are specified by `dfs.metrics.percentiles.intervals`. |
FSNamesystem
------------
@@ -213,7 +218,15 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
| `PendingDataNodeMessageCount` | (HA-only) Current number of pending block-related messages for later processing in the standby NameNode |
| `MillisSinceLastLoadedEdits` | (HA-only) Time in milliseconds since the last time standby NameNode load edit log. In active NameNode, set to 0 |
| `BlockCapacity` | Current number of block capacity |
+| `NumLiveDataNodes` | Number of datanodes which are currently live |
+| `NumDeadDataNodes` | Number of datanodes which are currently dead |
+| `NumDecomLiveDataNodes` | Number of datanodes which have been decommissioned and are now live |
+| `NumDecomDeadDataNodes` | Number of datanodes which have been decommissioned and are now dead |
+| `NumDecommissioningDataNodes` | Number of datanodes in decommissioning state |
+| `VolumeFailuresTotal` | Total number of volume failures across all Datanodes |
+| `EstimatedCapacityLostTotal` | An estimate of the total capacity lost due to volume failures |
| `StaleDataNodes` | Current number of DataNodes marked stale due to delayed heartbeat |
+| `NumStaleStorages` | Number of storages marked as content stale (after NameNode restart/failover before first block report is received) |
| `MissingReplOneBlocks` | Current number of missing blocks with replication factor 1 |
| `NumFilesUnderConstruction` | Current number of files under construction |
| `NumActiveClients` | Current number of active clients holding lease |
@@ -224,6 +237,9 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
| `TotalSyncTimes` | Total number of milliseconds spent by various edit logs in sync operation|
| `NameDirSize` | NameNode name directories size in bytes |
| `NumTimedOutPendingReconstructions` | The number of timed out reconstructions. Not the number of unique blocks that timed out. |
+| `NumInMaintenanceLiveDataNodes` | Number of live Datanodes which are in maintenance state |
+| `NumInMaintenanceDeadDataNodes` | Number of dead Datanodes which are in maintenance state |
+| `NumEnteringMaintenanceDataNodes` | Number of Datanodes that are entering the maintenance state |
| `FSN(Read|Write)Lock`*OperationName*`NumOps` | Total number of acquiring lock by operations |
| `FSN(Read|Write)Lock`*OperationName*`AvgTime` | Average time of holding the lock by operations in milliseconds |
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
index e1aad5ac8c7..5a62c4fc9dd 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
@@ -142,12 +142,9 @@ In a typical cluster HDFS and YARN services will be launched as the system `hdfs
hadoop.security.auth_to_local
- RULE:[2:$1@$0](nn/.*@.*REALM.TLD)s/.*/hdfs/
- RULE:[2:$1@$0](jn/.*@.*REALM.TLD)s/.*/hdfs/
- RULE:[2:$1@$0](dn/.*@.*REALM.TLD)s/.*/hdfs/
- RULE:[2:$1@$0](nm/.*@.*REALM.TLD)s/.*/yarn/
- RULE:[2:$1@$0](rm/.*@.*REALM.TLD)s/.*/yarn/
- RULE:[2:$1@$0](jhs/.*@.*REALM.TLD)s/.*/mapred/
+ RULE:[2:$1/$2@$0]([ndj]n/.*@REALM.TLD)s/.*/hdfs/
+ RULE:[2:$1/$2@$0]([rn]m/.*@REALM.TLD)s/.*/yarn/
+ RULE:[2:$1/$2@$0](jhs/.*@REALM.TLD)s/.*/mapred/
DEFAULT
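One way to sanity-check the consolidated rules is with the `KerberosName` utility from hadoop-auth; a sketch (the host name is illustrative, and the rules are copied from the snippet above):

```java
import org.apache.hadoop.security.authentication.util.KerberosName;

public class AuthToLocalCheck {
  public static void main(String[] args) throws Exception {
    KerberosName.setRules(
        "RULE:[2:$1/$2@$0]([ndj]n/.*@REALM.TLD)s/.*/hdfs/\n"
        + "RULE:[2:$1/$2@$0]([rn]m/.*@REALM.TLD)s/.*/yarn/\n"
        + "RULE:[2:$1/$2@$0](jhs/.*@REALM.TLD)s/.*/mapred/\n"
        + "DEFAULT");
    // Expect "hdfs" and "mapred" respectively for these service principals.
    System.out.println(new KerberosName("nn/host1.realm.tld@REALM.TLD").getShortName());
    System.out.println(new KerberosName("jhs/host1.realm.tld@REALM.TLD").getShortName());
  }
}
```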
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md b/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md
index 7f115c2ecfe..eb3b1227236 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md
@@ -78,13 +78,27 @@ A special value of `*` implies that all users are allowed to access the service.
If access control list is not defined for a service, the value of `security.service.authorization.default.acl` is applied. If `security.service.authorization.default.acl` is not defined, `*` is applied.
-* Blocked Access Control ListsIn some cases, it is required to specify blocked access control list for a service. This specifies the list of users and groups who are not authorized to access the service. The format of the blocked access control list is same as that of access control list. The blocked access control list can be specified via `$HADOOP_CONF_DIR/hadoop-policy.xml`. The property name is derived by suffixing with ".blocked".
+### Blocked Access Control Lists
- Example: The property name of blocked access control list for `security.client.protocol.acl` will be `security.client.protocol.acl.blocked`
+In some cases, it is required to specify a blocked access control list for a service. This specifies the list of users and groups who are not authorized to access the service. The format of the blocked access control list is the same as that of the access control list. The blocked access control list can be specified via `$HADOOP_CONF_DIR/hadoop-policy.xml`. The property name is derived by suffixing the ACL property name with ".blocked".
- For a service, it is possible to specify both an access control list and a blocked control list. A user is authorized to access the service if the user is in the access control and not in the blocked access control list.
+Example: The property name of blocked access control list for `security.client.protocol.acl` will be `security.client.protocol.acl.blocked`
- If blocked access control list is not defined for a service, the value of `security.service.authorization.default.acl.blocked` is applied. If `security.service.authorization.default.acl.blocked` is not defined, empty blocked access control list is applied.
+For a service, it is possible to specify both an access control list and a blocked access control list. A user is authorized to access the service if the user is in the access control list and not in the blocked access control list.
+
+If a blocked access control list is not defined for a service, the value of `security.service.authorization.default.acl.blocked` is applied. If `security.service.authorization.default.acl.blocked` is not defined, an empty blocked access control list is applied.
+
+### Access Control using Lists of IP Addresses, Host Names and IP Ranges
+
+Access to a service can be controlled based on the IP address of the client accessing the service. It is possible to restrict access to a service from a set of machines by specifying a list of IP addresses, host names and IP ranges. The property name for each service is derived from the corresponding ACL's property name. If the property name of the ACL is security.client.protocol.acl, the property name for the hosts list will be security.client.protocol.hosts.
+
+If a hosts list is not defined for a service, the value of `security.service.authorization.default.hosts` is applied. If `security.service.authorization.default.hosts` is not defined, `*` is applied.
+
+It is possible to specify a blocked list of hosts. Only those machines which are in the hosts list, but not in the blocked hosts list, will be granted access to the service. The property name is derived by suffixing the hosts list property name with ".blocked".
+
+Example: The property name of the blocked hosts list for `security.client.protocol.hosts` will be `security.client.protocol.hosts.blocked`
+
+If a blocked hosts list is not defined for a service, the value of `security.service.authorization.default.hosts.blocked` is applied. If `security.service.authorization.default.hosts.blocked` is not defined, an empty blocked hosts list is applied.
### Refreshing Service Level Authorization Configuration
@@ -100,16 +114,6 @@ Refresh the service-level authorization configuration for the ResourceManager:
Of course, one can use the `security.refresh.policy.protocol.acl` property in `$HADOOP_CONF_DIR/hadoop-policy.xml` to restrict access to the ability to refresh the service-level authorization configuration to certain users/groups.
-* Access Control using list of ip addresses, host names and ip rangesAccess to a service can be controlled based on the ip address of the client accessing the service. It is possible to restrict access to a service from a set of machines by specifying a list of ip addresses, host names and ip ranges. The property name for each service is derived from the corresponding acl's property name. If the property name of acl is security.client.protocol.acl, property name for the hosts list will be security.client.protocol.hosts.
-
- If hosts list is not defined for a service, the value of `security.service.authorization.default.hosts` is applied. If `security.service.authorization.default.hosts` is not defined, `*` is applied.
-
- It is possible to specify a blocked list of hosts. Only those machines which are in the hosts list, but not in the blocked hosts list will be granted access to the service. The property name is derived by suffixing with ".blocked".
-
- Example: The property name of blocked hosts list for `security.client.protocol.hosts` will be `security.client.protocol.hosts.blocked`
-
- If blocked hosts list is not defined for a service, the value of `security.service.authorization.default.hosts.blocked` is applied. If `security.service.authorization.default.hosts.blocked` is not defined, empty blocked hosts list is applied.
-
### Examples
Allow only users `alice`, `bob` and users in the `mapreduce` group to submit jobs to the MapReduce cluster:
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md b/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
index 97f9e9aa92a..ffe2aec96ab 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
@@ -180,11 +180,11 @@ It is also possible to add the new subcommands to the usage output. The `hadoop_
```bash
if [[ "${HADOOP_SHELL_EXECNAME}" = "yarn" ]]; then
- hadoop_add_subcommand "hello" "Print some text to the screen"
+ hadoop_add_subcommand "hello" client "Print some text to the screen"
fi
```
-This functionality may also be use to override the built-ins. For example, defining:
+We set the subcommand type to be "client" as there are no special restrictions, extra capabilities, etc. This functionality may also be used to override the built-ins. For example, defining:
```bash
function hdfs_subcommand_fetchdt
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index b56666c4a26..1e522c7782c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -553,7 +553,7 @@ on a path that exists and is a file. Instead the operation returns false.
FS' = FS
result = False
-### `FSDataOutputStream create(Path, ...)`
+### `FSDataOutputStream create(Path, ...)`
FSDataOutputStream create(Path p,
@@ -616,7 +616,24 @@ this precondition fails.
* Not covered: symlinks. The resolved path of the symlink is used as the final path argument to the `create()` operation
-### `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)`
+### `FSDataOutputStreamBuilder createFile(Path p)`
+
+Make a `FSDataOutputStreamBuilder` to specify the parameters to create a file.
+
+#### Implementation Notes
+
+`createFile(p)` returns an `FSDataOutputStreamBuilder` only and does not make
+any change on the filesystem immediately. When `build()` is invoked on the
+`FSDataOutputStreamBuilder`, the builder parameters are verified and
+[`create(Path p)`](#FileSystem.create) is invoked on the underlying filesystem.
+`build()` has the same preconditions and postconditions as
+[`create(Path p)`](#FileSystem.create).
+
+* Similar to [`create(Path p)`](#FileSystem.create), files are overwritten
+by default, unless `builder.overwrite(false)` is specified.
+* Unlike [`create(Path p)`](#FileSystem.create), missing parent directories are
+not created by default, unless `builder.recursive()` is specified.
+
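A short usage sketch of the builder (the path is illustrative); nothing touches the filesystem until `build()` runs:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateFileBuilderExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // The builder is inert until build(); create() then runs with these settings.
    try (FSDataOutputStream out = fs.createFile(new Path("/tmp/example")) // illustrative path
        .overwrite(false)   // fail if the file already exists
        .recursive()        // create missing parent directories
        .build()) {
      out.write("hello".getBytes("UTF-8"));
    }
  }
}
```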
+### `FSDataOutputStream append(Path p, int bufferSize, Progressable progress)`
Implementations without a compliant call SHOULD throw `UnsupportedOperationException`.
@@ -634,6 +651,18 @@ Implementations without a compliant call SHOULD throw `UnsupportedOperationExcep
Return: `FSDataOutputStream`, which can update the entry `FS.Files[p]`
by appending data to the existing list.
+### `FSDataOutputStreamBuilder appendFile(Path p)`
+
+Make a `FSDataOutputStreamBuilder` to specify the parameters to append to an
+existing file.
+
+#### Implementation Notes
+
+`appendFile(p)` returns an `FSDataOutputStreamBuilder` only and does not make
+any change on the filesystem immediately. When `build()` is invoked on the
+`FSDataOutputStreamBuilder`, the builder parameters are verified and
+[`append()`](#FileSystem.append) is invoked on the underlying filesystem.
+`build()` has the same preconditions and postconditions as
+[`append()`](#FileSystem.append).
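And the corresponding append sketch (the path is illustrative and the file is assumed to exist):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendFileBuilderExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // append() is only invoked on the underlying filesystem when build() runs.
    try (FSDataOutputStream out =
        fs.appendFile(new Path("/tmp/example")).build()) {   // illustrative path
      out.write(" more".getBytes("UTF-8"));
    }
  }
}
```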
### `FSDataInputStream open(Path f, int bufferSize)`
@@ -1210,3 +1239,27 @@ try {
It is notable that this is *not* done in the Hadoop codebase. This does not imply
that robust loops are not recommended —more that the concurrency
problems were not considered during the implementation of these loops.
+
+
+## interface `StreamCapabilities`
+
+The `StreamCapabilities` interface provides a way to programmatically query the
+capabilities that an `OutputStream` supports.
+
+```java
+public interface StreamCapabilities {
+ boolean hasCapability(String capability);
+}
+```
+
+### `boolean hasCapability(capability)`
+
+Return true if the `OutputStream` has the desired capability.
+
+The caller can query the capabilities of a stream using a string value.
+The following capabilities can currently be queried:
+
+ * `StreamCapabilities.HFLUSH` ("*hflush*"): the capability to flush out the data
+ in the client's buffer.
+ * `StreamCapabilities.HSYNC` ("*hsync*"): the capability to flush out the data in
+ the client's buffer and sync it to the disk device.
\ No newline at end of file
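A defensive-flush helper is the typical use; a sketch that falls back to a plain `flush()` when the stream does not advertise hflush support (the `Syncable` cast is an assumption about the concrete stream type):

```java
import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.fs.Syncable;

public final class FlushHelper {
  private FlushHelper() {}

  /** Flush via hflush() when the stream advertises the capability. */
  public static void bestFlush(OutputStream out) throws IOException {
    if (out instanceof StreamCapabilities
        && ((StreamCapabilities) out).hasCapability(StreamCapabilities.HFLUSH)
        && out instanceof Syncable) {
      ((Syncable) out).hflush();
    } else {
      out.flush();
    }
  }
}
```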
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md
new file mode 100644
index 00000000000..4ea1fd168f2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/fsdataoutputstreambuilder.md
@@ -0,0 +1,182 @@
+
+
+
+
+
+
+# class `org.apache.hadoop.fs.FSDataOutputStreamBuilder`
+
+
+
+Builder pattern for `FSDataOutputStream` and its subclasses. It is used to
+create a new file or open an existing file on `FileSystem` for write.
+
+## Invariants
+
+The `FSDataOutputStreamBuilder` interface does not validate parameters
+or modify the state of `FileSystem` until [`build()`](#Builder.build) is
+invoked.
+
+## Implementation-agnostic parameters.
+
+### `FSDataOutputStreamBuilder create()`
+
+Specify `FSDataOutputStreamBuilder` to create a file on `FileSystem`, equivalent
+to `CreateFlag#CREATE`.
+
+### `FSDataOutputStreamBuilder append()`
+
+Specify `FSDataOutputStreamBuilder` to append to an existing file on
+`FileSystem`, equivalent to `CreateFlag#APPEND`.
+
+### `FSDataOutputStreamBuilder overwrite(boolean overwrite)`
+
+Specify whether `FSDataOutputStreamBuilder` should overwrite an existing file. If
+`overwrite==true` is given, it truncates an existing file, equivalent to
+`CreateFlag#OVERWRITE`.
+
+### `FSDataOutputStreamBuilder permission(FsPermission permission)`
+
+Set permission for the file.
+
+### `FSDataOutputStreamBuilder bufferSize(int bufSize)`
+
+Set the size of the buffer to be used.
+
+### `FSDataOutputStreamBuilder replication(short replica)`
+
+Set the replication factor.
+
+### `FSDataOutputStreamBuilder blockSize(long size)`
+
+Set block size in bytes.
+
+### `FSDataOutputStreamBuilder recursive()`
+
+Create parent directories if they do not exist.
+
+### `FSDataOutputStreamBuilder progress(Progressable prog)`
+
+Set the facility of reporting progress.
+
+### `FSDataOutputStreamBuilder checksumOpt(ChecksumOpt chksumOpt)`
+
+Set checksum opt.
+
+### Set optional or mandatory parameters
+
+ FSDataOutputStreamBuilder opt(String key, ...)
+ FSDataOutputStreamBuilder must(String key, ...)
+
+Set optional or mandatory parameters to the builder. Using `opt()` or `must()`,
+a client can specify FS-specific parameters without inspecting the concrete type
+of `FileSystem`.
+
+ // Don't
+ if (fs instanceof FooFileSystem) {
+ FooFileSystem foofs = (FooFileSystem) fs;
+ out = foofs.createFile(path)
+ .optionA()
+ .optionB("value")
+ .cache()
+ .build()
+ } else if (fs instanceof BarFileSystem) {
+ ...
+ }
+
+ // Do
+ out = fs.createFile(path)
+ .permission(perm)
+ .bufferSize(bufSize)
+ .opt("foofs:option.a", true)
+ .opt("foofs:option.b", "value")
+ .opt("barfs:cache", true)
+ .must("foofs:cache", true)
+ .must("barfs:cache-size", 256 * 1024 * 1024)
+ .build();
+
+#### Implementation Notes
+
+The concrete `FileSystem` and/or `FSDataOutputStreamBuilder` implementation
+MUST verify that implementation-agnostic parameters (e.g., "syncable") or
+implementation-specific parameters (e.g., "foofs:cache")
+are supported. `FileSystem` will satisfy optional parameters (via `opt(key, ...)`)
+on a best-effort basis. If the mandatory parameters (via `must(key, ...)`) cannot
+be satisfied by the `FileSystem`, `IllegalArgumentException` should be thrown in `build()`.
+
+The behavior of resolving conflicts between the parameters set by
+builder methods (e.g., `bufferSize()`) and by `opt()`/`must()` is undefined.
+
+## HDFS-specific parameters.
+
+`HdfsDataOutputStreamBuilder extends FSDataOutputStreamBuilder` provides additional
+HDFS-specific parameters to further customize file creation and append behavior.
+
+### `FSDataOutputStreamBuilder favoredNodes(InetSocketAddress[] nodes)`
+
+Set favored DataNodes for new blocks.
+
+### `FSDataOutputStreamBuilder syncBlock()`
+
+Force closed blocks to the disk device. See `CreateFlag#SYNC_BLOCK`
+
+### `FSDataOutputStreamBuilder lazyPersist()`
+
+Create the block on transient storage if possible.
+
+### `FSDataOutputStreamBuilder newBlock()`
+
+Append data to a new block instead of the end of the last partial block.
+
+### `FSDataOutputStreamBuilder noLocalWrite()`
+
+Advise that a block replica NOT be written to the local DataNode.
+
+### `FSDataOutputStreamBuilder ecPolicyName()`
+
+Enforce the file to be a striped file with erasure coding policy 'policyName',
+no matter what its parent directory's replication or erasure coding policy is.
+
+### `FSDataOutputStreamBuilder replicate()`
+
+Enforce the file to be a replicated file, no matter what its parent directory's
+replication or erasure coding policy is.
+
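A sketch chaining a few of these HDFS-specific calls; `dfs` is assumed to be a `DistributedFileSystem` whose `createFile()` returns an `HdfsDataOutputStreamBuilder`, and the path is illustrative:

```java
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class HdfsBuilderSketch {
  static FSDataOutputStream openForWrite(DistributedFileSystem dfs) throws Exception {
    return dfs.createFile(new Path("/tmp/hdfs-example"))   // illustrative path
        .replication((short) 3)
        .blockSize(128L * 1024 * 1024)
        .syncBlock()        // CreateFlag#SYNC_BLOCK
        .noLocalWrite()     // avoid a replica on the local DataNode
        .recursive()
        .build();
  }
}
```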
+## Builder interface
+
+### `FSDataOutputStream build()`
+
+Create a new file or append to an existing file on the underlying `FileSystem`,
+and return an `FSDataOutputStream` for writing.
+
+#### Preconditions
+
+The following combinations of parameters are not supported:
+
+ if APPEND|OVERWRITE: raise HadoopIllegalArgumentException
+ if CREATE|APPEND|OVERWRITE: raise HadoopIllegalArgumentException
+
+`FileSystem` may reject the request for other reasons and throw `IOException`,
+see `FileSystem#create(path, ...)` and `FileSystem#append()`.
+
+#### Postconditions
+
+ FS' where :
+ FS'.Files'[p] == []
+ ancestors(p) is-subset-of FS'.Directories'
+
+ result = FSDataOutputStream
+
+The result is an `FSDataOutputStream` to be used to write data to the filesystem.
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
index 66a7eb3f364..532b6c7b688 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/index.md
@@ -33,5 +33,6 @@ HDFS as these are commonly expected by Hadoop client applications.
1. [Model](model.html)
1. [FileSystem class](filesystem.html)
1. [FSDataInputStream class](fsdatainputstream.html)
+1. [FSDataOutputStreamBuilder class](fsdataoutputstreambuilder.html)
2. [Testing with the Filesystem specification](testing.html)
2. [Extending the specification and its tests](extending.html)
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index da37e68e2eb..d0e0a351b53 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -103,6 +103,12 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
xmlPrefixToSkipCompare.add("fs.s3n.");
xmlPrefixToSkipCompare.add("s3native.");
+ // WASB properties are in a different subtree.
+ // - org.apache.hadoop.fs.azure.NativeAzureFileSystem
+ xmlPrefixToSkipCompare.add("fs.wasb.impl");
+ xmlPrefixToSkipCompare.add("fs.wasbs.impl");
+ xmlPrefixToSkipCompare.add("fs.azure.");
+
// ADL properties are in a different subtree
// - org.apache.hadoop.hdfs.web.ADLConfKeys
xmlPrefixToSkipCompare.add("adl.");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 5ced541af3b..91f25fa1cad 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -36,6 +36,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
@@ -48,6 +49,7 @@ import static org.junit.Assert.assertArrayEquals;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
@@ -55,6 +57,9 @@ import org.apache.hadoop.test.GenericTestUtils;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
import org.mockito.Mockito;
public class TestConfiguration extends TestCase {
@@ -78,6 +83,11 @@ public class TestConfiguration extends TestCase {
/** Four apostrophes. */
public static final String ESCAPED = "''''";
+ private static final String SENSITIVE_CONFIG_KEYS =
+ CommonConfigurationKeysPublic.HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS;
+
+ private BufferedWriter out;
+
@Override
protected void setUp() throws Exception {
super.setUp();
@@ -86,6 +96,9 @@ public class TestConfiguration extends TestCase {
@Override
protected void tearDown() throws Exception {
+ if(out != null) {
+ out.close();
+ }
super.tearDown();
new File(CONFIG).delete();
new File(CONFIG2).delete();
@@ -151,16 +164,189 @@ public class TestConfiguration extends TestCase {
startConfig();
declareProperty("prop", "A", "A");
endConfig();
-
- InputStream in1 = new ByteArrayInputStream(writer.toString().getBytes());
+
+ InputStream in1 = Mockito.spy(new ByteArrayInputStream(
+ writer.toString().getBytes()));
Configuration conf = new Configuration(false);
conf.addResource(in1);
assertEquals("A", conf.get("prop"));
+ Mockito.verify(in1, Mockito.times(1)).close();
InputStream in2 = new ByteArrayInputStream(writer.toString().getBytes());
conf.addResource(in2);
assertEquals("A", conf.get("prop"));
}
+ public void testFinalWarnings() throws Exception {
+ // Make a configuration file with a final property
+ StringWriter writer = new StringWriter();
+ out = new BufferedWriter(writer);
+ startConfig();
+ declareProperty("prop", "A", "A", true);
+ endConfig();
+ byte[] bytes = writer.toString().getBytes();
+ InputStream in1 = new ByteArrayInputStream(bytes);
+
+ // Make a second config file with a final property with a different value
+ writer = new StringWriter();
+ out = new BufferedWriter(writer);
+ startConfig();
+ declareProperty("prop", "BB", "BB", true);
+ endConfig();
+ byte[] bytes2 = writer.toString().getBytes();
+ InputStream in2 = new ByteArrayInputStream(bytes2);
+
+ // Attach our own log appender so we can verify output
+ TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+
+ try {
+ // Add the 2 different resources - this should generate a warning
+ conf.addResource(in1);
+ conf.addResource(in2);
+ assertEquals("should see the first value", "A", conf.get("prop"));
+
+ List<LoggingEvent> events = appender.getLog();
+ assertEquals("overriding a final parameter should cause logging", 1,
+ events.size());
+ LoggingEvent loggingEvent = events.get(0);
+ String renderedMessage = loggingEvent.getRenderedMessage();
+ assertTrue("did not see expected string inside message "+ renderedMessage,
+ renderedMessage.contains("an attempt to override final parameter: "
+ + "prop; Ignoring."));
+ } finally {
+ // Make sure the appender is removed
+ logger.removeAppender(appender);
+ }
+ }
+
+ public void testNoFinalWarnings() throws Exception {
+ // Make a configuration file with a final property
+ StringWriter writer = new StringWriter();
+ out = new BufferedWriter(writer);
+ startConfig();
+ declareProperty("prop", "A", "A", true);
+ endConfig();
+ byte[] bytes = writer.toString().getBytes();
+ // The 2 input streams both have the same config file
+ InputStream in1 = new ByteArrayInputStream(bytes);
+ InputStream in2 = new ByteArrayInputStream(bytes);
+
+ // Attach our own log appender so we can verify output
+ TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+
+ try {
+ // Add the resource twice from a stream - should not generate warnings
+ conf.addResource(in1);
+ conf.addResource(in2);
+ assertEquals("A", conf.get("prop"));
+
+ List<LoggingEvent> events = appender.getLog();
+ for (LoggingEvent loggingEvent : events) {
+ System.out.println("Event = " + loggingEvent.getRenderedMessage());
+ }
+ assertTrue("adding same resource twice should not cause logging",
+ events.isEmpty());
+ } finally {
+ // Make sure the appender is removed
+ logger.removeAppender(appender);
+ }
+ }
+
+
+
+ public void testFinalWarningsMultiple() throws Exception {
+ // Make a configuration file with a repeated final property
+ StringWriter writer = new StringWriter();
+ out = new BufferedWriter(writer);
+ startConfig();
+ declareProperty("prop", "A", "A", true);
+ declareProperty("prop", "A", "A", true);
+ endConfig();
+ byte[] bytes = writer.toString().getBytes();
+ InputStream in1 = new ByteArrayInputStream(bytes);
+
+ // Attach our own log appender so we can verify output
+ TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+
+ try {
+ // Add the resource - this should not produce a warning
+ conf.addResource(in1);
+ assertEquals("should see the value", "A", conf.get("prop"));
+
+ List<LoggingEvent> events = appender.getLog();
+ for (LoggingEvent loggingEvent : events) {
+ System.out.println("Event = " + loggingEvent.getRenderedMessage());
+ }
+ assertTrue("adding same resource twice should not cause logging",
+ events.isEmpty());
+ } finally {
+ // Make sure the appender is removed
+ logger.removeAppender(appender);
+ }
+ }
+
+ public void testFinalWarningsMultipleOverride() throws Exception {
+ // Make a configuration file with 2 final properties with different values
+ StringWriter writer = new StringWriter();
+ out = new BufferedWriter(writer);
+ startConfig();
+ declareProperty("prop", "A", "A", true);
+ declareProperty("prop", "BB", "BB", true);
+ endConfig();
+ byte[] bytes = writer.toString().getBytes();
+ InputStream in1 = new ByteArrayInputStream(bytes);
+
+ // Attach our own log appender so we can verify output
+ TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+
+ try {
+ // Add the resource - this should produce a warning
+ conf.addResource(in1);
+ assertEquals("should see the value", "A", conf.get("prop"));
+
+ List<LoggingEvent> events = appender.getLog();
+ assertEquals("overriding a final parameter should cause logging", 1,
+ events.size());
+ LoggingEvent loggingEvent = events.get(0);
+ String renderedMessage = loggingEvent.getRenderedMessage();
+ assertTrue("did not see expected string inside message "+ renderedMessage,
+ renderedMessage.contains("an attempt to override final parameter: "
+ + "prop; Ignoring."));
+ } finally {
+ // Make sure the appender is removed
+ logger.removeAppender(appender);
+ }
+ }
+
+ /**
+ * A simple appender for white box testing.
+ */
+ private static class TestAppender extends AppenderSkeleton {
+ private final List<LoggingEvent> log = new ArrayList<>();
+
+ @Override public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override protected void append(final LoggingEvent loggingEvent) {
+ log.add(loggingEvent);
+ }
+
+ @Override public void close() {
+ }
+
+ public List<LoggingEvent> getLog() {
+ return new ArrayList<>(log);
+ }
+ }
+
/**
* Tests use of multi-byte characters in property names and values. This test
* round-trips multi-byte string literals through saving and loading of config
@@ -701,8 +887,6 @@ public class TestConfiguration extends TestCase {
new File(new File(relConfig).getParent()).delete();
}
- BufferedWriter out;
-
public void testIntegerRanges() {
Configuration conf = new Configuration();
conf.set("first", "-100");
@@ -1610,8 +1794,41 @@ public class TestConfiguration extends TestCase {
assertEquals(fileResource.toString(),prop.getResource());
}
}
-
-
+
+ public void testDumpSensitiveProperty() throws IOException {
+ final String myPassword = "ThisIsMyPassword";
+ Configuration testConf = new Configuration(false);
+ out = new BufferedWriter(new FileWriter(CONFIG));
+ startConfig();
+ appendProperty("test.password", myPassword);
+ endConfig();
+ Path fileResource = new Path(CONFIG);
+ testConf.addResource(fileResource);
+
+ try (StringWriter outWriter = new StringWriter()) {
+ testConf.set(SENSITIVE_CONFIG_KEYS, "password$");
+ Configuration.dumpConfiguration(testConf, "test.password", outWriter);
+ assertFalse(outWriter.toString().contains(myPassword));
+ }
+ }
+
+ public void testDumpSensitiveConfiguration() throws IOException {
+ final String myPassword = "ThisIsMyPassword";
+ Configuration testConf = new Configuration(false);
+ out = new BufferedWriter(new FileWriter(CONFIG));
+ startConfig();
+ appendProperty("test.password", myPassword);
+ endConfig();
+ Path fileResource = new Path(CONFIG);
+ testConf.addResource(fileResource);
+
+ try (StringWriter outWriter = new StringWriter()) {
+ testConf.set(SENSITIVE_CONFIG_KEYS, "password$");
+ Configuration.dumpConfiguration(testConf, outWriter);
+ assertFalse(outWriter.toString().contains(myPassword));
+ }
+ }
+
public void testGetValByRegex() {
Configuration conf = new Configuration();
String key1 = "t.abc.key1";
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
index 1962f49ccec..61a688ea4ee 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileStatus.java
@@ -36,6 +36,7 @@ import org.junit.Test;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 527b9eb8e43..90eaa2a65fc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -17,11 +17,13 @@
*/
package org.apache.hadoop.fs;
+import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.StringUtils;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
@@ -31,7 +33,11 @@ import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import java.io.*;
import java.net.URI;
import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
import java.util.Random;
+import java.util.Set;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
@@ -46,6 +52,8 @@ import org.junit.Test;
import org.junit.rules.Timeout;
import org.mockito.internal.util.reflection.Whitebox;
+import javax.annotation.Nonnull;
+
/**
* This class tests the local file system via the FileSystem abstraction.
@@ -210,8 +218,8 @@ public class TestLocalFileSystem {
@Test
public void testHomeDirectory() throws IOException {
- Path home = new Path(System.getProperty("user.home"))
- .makeQualified(fileSys);
+ Path home = fileSys.makeQualified(
+ new Path(System.getProperty("user.home")));
Path fsHome = fileSys.getHomeDirectory();
assertEquals(home, fsHome);
}
@@ -221,7 +229,7 @@ public class TestLocalFileSystem {
Path path = new Path(TEST_ROOT_DIR, "foo%bar");
writeFile(fileSys, path, 1);
FileStatus status = fileSys.getFileStatus(path);
- assertEquals(path.makeQualified(fileSys), status.getPath());
+ assertEquals(fileSys.makeQualified(path), status.getPath());
cleanupFile(fileSys, path);
}
@@ -659,7 +667,7 @@ public class TestLocalFileSystem {
try {
FSDataOutputStreamBuilder builder =
- fileSys.createFile(path);
+ fileSys.createFile(path).recursive();
FSDataOutputStream out = builder.build();
String content = "Create with a generic type of createFile!";
byte[] contentOrigin = content.getBytes("UTF8");
@@ -703,4 +711,66 @@ public class TestLocalFileSystem {
Assert.assertEquals("Buffer size should be 0",
builder.getBufferSize(), 0);
}
+
+ /**
+ * A builder to verify configuration keys are supported.
+ */
+ private static class BuilderWithSupportedKeys
+ extends FSDataOutputStreamBuilder<FSDataOutputStream, BuilderWithSupportedKeys> {
+
+ private final Set<String> supportedKeys = new HashSet<>();
+
+ BuilderWithSupportedKeys(@Nonnull final Collection<String> supportedKeys,
+ @Nonnull FileSystem fileSystem, @Nonnull Path p) {
+ super(fileSystem, p);
+ this.supportedKeys.addAll(supportedKeys);
+ }
+
+ @Override
+ protected BuilderWithSupportedKeys getThisBuilder() {
+ return this;
+ }
+
+ @Override
+ public FSDataOutputStream build()
+ throws IllegalArgumentException, IOException {
+ Set<String> unsupported = new HashSet<>(getMandatoryKeys());
+ unsupported.removeAll(supportedKeys);
+ Preconditions.checkArgument(unsupported.isEmpty(),
+ "unsupported key found: " + supportedKeys);
+ return getFS().create(
+ getPath(), getPermission(), getFlags(), getBufferSize(),
+ getReplication(), getBlockSize(), getProgress(), getChecksumOpt());
+ }
+ }
+
+ @Test
+ public void testFSOutputStreamBuilderOptions() throws Exception {
+ Path path = new Path(TEST_ROOT_DIR, "testBuilderOpt");
+ final List<String> supportedKeys = Arrays.asList("strM");
+
+ FSDataOutputStreamBuilder<?, ?> builder =
+ new BuilderWithSupportedKeys(supportedKeys, fileSys, path);
+ builder.opt("strKey", "value");
+ builder.opt("intKey", 123);
+ builder.opt("strM", "ignored");
+ // Over-write an optional value with a mandatory value.
+ builder.must("strM", "value");
+ builder.must("unsupported", 12.34);
+
+ assertEquals("Optional value should be overwrite by a mandatory value",
+ "value", builder.getOptions().get("strM"));
+
+ Set<String> mandatoryKeys = builder.getMandatoryKeys();
+ Set<String> expectedKeys = new HashSet<>();
+ expectedKeys.add("strM");
+ expectedKeys.add("unsupported");
+ assertEquals(expectedKeys, mandatoryKeys);
+ assertEquals(2, mandatoryKeys.size());
+
+ LambdaTestUtils.intercept(IllegalArgumentException.class,
+ "unsupported key found", builder::build
+ );
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
index 6b3e98bd95a..d61b6354498 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
@@ -60,6 +60,19 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
}
+ @Test
+ public void testBuilderAppendToEmptyFile() throws Throwable {
+ touch(getFileSystem(), target);
+ byte[] dataset = dataset(256, 'a', 'z');
+ try (FSDataOutputStream outputStream =
+ getFileSystem().appendFile(target).build()) {
+ outputStream.write(dataset);
+ }
+ byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
+ dataset.length);
+ ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length);
+ }
+
@Test
public void testAppendNonexistentFile() throws Throwable {
try {
@@ -78,15 +91,29 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
byte[] original = dataset(8192, 'A', 'Z');
byte[] appended = dataset(8192, '0', '9');
createFile(getFileSystem(), target, false, original);
- FSDataOutputStream outputStream = getFileSystem().append(target);
- outputStream.write(appended);
- outputStream.close();
+ try (FSDataOutputStream out = getFileSystem().append(target)) {
+ out.write(appended);
+ }
byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
original.length + appended.length);
ContractTestUtils.validateFileContent(bytes,
new byte[] [] { original, appended });
}
+ @Test
+ public void testBuilderAppendToExistingFile() throws Throwable {
+ byte[] original = dataset(8192, 'A', 'Z');
+ byte[] appended = dataset(8192, '0', '9');
+ createFile(getFileSystem(), target, false, original);
+ try (FSDataOutputStream out = getFileSystem().appendFile(target).build()) {
+ out.write(appended);
+ }
+ byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), target,
+ original.length + appended.length);
+ ContractTestUtils.validateFileContent(bytes,
+ new byte[][]{original, appended});
+ }
+
@Test
public void testAppendMissingTarget() throws Throwable {
try {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index a9ce6078023..2053f50b6bc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -47,24 +47,37 @@ public abstract class AbstractContractCreateTest extends
*/
public static final int CREATE_TIMEOUT = 15000;
- @Test
- public void testCreateNewFile() throws Throwable {
- describe("Foundational 'create a file' test");
- Path path = path("testCreateNewFile");
+ protected Path path(String filepath, boolean useBuilder) throws IOException {
+ return super.path(filepath + (useBuilder ? "" : "-builder"));
+ }
+
+ private void testCreateNewFile(boolean useBuilder) throws Throwable {
+ describe("Foundational 'create a file' test, using builder API=" +
+ useBuilder);
+ Path path = path("testCreateNewFile", useBuilder);
byte[] data = dataset(256, 'a', 'z');
- writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false);
+ writeDataset(getFileSystem(), path, data, data.length, 1024 * 1024, false,
+ useBuilder);
ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
}
@Test
- public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
- describe("Verify overwriting an existing file fails");
- Path path = path("testCreateFileOverExistingFileNoOverwrite");
+ public void testCreateNewFile() throws Throwable {
+ testCreateNewFile(true);
+ testCreateNewFile(false);
+ }
+
+ private void testCreateFileOverExistingFileNoOverwrite(boolean useBuilder)
+ throws Throwable {
+ describe("Verify overwriting an existing file fails, using builder API=" +
+ useBuilder);
+ Path path = path("testCreateFileOverExistingFileNoOverwrite", useBuilder);
byte[] data = dataset(256, 'a', 'z');
writeDataset(getFileSystem(), path, data, data.length, 1024, false);
byte[] data2 = dataset(10 * 1024, 'A', 'Z');
try {
- writeDataset(getFileSystem(), path, data2, data2.length, 1024, false);
+ writeDataset(getFileSystem(), path, data2, data2.length, 1024, false,
+ useBuilder);
fail("writing without overwrite unexpectedly succeeded");
} catch (FileAlreadyExistsException expected) {
//expected
@@ -76,6 +89,26 @@ public abstract class AbstractContractCreateTest extends
}
}
+ @Test
+ public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
+ testCreateFileOverExistingFileNoOverwrite(false);
+ testCreateFileOverExistingFileNoOverwrite(true);
+ }
+
+ private void testOverwriteExistingFile(boolean useBuilder) throws Throwable {
+ describe("Overwrite an existing file and verify the new data is there, " +
+ "use builder API=" + useBuilder);
+ Path path = path("testOverwriteExistingFile", useBuilder);
+ byte[] data = dataset(256, 'a', 'z');
+ writeDataset(getFileSystem(), path, data, data.length, 1024, false,
+ useBuilder);
+ ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
+ byte[] data2 = dataset(10 * 1024, 'A', 'Z');
+ writeDataset(getFileSystem(), path, data2, data2.length, 1024, true,
+ useBuilder);
+ ContractTestUtils.verifyFileContents(getFileSystem(), path, data2);
+ }
+
/**
* This test catches some eventual consistency problems that blobstores exhibit,
* as we are implicitly verifying that updates are consistent. This
@@ -84,25 +117,21 @@ public abstract class AbstractContractCreateTest extends
*/
@Test
public void testOverwriteExistingFile() throws Throwable {
- describe("Overwrite an existing file and verify the new data is there");
- Path path = path("testOverwriteExistingFile");
- byte[] data = dataset(256, 'a', 'z');
- writeDataset(getFileSystem(), path, data, data.length, 1024, false);
- ContractTestUtils.verifyFileContents(getFileSystem(), path, data);
- byte[] data2 = dataset(10 * 1024, 'A', 'Z');
- writeDataset(getFileSystem(), path, data2, data2.length, 1024, true);
- ContractTestUtils.verifyFileContents(getFileSystem(), path, data2);
+ testOverwriteExistingFile(false);
+ testOverwriteExistingFile(true);
}
- @Test
- public void testOverwriteEmptyDirectory() throws Throwable {
- describe("verify trying to create a file over an empty dir fails");
+ private void testOverwriteEmptyDirectory(boolean useBuilder)
+ throws Throwable {
+ describe("verify trying to create a file over an empty dir fails, " +
+ "use builder API=" + useBuilder);
Path path = path("testOverwriteEmptyDirectory");
mkdirs(path);
assertIsDirectory(path);
byte[] data = dataset(256, 'a', 'z');
try {
- writeDataset(getFileSystem(), path, data, data.length, 1024, true);
+ writeDataset(getFileSystem(), path, data, data.length, 1024, true,
+ useBuilder);
assertIsDirectory(path);
fail("write of file over empty dir succeeded");
} catch (FileAlreadyExistsException expected) {
@@ -121,8 +150,15 @@ public abstract class AbstractContractCreateTest extends
}
@Test
- public void testOverwriteNonEmptyDirectory() throws Throwable {
- describe("verify trying to create a file over a non-empty dir fails");
+ public void testOverwriteEmptyDirectory() throws Throwable {
+ testOverwriteEmptyDirectory(false);
+ testOverwriteEmptyDirectory(true);
+ }
+
+ private void testOverwriteNonEmptyDirectory(boolean useBuilder)
+ throws Throwable {
+ describe("verify trying to create a file over a non-empty dir fails, " +
+ "use builder API=" + useBuilder);
Path path = path("testOverwriteNonEmptyDirectory");
mkdirs(path);
try {
@@ -140,7 +176,7 @@ public abstract class AbstractContractCreateTest extends
byte[] data = dataset(256, 'a', 'z');
try {
writeDataset(getFileSystem(), path, data, data.length, 1024,
- true);
+ true, useBuilder);
FileStatus status = getFileSystem().getFileStatus(path);
boolean isDir = status.isDirectory();
@@ -166,6 +202,12 @@ public abstract class AbstractContractCreateTest extends
assertIsFile(child);
}
+ @Test
+ public void testOverwriteNonEmptyDirectory() throws Throwable {
+ testOverwriteNonEmptyDirectory(false);
+ testOverwriteNonEmptyDirectory(true);
+ }
+
@Test
public void testCreatedFileIsImmediatelyVisible() throws Throwable {
describe("verify that a newly created file exists as soon as open returns");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index e60fd4347e4..8c01d2b776d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -70,7 +70,8 @@ public class ContractTestUtils extends Assert {
* Assert that a property in the property set matches the expected value.
* @param props property set
* @param key property name
- * @param expected expected value. If null, the property must not be in the set
+ * @param expected expected value. If null, the property must not be in the
+ * set
*/
public static void assertPropertyEquals(Properties props,
String key,
@@ -146,16 +147,45 @@ public class ContractTestUtils extends Assert {
int len,
int buffersize,
boolean overwrite) throws IOException {
+ writeDataset(fs, path, src, len, buffersize, overwrite, false);
+ }
+
+ /**
+ * Write a file.
+ * Optional flags control whether file overwrite operations should be
+ * enabled, and whether the file should be created through the
+ * {@link org.apache.hadoop.fs.FSDataOutputStreamBuilder} API.
+ *
+ * @param fs filesystem
+ * @param path path to write to
+ * @param src source data
+ * @param len length of data
+ * @param buffersize buffer size to use when writing
+ * @param overwrite should the create option allow overwrites?
+ * @param useBuilder should the builder API be used to create the file?
+ * @throws IOException IO problems
+ */
+ public static void writeDataset(FileSystem fs, Path path, byte[] src,
+ int len, int buffersize, boolean overwrite, boolean useBuilder)
+ throws IOException {
assertTrue(
"Not enough data in source array to write " + len + " bytes",
src.length >= len);
- FSDataOutputStream out = fs.create(path,
- overwrite,
- fs.getConf()
- .getInt(IO_FILE_BUFFER_SIZE_KEY,
- IO_FILE_BUFFER_SIZE_DEFAULT),
- (short) 1,
- buffersize);
+ FSDataOutputStream out;
+ if (useBuilder) {
+ out = fs.createFile(path)
+ .overwrite(overwrite)
+ .replication((short) 1)
+ .bufferSize(buffersize)
+ .blockSize(buffersize)
+ .build();
+ } else {
+ out = fs.create(path,
+ overwrite,
+ fs.getConf()
+ .getInt(IO_FILE_BUFFER_SIZE_KEY,
+ IO_FILE_BUFFER_SIZE_DEFAULT),
+ (short) 1,
+ buffersize);
+ }
out.write(src, 0, len);
out.close();
assertFileHasLength(fs, path, len);
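
As a usage note, the new overload lets callers toggle between the classic FileSystem.create() path and the FSDataOutputStreamBuilder path. A minimal sketch of driving both paths against a local filesystem; the target path and sizes here are illustrative assumptions, not taken from this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.contract.ContractTestUtils;

    public class WriteDatasetSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path target = new Path("/tmp/contract-demo.bin");   // illustrative path
        byte[] payload = ContractTestUtils.dataset(256, 'a', 'z');

        // Classic FileSystem.create() path.
        ContractTestUtils.writeDataset(fs, target, payload, payload.length,
            1024, true /* overwrite */, false /* useBuilder */);

        // Builder path: fs.createFile(path).overwrite(...).build().
        ContractTestUtils.writeDataset(fs, target, payload, payload.length,
            1024, true /* overwrite */, true /* useBuilder */);

        ContractTestUtils.verifyFileContents(fs, target, payload);
      }
    }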
@@ -203,7 +233,7 @@ public class ContractTestUtils extends Assert {
assertTrue("not a file " + statText, stat.isFile());
assertEquals("wrong length " + statText, original.length, stat.getLen());
byte[] bytes = readDataset(fs, path, original.length);
- compareByteArrays(original,bytes,original.length);
+ compareByteArrays(original, bytes, original.length);
}
/**
@@ -222,7 +252,7 @@ public class ContractTestUtils extends Assert {
stm.readFully(out);
byte[] expected = Arrays.copyOfRange(fileContents, seekOff,
seekOff + toRead);
- compareByteArrays(expected, out,toRead);
+ compareByteArrays(expected, out, toRead);
}
/**
@@ -239,11 +269,11 @@ public class ContractTestUtils extends Assert {
assertEquals("Number of bytes read != number written",
len, received.length);
int errors = 0;
- int first_error_byte = -1;
+ int firstErrorByte = -1;
for (int i = 0; i < len; i++) {
if (original[i] != received[i]) {
if (errors == 0) {
- first_error_byte = i;
+ firstErrorByte = i;
}
errors++;
}
@@ -256,8 +286,8 @@ public class ContractTestUtils extends Assert {
// the range either side of the first error to print
// this is a purely arbitrary number, to aid user debugging
final int overlap = 10;
- for (int i = Math.max(0, first_error_byte - overlap);
- i < Math.min(first_error_byte + overlap, len);
+ for (int i = Math.max(0, firstErrorByte - overlap);
+ i < Math.min(firstErrorByte + overlap, len);
i++) {
byte actual = received[i];
byte expected = original[i];
@@ -450,7 +480,7 @@ public class ContractTestUtils extends Assert {
public static void downgrade(String message, Throwable failure) {
LOG.warn("Downgrading test " + message, failure);
AssumptionViolatedException ave =
- new AssumptionViolatedException(failure, null);
+ new AssumptionViolatedException(failure, null);
throw ave;
}
@@ -494,9 +524,9 @@ public class ContractTestUtils extends Assert {
int expected) throws IOException {
FileStatus status = fs.getFileStatus(path);
assertEquals(
- "Wrong file length of file " + path + " status: " + status,
- expected,
- status.getLen());
+ "Wrong file length of file " + path + " status: " + status,
+ expected,
+ status.getLen());
}
/**
@@ -682,7 +712,8 @@ public class ContractTestUtils extends Assert {
*/
public static String ls(FileSystem fileSystem, Path path) throws IOException {
if (path == null) {
- //surfaces when someone calls getParent() on something at the top of the path
+ // surfaces when someone calls getParent() on something at the top of the
+ // path
return "/";
}
FileStatus[] stats;
@@ -864,7 +895,7 @@ public class ContractTestUtils extends Assert {
}
/**
- * Test for the host being an OSX machine
+ * Test for the host being an OSX machine.
* @return true if the JVM thinks that is running on OSX
*/
public static boolean isOSX() {
@@ -887,8 +918,9 @@ public class ContractTestUtils extends Assert {
break;
}
}
- if (mismatch)
+ if (mismatch) {
break;
+ }
}
assertFalse("File content of file is not as expected at offset " + idx,
mismatch);
@@ -998,7 +1030,9 @@ public class ContractTestUtils extends Assert {
* @throws IOException
* thrown if an I/O error occurs while writing or reading the test file
*/
- public static void createAndVerifyFile(FileSystem fs, Path parent, final long fileSize)
+ public static void createAndVerifyFile(FileSystem fs,
+ Path parent,
+ final long fileSize)
throws IOException {
int testBufferSize = fs.getConf()
.getInt(IO_CHUNK_BUFFER_SIZE, DEFAULT_IO_CHUNK_BUFFER_SIZE);
@@ -1495,13 +1529,21 @@ public class ContractTestUtils extends Assert {
* printing some useful results in the process.
*/
public static final class NanoTimer {
- private final long startTime;
+ private long startTime;
private long endTime;
public NanoTimer() {
startTime = now();
}
+ /**
+ * Reset the timer. Equivalent to the reset button of a stopwatch.
+ */
+ public void reset() {
+ endTime = 0;
+ startTime = now();
+ }
+
/**
* End the operation.
* @return the duration of the operation
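
The new reset() makes a single NanoTimer reusable across repeated measurements, with end() still returning the elapsed duration. A small usage sketch; the timed workload is a placeholder assumption:

    import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;

    public class NanoTimerSketch {
      public static void main(String[] args) {
        NanoTimer timer = new NanoTimer();
        for (int i = 0; i < 3; i++) {
          timer.reset();                 // restart the stopwatch
          doSomeWork();                  // placeholder for the timed operation
          long nanos = timer.end();      // elapsed duration of this iteration
          System.out.println("iteration " + i + " took " + nanos + " ns");
        }
      }

      private static void doSomeWork() {
        // placeholder workload
        Math.sqrt(123456.789);
      }
    }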
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/protocolPB/TestFSSerialization.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/protocolPB/TestFSSerialization.java
new file mode 100644
index 00000000000..31cacf786d8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/protocolPB/TestFSSerialization.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.protocolPB;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import static org.apache.hadoop.fs.FSProtos.*;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+/**
+ * Verify PB serialization of FS data structures.
+ */
+public class TestFSSerialization {
+
+ @Test
+ @SuppressWarnings("deprecation")
+ public void testWritableFlagSerialization() throws Exception {
+ final Path p = new Path("hdfs://yaks:4344/dingos/f");
+ for (int i = 0; i < 0x8; ++i) {
+ final boolean acl = 0 != (i & 0x1);
+ final boolean crypt = 0 != (i & 0x2);
+ final boolean ec = 0 != (i & 0x4);
+ FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
+ 12345678L, 87654321L, FsPermission.getFileDefault(),
+ "hadoop", "unqbbc", null, p, acl, crypt, ec);
+ DataOutputBuffer dob = new DataOutputBuffer();
+ stat.write(dob);
+ DataInputBuffer dib = new DataInputBuffer();
+ dib.reset(dob.getData(), 0, dob.getLength());
+ FileStatus fstat = new FileStatus();
+ fstat.readFields(dib);
+ assertEquals(stat, fstat);
+ checkFields(stat, fstat);
+ }
+ }
+
+ @Test
+ public void testUtilitySerialization() throws Exception {
+ final Path p = new Path("hdfs://yaks:4344/dingos/f");
+ FileStatus stat = new FileStatus(1024L, false, 3, 1L << 31,
+ 12345678L, 87654321L, FsPermission.createImmutable((short)0111),
+ "hadoop", "unqbbc", null, p);
+ FileStatusProto fsp = PBHelper.convert(stat);
+ FileStatus stat2 = PBHelper.convert(fsp);
+ assertEquals(stat, stat2);
+ checkFields(stat, stat2);
+ }
+
+ private static void checkFields(FileStatus expected, FileStatus actual) {
+ assertEquals(expected.getPath(), actual.getPath());
+ assertEquals(expected.isDirectory(), actual.isDirectory());
+ assertEquals(expected.getLen(), actual.getLen());
+ assertEquals(expected.getPermission(), actual.getPermission());
+ assertEquals(expected.getOwner(), actual.getOwner());
+ assertEquals(expected.getGroup(), actual.getGroup());
+ assertEquals(expected.getModificationTime(), actual.getModificationTime());
+ assertEquals(expected.getAccessTime(), actual.getAccessTime());
+ assertEquals(expected.getReplication(), actual.getReplication());
+ assertEquals(expected.getBlockSize(), actual.getBlockSize());
+ assertEquals(expected.hasAcl(), actual.hasAcl());
+ assertEquals(expected.isEncrypted(), actual.isEncrypted());
+ assertEquals(expected.isErasureCoded(), actual.isErasureCoded());
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index 6ec6e0f965e..ca7e466b79e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -682,4 +682,17 @@ public class TestHttpServer extends HttpServerFunctionalTest {
stopHttpServer(myServer2);
}
}
+
+ @Test
+ public void testBacklogSize() throws Exception
+ {
+ final int backlogSize = 2048;
+ Configuration conf = new Configuration();
+ conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, backlogSize);
+ HttpServer2 srv = createServer("test", conf);
+ List<?> listeners = (List<?>) Whitebox.getInternalState(srv,
+ "listeners");
+ ServerConnector listener = (ServerConnector)listeners.get(0);
+ assertEquals(backlogSize, listener.getAcceptQueueSize());
+ }
}
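
For reference, the backlog only takes effect when set on the Configuration used to build the server. A minimal sketch of just the configuration step, mirroring the key used in the test above (the chosen value is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;

    public class BacklogConfigSketch {
      public static void main(String[] args) {
        // Raise the accept queue (listen backlog) before building the server.
        Configuration conf = new Configuration();
        conf.setInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, 2048);
        // A server built from this Configuration should report
        // ServerConnector#getAcceptQueueSize() == 2048, as the test asserts.
        System.out.println("backlog = "
            + conf.getInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY, -1));
      }
    }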
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
index 25e2ce9be98..58537adf5cf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java
@@ -131,4 +131,9 @@ public class TestGenericsUtil extends TestCase {
GenericClass.class, c2);
}
+ public void testIsLog4jLogger() throws Exception {
+ assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger(null));
+ assertTrue("The implementation is Log4j",
+ GenericsUtil.isLog4jLogger(TestGenericsUtil.class));
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java
new file mode 100644
index 00000000000..3e78a44fa70
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util.curator;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.curator.test.TestingServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test the manager for ZooKeeper Curator.
+ */
+public class TestZKCuratorManager {
+
+ private TestingServer server;
+ private ZKCuratorManager curator;
+
+ @Before
+ public void setup() throws Exception {
+ this.server = new TestingServer();
+
+ Configuration conf = new Configuration();
+ conf.set(
+ CommonConfigurationKeys.ZK_ADDRESS, this.server.getConnectString());
+
+ this.curator = new ZKCuratorManager(conf);
+ this.curator.start();
+ }
+
+ @After
+ public void teardown() throws Exception {
+ this.curator.close();
+ if (this.server != null) {
+ this.server.close();
+ this.server = null;
+ }
+ }
+
+ @Test
+ public void testReadWriteData() throws Exception {
+ String testZNode = "/test";
+ String expectedString = "testString";
+ assertFalse(curator.exists(testZNode));
+ curator.create(testZNode);
+ assertTrue(curator.exists(testZNode));
+ curator.setData(testZNode, expectedString, -1);
+ String testString = curator.getStringData("/test");
+ assertEquals(expectedString, testString);
+ }
+
+ @Test
+ public void testChildren() throws Exception {
+ List<String> children = curator.getChildren("/");
+ assertEquals(1, children.size());
+
+ assertFalse(curator.exists("/node1"));
+ curator.create("/node1");
+ assertTrue(curator.exists("/node1"));
+
+ assertFalse(curator.exists("/node2"));
+ curator.create("/node2");
+ assertTrue(curator.exists("/node2"));
+
+ children = curator.getChildren("/");
+ assertEquals(3, children.size());
+
+ curator.delete("/node2");
+ assertFalse(curator.exists("/node2"));
+ children = curator.getChildren("/");
+ assertEquals(2, children.size());
+ }
+}
\ No newline at end of file
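
A minimal sketch of the same ZKCuratorManager calls outside the test harness, against an assumed ZooKeeper quorum address; only methods exercised by this test are used:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.util.curator.ZKCuratorManager;

    public class ZKCuratorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeys.ZK_ADDRESS, "localhost:2181"); // assumed quorum
        ZKCuratorManager curator = new ZKCuratorManager(conf);
        curator.start();
        try {
          if (!curator.exists("/demo")) {
            curator.create("/demo");
          }
          curator.setData("/demo", "hello", -1);   // -1: ignore znode version
          System.out.println(curator.getStringData("/demo"));
        } finally {
          curator.close();
        }
      }
    }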
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 64677f86f7a..6a3d53ad2de 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -919,15 +919,19 @@
RegexpComparator
- ^( |\t)*of owner \(%u\), modification date \(%y, %Y\).( )*
+          ^( |\t)*of owner \(%u\), access date \(%x, %X\).( )*
          RegexpComparator
- ^( |\t)*%y shows UTC date as "yyyy-MM-dd HH:mm:ss" and( )*
+          ^( |\t)*modification date \(%y, %Y\).( )*
          RegexpComparator
- ^( |\t)*%Y shows milliseconds since January 1, 1970 UTC.( )*
+ ^( |\t)*%x and %y show UTC date as "yyyy-MM-dd HH:mm:ss" and( )*
+
+
+ RegexpComparator
+          ^( |\t)*%X and %Y show milliseconds since January 1, 1970 UTC.( )*
          RegexpComparator
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
index 86608edd93a..fa34bdfc4b5 100755
--- a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_array_param.bats
old mode 100755
new mode 100644
similarity index 58%
rename from hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
rename to hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_array_param.bats
index 9b031f254fb..03264c18d77
--- a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_array_param.bats
@@ -15,18 +15,23 @@
load hadoop-functions_test_helper
-@test "hadoop_escape_sed (positive 1)" {
- ret="$(hadoop_sed_escape "\pass&&word\0#\$asdf/g ><'\"~\`!@#$%^&*()_+-=")"
- expected="\\\\pass\&\&word\\\0#\$asdf\/g ><'\"~\`!@#$%^\&*()_+-="
- echo "actual >${ret}<"
- echo "expected >${expected}<"
- [ "${ret}" = "${expected}" ]
+@test "hadoop_add_array_param (empty)" {
+ hadoop_add_array_param ARRAY value
+ [ "${ARRAY[0]}" = value ]
+}
+
+@test "hadoop_add_array_param (exist)" {
+ ARRAY=("val2")
+ hadoop_add_array_param ARRAY val1
+ [ "${ARRAY[0]}" = val2 ]
+ [ "${ARRAY[1]}" = val1 ]
+}
+
+@test "hadoop_add_array_param (double exist)" {
+ ARRAY=("val2" "val1")
+ hadoop_add_array_param ARRAY val3
+ [ "${ARRAY[0]}" = val2 ]
+ [ "${ARRAY[1]}" = val1 ]
+ [ "${ARRAY[2]}" = val3 ]
}
-@test "hadoop_escape_xml (positive 1)" {
- ret="$(hadoop_xml_escape "\pass&&word\0#\$asdf/g ><'\"~\`!@#$%^&*()_+-=")"
- expected="\\pass&&word\0#\$asdf/g \>\<\'\"~\`!@#\$%^&*()_+-="
- echo "actual >${ret}<"
- echo "expected >${expected}<"
- [ "${ret}" = "${expected}" ]
-}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_array_contains.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_array_contains.bats
new file mode 100644
index 00000000000..01cb4e3bc48
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_array_contains.bats
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_array_contains (empty)" {
+ run hadoop_array_contains value "${ARRAY[@]}"
+ [ "${status}" = 1 ]
+}
+
+@test "hadoop_array_contains (exist)" {
+ ARRAY=("value")
+ run hadoop_array_contains value "${ARRAY[@]}"
+ [ "${status}" = 0 ]
+}
+
+@test "hadoop_array_contains (notexist)" {
+ ARRAY=("different")
+ run hadoop_array_contains value "${ARRAY[@]}"
+ [ "${status}" = 1 ]
+}
+
+@test "hadoop_array_contains (exist, multi)" {
+ ARRAY=("val1" "val2" "val3")
+ for j in val1 val2 val3; do
+ run hadoop_array_contains "${j}" "${ARRAY[@]}"
+ [ "${status}" = 0 ]
+ done
+}
+
+@test "hadoop_array_contains (multi, not exist)" {
+ ARRAY=("val1" "val2" "val3")
+ run hadoop_array_contains value "${ARRAY[@]}"
+ [ "${status}" = 1 ]
+}
diff --git a/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_sort_array.bats
similarity index 62%
rename from hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
rename to hadoop-common-project/hadoop-common/src/test/scripts/hadoop_sort_array.bats
index 9f4922bb7fe..7a18b5d0cba 100644
--- a/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_sort_array.bats
@@ -13,5 +13,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-org.apache.hadoop.fs.azure.NativeAzureFileSystem
-org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure
\ No newline at end of file
+load hadoop-functions_test_helper
+
+@test "hadoop_sort_array (empty)" {
+ hadoop_sort_array ARRAY
+}
+
+@test "hadoop_sort_array (single value)" {
+ ARRAY=("value")
+ hadoop_sort_array ARRAY
+}
+
+@test "hadoop_sort_array (multiple value)" {
+ ARRAY=("b" "c" "a")
+ preifsod=$(echo "${IFS}" | od -c)
+ hadoop_sort_array ARRAY
+ postifsod=$(echo "${IFS}" | od -c)
+
+ [ "${ARRAY[0]}" = "a" ]
+ [ "${ARRAY[1]}" = "b" ]
+ [ "${ARRAY[2]}" = "c" ]
+ [ "${preifsod}" = "${postifsod}" ]
+}
diff --git a/hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh b/hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh
index c5307163468..0d084bb36e6 100755
--- a/hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/libexec/shellprofile.d/hadoop-kms.sh
@@ -16,7 +16,7 @@
# limitations under the License.
if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
- hadoop_add_subcommand "kms" "run KMS, the Key Management Server"
+ hadoop_add_subcommand "kms" daemon "run KMS, the Key Management Server"
fi
## @description Command handler for kms subcommand
@@ -54,4 +54,4 @@ function hadoop_subcommand_kms
[[ "${HADOOP_DAEMON_MODE}" == "start" ]]; then
hadoop_mkdir "${KMS_TEMP:-${HADOOP_HOME}/temp}"
fi
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 1f6022ca356..47c14e23805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2764,7 +2764,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
- public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+ public Map<String, String> getErasureCodingCodecs() throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("getErasureCodingCodecs")) {
return namenode.getErasureCodingCodecs();
@@ -2774,25 +2774,43 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public AddECPolicyResponse[] addErasureCodingPolicies(
ErasureCodingPolicy[] policies) throws IOException {
checkOpen();
- return namenode.addErasureCodingPolicies(policies);
+ try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
+ return namenode.addErasureCodingPolicies(policies);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class);
+ }
}
public void removeErasureCodingPolicy(String ecPolicyName)
throws IOException {
checkOpen();
- namenode.removeErasureCodingPolicy(ecPolicyName);
+ try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
+ namenode.removeErasureCodingPolicy(ecPolicyName);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class);
+ }
}
public void enableErasureCodingPolicy(String ecPolicyName)
throws IOException {
checkOpen();
- namenode.enableErasureCodingPolicy(ecPolicyName);
+ try (TraceScope ignored = tracer.newScope("enableErasureCodingPolicy")) {
+ namenode.enableErasureCodingPolicy(ecPolicyName);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ SafeModeException.class);
+ }
}
public void disableErasureCodingPolicy(String ecPolicyName)
throws IOException {
checkOpen();
- namenode.disableErasureCodingPolicy(ecPolicyName);
+ try (TraceScope ignored = tracer.newScope("disableErasureCodingPolicy")) {
+ namenode.disableErasureCodingPolicy(ecPolicyName);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ SafeModeException.class);
+ }
}
public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
@@ -3026,7 +3044,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*
* @param src path to get the information for
* @return Returns the policy information if file or directory on the path is
- * erasure coded, null otherwise
+ * erasure coded, null otherwise. Null is also returned if the
+ * directory or file has the REPLICATION policy.
* @throws IOException
*/
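
Since the erasure coding admin calls above now unwrap RemoteException, callers can catch the local exception types directly. A hedged sketch, assuming DistributedFileSystem exposes the same enableErasureCodingPolicy() call as DFSClient; the policy name is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.security.AccessControlException;

    public class EcPolicyAdminSketch {
      // dfs is an already-opened DistributedFileSystem handle (assumption).
      static void enablePolicy(DistributedFileSystem dfs, String policyName)
          throws IOException {
        try {
          // e.g. policyName = "RS-6-3-1024k" (illustrative)
          dfs.enableErasureCodingPolicy(policyName);
        } catch (AccessControlException ace) {
          // Surfaces directly, no longer wrapped in RemoteException.
          System.err.println("Not permitted to enable " + policyName + ": "
              + ace.getMessage());
        }
      }
    }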
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
index 748edcdb275..b58cf16a323 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java
@@ -61,4 +61,6 @@ public class DFSClientFaultInjector {
public boolean skipRollingRestartWait() {
return false;
}
+
+ public void sleepBeforeHedgedGet() {}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index dcc997c173a..97d3de4a96e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -830,60 +830,85 @@ public class DFSInputStream extends FSInputStream
private DNAddrPair chooseDataNode(LocatedBlock block,
      Collection<DatanodeInfo> ignoredNodes) throws IOException {
+ return chooseDataNode(block, ignoredNodes, true);
+ }
+
+ /**
+ * Choose datanode to read from.
+ *
+ * @param block Block to choose a datanode address from
+ * @param ignoredNodes Nodes to ignore when choosing.
+ * @param refetchIfRequired Whether to refetch block locations if there are
+ *                          no nodes to choose from.
+ * @return The chosen DNAddrPair; may be null if refetchIfRequired is
+ *         false.
+ */
+ private DNAddrPair chooseDataNode(LocatedBlock block,
+ Collection<DatanodeInfo> ignoredNodes, boolean refetchIfRequired)
+ throws IOException {
while (true) {
DNAddrPair result = getBestNodeDNAddrPair(block, ignoredNodes);
if (result != null) {
return result;
+ } else if (refetchIfRequired) {
+ block = refetchLocations(block, ignoredNodes);
} else {
- String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
- deadNodes, ignoredNodes);
- String blockInfo = block.getBlock() + " file=" + src;
- if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) {
- String description = "Could not obtain block: " + blockInfo;
- DFSClient.LOG.warn(description + errMsg
- + ". Throwing a BlockMissingException");
- throw new BlockMissingException(src, description,
- block.getStartOffset());
- }
-
- DatanodeInfo[] nodes = block.getLocations();
- if (nodes == null || nodes.length == 0) {
- DFSClient.LOG.info("No node available for " + blockInfo);
- }
- DFSClient.LOG.info("Could not obtain " + block.getBlock()
- + " from any node: " + errMsg
- + ". Will get new block locations from namenode and retry...");
- try {
- // Introducing a random factor to the wait time before another retry.
- // The wait time is dependent on # of failures and a random factor.
- // At the first time of getting a BlockMissingException, the wait time
- // is a random number between 0..3000 ms. If the first retry
- // still fails, we will wait 3000 ms grace period before the 2nd retry.
- // Also at the second retry, the waiting window is expanded to 6000 ms
- // alleviating the request rate from the server. Similarly the 3rd retry
- // will wait 6000ms grace period before retry and the waiting window is
- // expanded to 9000ms.
- final int timeWindow = dfsClient.getConf().getTimeWindow();
- double waitTime = timeWindow * failures + // grace period for the last round of attempt
- // expanding time window for each failure
- timeWindow * (failures + 1) *
- ThreadLocalRandom.current().nextDouble();
- DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
- " IOException, will wait for " + waitTime + " msec.");
- Thread.sleep((long)waitTime);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- throw new InterruptedIOException(
- "Interrupted while choosing DataNode for read.");
- }
- deadNodes.clear(); //2nd option is to remove only nodes[blockId]
- openInfo(true);
- block = refreshLocatedBlock(block);
- failures++;
+ return null;
}
}
}
+ private LocatedBlock refetchLocations(LocatedBlock block,
+ Collection<DatanodeInfo> ignoredNodes) throws IOException {
+ String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(),
+ deadNodes, ignoredNodes);
+ String blockInfo = block.getBlock() + " file=" + src;
+ if (failures >= dfsClient.getConf().getMaxBlockAcquireFailures()) {
+ String description = "Could not obtain block: " + blockInfo;
+ DFSClient.LOG.warn(description + errMsg
+ + ". Throwing a BlockMissingException");
+ throw new BlockMissingException(src, description,
+ block.getStartOffset());
+ }
+
+ DatanodeInfo[] nodes = block.getLocations();
+ if (nodes == null || nodes.length == 0) {
+ DFSClient.LOG.info("No node available for " + blockInfo);
+ }
+ DFSClient.LOG.info("Could not obtain " + block.getBlock()
+ + " from any node: " + errMsg
+ + ". Will get new block locations from namenode and retry...");
+ try {
+ // Introducing a random factor to the wait time before another retry.
+ // The wait time is dependent on # of failures and a random factor.
+ // At the first time of getting a BlockMissingException, the wait time
+ // is a random number between 0..3000 ms. If the first retry
+ // still fails, we will wait 3000 ms grace period before the 2nd retry.
+ // Also at the second retry, the waiting window is expanded to 6000 ms
+ // alleviating the request rate from the server. Similarly the 3rd retry
+ // will wait 6000ms grace period before retry and the waiting window is
+ // expanded to 9000ms.
+ final int timeWindow = dfsClient.getConf().getTimeWindow();
+ // grace period for the last round of attempt
+ double waitTime = timeWindow * failures +
+ // expanding time window for each failure
+ timeWindow * (failures + 1) *
+ ThreadLocalRandom.current().nextDouble();
+ DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) +
+ " IOException, will wait for " + waitTime + " msec.");
+ Thread.sleep((long)waitTime);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new InterruptedIOException(
+ "Interrupted while choosing DataNode for read.");
+ }
+ deadNodes.clear(); //2nd option is to remove only nodes[blockId]
+ openInfo(true);
+ block = refreshLocatedBlock(block);
+ failures++;
+ return block;
+ }
+
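
The retry delay computed in refetchLocations() grows with the failure count: a fixed grace period plus an expanding random window. A small worked sketch of the same formula, assuming the default 3000 ms time window:

    import java.util.concurrent.ThreadLocalRandom;

    public class RetryBackoffSketch {
      public static void main(String[] args) {
        final int timeWindow = 3000;   // assumed default retry window base, in ms
        for (int failures = 0; failures < 3; failures++) {
          // grace period for the previous attempts, plus an expanding random window
          double waitTime = timeWindow * failures
              + timeWindow * (failures + 1) * ThreadLocalRandom.current().nextDouble();
          System.out.printf("failure #%d: wait up to %.0f ms%n", failures + 1, waitTime);
        }
        // failures=0 -> 0..3000 ms, failures=1 -> 3000..9000 ms, failures=2 -> 6000..15000 ms
      }
    }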
/**
* Get the best node from which to stream the data.
* @param block LocatedBlock, containing nodes in priority order.
@@ -985,6 +1010,7 @@ public class DFSInputStream extends FSInputStream
      return new Callable<ByteBuffer>() {
@Override
public ByteBuffer call() throws Exception {
+ DFSClientFaultInjector.get().sleepBeforeHedgedGet();
try (TraceScope ignored = dfsClient.getTracer().
newScope("hedgedRead" + hedgedReadId, parentSpanId)) {
actualGetFromOneDataNode(datanode, start, end, bb, corruptedBlocks);
@@ -1131,8 +1157,9 @@ public class DFSInputStream extends FSInputStream
        Future<ByteBuffer> firstRequest = hedgedService
.submit(getFromDataNodeCallable);
futures.add(firstRequest);
+ Future<ByteBuffer> future = null;
try {
- Future<ByteBuffer> future = hedgedService.poll(
+ future = hedgedService.poll(
conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
if (future != null) {
ByteBuffer result = future.get();
@@ -1142,34 +1169,38 @@ public class DFSInputStream extends FSInputStream
}
DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
+ "read", conf.getHedgedReadThresholdMillis(), chosenNode.info);
- // Ignore this node on next go around.
- ignored.add(chosenNode.info);
dfsClient.getHedgedReadMetrics().incHedgedReadOps();
// continue; no need to refresh block locations
} catch (ExecutionException e) {
- // Ignore
+ futures.remove(future);
} catch (InterruptedException e) {
throw new InterruptedIOException(
"Interrupted while waiting for reading task");
}
+ // Ignore this node on next go around.
+ // If poll timeout and the request still ongoing, don't consider it
+ // again. If read data failed, don't consider it either.
+ ignored.add(chosenNode.info);
} else {
// We are starting up a 'hedged' read. We have a read already
// ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
// If no nodes to do hedged reads against, pass.
+ boolean refetch = false;
try {
- chosenNode = getBestNodeDNAddrPair(block, ignored);
- if (chosenNode == null) {
- chosenNode = chooseDataNode(block, ignored);
+ chosenNode = chooseDataNode(block, ignored, false);
+ if (chosenNode != null) {
+ // Latest block, if refreshed internally
+ block = chosenNode.block;
+ bb = ByteBuffer.allocate(len);
+ Callable<ByteBuffer> getFromDataNodeCallable =
+ getFromOneDataNode(chosenNode, block, start, end, bb,
+ corruptedBlocks, hedgedReadId++);
+ Future<ByteBuffer> oneMoreRequest =
+ hedgedService.submit(getFromDataNodeCallable);
+ futures.add(oneMoreRequest);
+ } else {
+ refetch = true;
}
- // Latest block, if refreshed internally
- block = chosenNode.block;
- bb = ByteBuffer.allocate(len);
- Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
- chosenNode, block, start, end, bb,
- corruptedBlocks, hedgedReadId++);
- Future<ByteBuffer> oneMoreRequest = hedgedService
- .submit(getFromDataNodeCallable);
- futures.add(oneMoreRequest);
} catch (IOException ioe) {
DFSClient.LOG.debug("Failed getting node for hedged read: {}",
ioe.getMessage());
@@ -1187,6 +1218,9 @@ public class DFSInputStream extends FSInputStream
} catch (InterruptedException ie) {
// Ignore and retry
}
+ if (refetch) {
+ refetchLocations(block, ignored);
+ }
// We got here if exception. Ignore this node on next go around IFF
// we found a chosenNode to hedge read against.
if (chosenNode != null && chosenNode.info != null) {
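
For completeness, this hedged-read path is only taken when the client enables it. A short sketch of the client-side settings that drive it; the key names are believed to be the standard hedged-read properties and the values are illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class HedgedReadConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // A non-zero pool size enables hedged reads (illustrative value).
        conf.setInt("dfs.client.hedged.read.threadpool.size", 10);
        // Spawn the second (hedged) read if the first has not returned within 500 ms.
        conf.setLong("dfs.client.hedged.read.threshold.millis", 500);
        System.out.println("hedged read pool = "
            + conf.getInt("dfs.client.hedged.read.threadpool.size", 0));
      }
    }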
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 2e770cc1c73..e7cd0d827ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -83,6 +83,7 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.BlockingQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -811,10 +812,30 @@ public class DFSUtilClient {
public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize,
int maxPoolSize, long keepAliveTimeSecs, String threadNamePrefix,
boolean runRejectedExec) {
+ return getThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTimeSecs,
+ new SynchronousQueue<>(), threadNamePrefix, runRejectedExec);
+}
+
+ /**
+ * Utility to create a {@link ThreadPoolExecutor}.
+ *
+ * @param corePoolSize - min threads in the pool, even if idle
+ * @param maxPoolSize - max threads in the pool
+ * @param keepAliveTimeSecs - max seconds beyond which excess idle threads
+ * will be terminated
+ * @param queue - the queue to use for holding tasks before they are executed.
+ * @param threadNamePrefix - name prefix for the pool threads
+ * @param runRejectedExec - when true, rejected tasks from
+ * ThreadPoolExecutor are run in the context of calling thread
+ * @return ThreadPoolExecutor
+ */
+ public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize,
+ int maxPoolSize, long keepAliveTimeSecs, BlockingQueue<Runnable> queue,
+ String threadNamePrefix, boolean runRejectedExec) {
Preconditions.checkArgument(corePoolSize > 0);
ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(corePoolSize,
maxPoolSize, keepAliveTimeSecs, TimeUnit.SECONDS,
- new SynchronousQueue<Runnable>(), new Daemon.DaemonFactory() {
+ queue, new Daemon.DaemonFactory() {
private final AtomicInteger threadIndex = new AtomicInteger(0);
@Override
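
A short sketch of calling the new overload with a bounded queue instead of the default SynchronousQueue; pool sizes, queue capacity, and the thread-name prefix are illustrative assumptions:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdfs.DFSUtilClient;

    public class ThreadPoolSketch {
      public static void main(String[] args) throws InterruptedException {
        // A bounded queue lets tasks wait instead of being rejected immediately.
        ThreadPoolExecutor pool = DFSUtilClient.getThreadPoolExecutor(
            4,                                   // corePoolSize (illustrative)
            16,                                  // maxPoolSize (illustrative)
            60,                                  // keepAliveTimeSecs
            new LinkedBlockingQueue<>(1024),     // custom work queue
            "demo-pool-",                        // threadNamePrefix (illustrative)
            true);                               // run rejected tasks in the caller
        pool.submit(() ->
            System.out.println("hello from " + Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }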
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 34c631a66ba..ceec2b346b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -26,7 +26,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -2515,8 +2514,6 @@ public class DistributedFileSystem extends FileSystem {
public void setErasureCodingPolicy(final Path path,
final String ecPolicyName) throws IOException {
Path absF = fixRelativePart(path);
- Preconditions.checkNotNull(ecPolicyName, "Erasure coding policy cannot be" +
- " null.");
    new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
@@ -2543,7 +2540,8 @@ public class DistributedFileSystem extends FileSystem {
*
* @param path The path of the file or directory
* @return Returns the policy information if file or directory on the path
- * is erasure coded, null otherwise
+ * is erasure coded, null otherwise. Null is also returned if the
+ * directory or file has the REPLICATION policy.
* @throws IOException
*/
public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
@@ -2570,7 +2568,8 @@ public class DistributedFileSystem extends FileSystem {
}
/**
- * Retrieve all the erasure coding policies supported by this file system.
+ * Retrieve all the erasure coding policies supported by this file system,
+ * excluding REPLICATION policy.
*
* @return all erasure coding policies supported by this file system.
* @throws IOException
@@ -2587,7 +2586,7 @@ public class DistributedFileSystem extends FileSystem {
* @return all erasure coding codecs and coders supported by this file system.
* @throws IOException
*/
- public HashMap<String, String> getAllErasureCodingCodecs()
+ public Map<String, String> getAllErasureCodingCodecs()
throws IOException {
return dfs.getErasureCodingCodecs();
}
@@ -2892,7 +2891,8 @@ public class DistributedFileSystem extends FileSystem {
*/
@Override
public FSDataOutputStream build() throws IOException {
- if (getFlags().contains(CreateFlag.CREATE)) {
+ if (getFlags().contains(CreateFlag.CREATE) ||
+ getFlags().contains(CreateFlag.OVERWRITE)) {
if (isRecursive()) {
return dfs.create(getPath(), getPermission(), getFlags(),
getBufferSize(), getReplication(), getBlockSize(),
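
With this change, a builder that requests OVERWRITE without an explicit CREATE also resolves to dfs.create(). A brief sketch of that call pattern; the filesystem handle, path, and replication factor are assumptions:

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BuilderOverwriteSketch {
      // fs is assumed to be an already-initialized DistributedFileSystem handle.
      static void rewrite(FileSystem fs, Path path, byte[] data) throws Exception {
        // overwrite(true) adds the OVERWRITE flag; after this patch the HDFS
        // builder accepts it even when CREATE is not explicitly requested.
        try (FSDataOutputStream out = fs.createFile(path)
            .overwrite(true)
            .replication((short) 3)   // illustrative replication factor
            .build()) {
          out.write(data);
        }
      }
    }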
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 45c6b3269b6..b0e85e55ed4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -1588,7 +1588,8 @@ public interface ClientProtocol {
/**
- * Get the erasure coding policies loaded in Namenode.
+ * Get the erasure coding policies loaded in Namenode, excluding REPLICATION
+ * policy.
*
* @throws IOException
*/
@@ -1601,10 +1602,11 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
- HashMap<String, String> getErasureCodingCodecs() throws IOException;
+ Map<String, String> getErasureCodingCodecs() throws IOException;
/**
- * Get the information about the EC policy for the path.
+ * Get the information about the EC policy for the path. Null will be
+ * returned if the directory or file has the REPLICATION policy.
*
* @param src path to get the info for
* @throws IOException
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
index 368a2f265e6..501b67c15bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -17,23 +17,28 @@
*/
package org.apache.hadoop.hdfs.protocol;
+import java.io.Serializable;
+
import com.google.common.base.Preconditions;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
/**
* A policy about how to write/read/code an erasure coding file.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
-public final class ErasureCodingPolicy {
+public final class ErasureCodingPolicy implements Serializable {
+ private static final long serialVersionUID = 0x0079fe4e;
+
+ private String name;
private final ECSchema schema;
private final int cellSize;
- private String name;
private byte id;
public ErasureCodingPolicy(String name, ECSchema schema,
@@ -103,6 +108,10 @@ public final class ErasureCodingPolicy {
this.id = id;
}
+ public boolean isReplicationPolicy() {
+ return (id == ErasureCodeConstants.REPLICATION_POLICY_ID);
+ }
+
@Override
public boolean equals(Object o) {
if (o == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
index e0dd0d70467..37d04e31cca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/FsPermissionExtension.java
@@ -27,6 +27,11 @@ import org.apache.hadoop.fs.permission.FsPermission;
* done for backwards compatibility in case any existing clients assume the
* value of FsPermission is in a particular range.
*/
+
+/**
+ * @deprecated ACLs, encryption, and erasure coding are managed on FileStatus.
+ */
+@Deprecated
@InterfaceAudience.Private
public class FsPermissionExtension extends FsPermission {
private static final long serialVersionUID = 0x13c298a4;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index b636121ac87..2681f129d7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -48,8 +48,8 @@ public final class HdfsConstants {
public static final byte COLD_STORAGE_POLICY_ID = 2;
public static final String COLD_STORAGE_POLICY_NAME = "COLD";
- // TODO should be conf injected?
- public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
+ public static final int DEFAULT_DATA_SOCKET_SIZE = 0;
+
/**
* A special path component contained in the path for a snapshot file/dir
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index c3866022d5d..8438b01b8de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -17,7 +17,9 @@
*/
package org.apache.hadoop.hdfs.protocol;
+import java.io.IOException;
import java.net.URI;
+import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -31,24 +33,15 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
-public class HdfsFileStatus {
+public class HdfsFileStatus extends FileStatus {
+
+ private static final long serialVersionUID = 0x126eb82a;
// local name of the inode that's encoded in java UTF8
- private final byte[] path;
- private final byte[] symlink; // symlink target encoded in java UTF8 or null
- private final long length;
- private final boolean isdir;
- private final short block_replication;
- private final long blocksize;
- private final long modification_time;
- private final long access_time;
- private final FsPermission permission;
- private final String owner;
- private final String group;
+ private byte[] uPath;
+ private byte[] uSymlink; // symlink target encoded in java UTF8/null
private final long fileId;
-
private final FileEncryptionInfo feInfo;
-
private final ErasureCodingPolicy ecPolicy;
// Used by dir, not including dot and dotdot. Always zero for a regular file.
@@ -57,12 +50,22 @@ public class HdfsFileStatus {
public static final byte[] EMPTY_NAME = new byte[0];
+ /**
+ * Set of features potentially active on an instance.
+ */
+ public enum Flags {
+ HAS_ACL,
+ HAS_CRYPT,
+ HAS_EC;
+ }
+ private final EnumSet<Flags> flags;
+
/**
* Constructor.
- * @param length the number of bytes the file has
- * @param isdir if the path is a directory
+ * @param length the number of bytes the file has
+ * @param isdir if the path is a directory
* @param block_replication the replication factor
- * @param blocksize the block size
+ * @param blocksize the block size
* @param modification_time modification time
* @param access_time access time
* @param permission permission
@@ -77,25 +80,18 @@ public class HdfsFileStatus {
* @param ecPolicy the erasure coding policy
*/
public HdfsFileStatus(long length, boolean isdir, int block_replication,
- long blocksize, long modification_time, long access_time,
- FsPermission permission, String owner, String group, byte[] symlink,
- byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo,
- byte storagePolicy, ErasureCodingPolicy ecPolicy) {
- this.length = length;
- this.isdir = isdir;
- this.block_replication = (short) block_replication;
- this.blocksize = blocksize;
- this.modification_time = modification_time;
- this.access_time = access_time;
- this.permission = (permission == null) ?
- ((isdir || symlink!=null) ?
- FsPermission.getDefault() :
- FsPermission.getFileDefault()) :
- permission;
- this.owner = (owner == null) ? "" : owner;
- this.group = (group == null) ? "" : group;
- this.symlink = symlink;
- this.path = path;
+ long blocksize, long modification_time,
+ long access_time, FsPermission permission,
+ EnumSet<Flags> flags, String owner, String group,
+ byte[] symlink, byte[] path, long fileId,
+ int childrenNum, FileEncryptionInfo feInfo,
+ byte storagePolicy, ErasureCodingPolicy ecPolicy) {
+ super(length, isdir, block_replication, blocksize, modification_time,
+ access_time, convert(isdir, symlink != null, permission, flags),
+ owner, group, null, null);
+ this.flags = flags;
+ this.uSymlink = symlink;
+ this.uPath = path;
this.fileId = fileId;
this.childrenNum = childrenNum;
this.feInfo = feInfo;
@@ -104,83 +100,48 @@ public class HdfsFileStatus {
}
/**
- * Get the length of this file, in bytes.
- * @return the length of this file, in bytes.
+ * Set redundant flags for compatibility with existing applications.
*/
- public final long getLen() {
- return length;
+ protected static FsPermission convert(boolean isdir, boolean symlink,
+ FsPermission p, EnumSet<Flags> f) {
+ if (p instanceof FsPermissionExtension) {
+ // verify flags are set consistently
+ assert p.getAclBit() == f.contains(HdfsFileStatus.Flags.HAS_ACL);
+ assert p.getEncryptedBit() == f.contains(HdfsFileStatus.Flags.HAS_CRYPT);
+ assert p.getErasureCodedBit() == f.contains(HdfsFileStatus.Flags.HAS_EC);
+ return p;
+ }
+ if (null == p) {
+ if (isdir) {
+ p = FsPermission.getDirDefault();
+ } else if (symlink) {
+ p = FsPermission.getDefault();
+ } else {
+ p = FsPermission.getFileDefault();
+ }
+ }
+ return new FsPermissionExtension(p, f.contains(Flags.HAS_ACL),
+ f.contains(Flags.HAS_CRYPT), f.contains(Flags.HAS_EC));
}
- /**
- * Is this a directory?
- * @return true if this is a directory
- */
- public final boolean isDir() {
- return isdir;
- }
-
- /**
- * Is this a symbolic link?
- * @return true if this is a symbolic link
- */
+ @Override
public boolean isSymlink() {
- return symlink != null;
+ return uSymlink != null;
}
- /**
- * Get the block size of the file.
- * @return the number of bytes
- */
- public final long getBlockSize() {
- return blocksize;
+ @Override
+ public boolean hasAcl() {
+ return flags.contains(Flags.HAS_ACL);
}
- /**
- * Get the replication factor of a file.
- * @return the replication factor of a file.
- */
- public final short getReplication() {
- return block_replication;
+ @Override
+ public boolean isEncrypted() {
+ return flags.contains(Flags.HAS_CRYPT);
}
- /**
- * Get the modification time of the file.
- * @return the modification time of file in milliseconds since January 1, 1970 UTC.
- */
- public final long getModificationTime() {
- return modification_time;
- }
-
- /**
- * Get the access time of the file.
- * @return the access time of file in milliseconds since January 1, 1970 UTC.
- */
- public final long getAccessTime() {
- return access_time;
- }
-
- /**
- * Get FsPermission associated with the file.
- * @return permission
- */
- public final FsPermission getPermission() {
- return permission;
- }
-
- /**
- * Get the owner of the file.
- * @return owner of the file
- */
- public final String getOwner() {
- return owner;
- }
-
- /**
- * Get the group associated with the file.
- * @return group for the file.
- */
- public final String getGroup() {
- return group;
+ @Override
+ public boolean isErasureCoded() {
+ return flags.contains(Flags.HAS_EC);
}
/**
@@ -188,7 +149,7 @@ public class HdfsFileStatus {
* @return true if the name is empty
*/
public final boolean isEmptyLocalName() {
- return path.length == 0;
+ return uPath.length == 0;
}
/**
@@ -196,7 +157,7 @@ public class HdfsFileStatus {
* @return the local name in string
*/
public final String getLocalName() {
- return DFSUtilClient.bytes2String(path);
+ return DFSUtilClient.bytes2String(uPath);
}
/**
@@ -204,7 +165,7 @@ public class HdfsFileStatus {
* @return the local name in java UTF8
*/
public final byte[] getLocalNameInBytes() {
- return path;
+ return uPath;
}
/**
@@ -238,16 +199,24 @@ public class HdfsFileStatus {
return new Path(parent, getLocalName());
}
- /**
- * Get the string representation of the symlink.
- * @return the symlink as a string.
- */
- public final String getSymlink() {
- return DFSUtilClient.bytes2String(symlink);
+ @Override
+ public Path getSymlink() throws IOException {
+ if (isSymlink()) {
+ return new Path(DFSUtilClient.bytes2String(uSymlink));
+ }
+ throw new IOException("Path " + getPath() + " is not a symbolic link");
}
+ @Override
+ public void setSymlink(Path sym) {
+ uSymlink = DFSUtilClient.string2Bytes(sym.toString());
+ }
+
+ /**
+ * Opaque referent for the symlink, to be resolved at the client.
+ */
public final byte[] getSymlinkInBytes() {
- return symlink;
+ return uSymlink;
}
public final long getFileId() {
@@ -275,13 +244,30 @@ public class HdfsFileStatus {
return storagePolicy;
}
- public final FileStatus makeQualified(URI defaultUri, Path path) {
- return new FileStatus(getLen(), isDir(), getReplication(),
- getBlockSize(), getModificationTime(),
- getAccessTime(),
- getPermission(), getOwner(), getGroup(),
- isSymlink() ? new Path(getSymlink()) : null,
- (getFullPath(path)).makeQualified(
- defaultUri, null)); // fully-qualify path
+ @Override
+ public boolean equals(Object o) {
+ // satisfy findbugs
+ return super.equals(o);
}
+
+ @Override
+ public int hashCode() {
+ // satisfy findbugs
+ return super.hashCode();
+ }
+
+ /**
+ * Resolve the short name of the Path given the URI, parent provided. This
+ * FileStatus reference will not contain a valid Path until it is resolved
+ * by this method.
+ * @param defaultUri FileSystem to fully qualify HDFS path.
+ * @param parent Parent path of this element.
+ * @return Reference to this instance.
+ */
+ public final FileStatus makeQualified(URI defaultUri, Path parent) {
+ // fully-qualify path
+ setPath(getFullPath(parent).makeQualified(defaultUri, null));
+ return this; // API compatibility
+ }
+
}
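
Since HdfsFileStatus now extends FileStatus, the Flags set surfaces through the standard hasAcl()/isEncrypted()/isErasureCoded() accessors. A hedged sketch built directly on the constructor shown above; all field values are illustrative and the class remains audience-private:

    import java.util.EnumSet;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DFSUtilClient;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class HdfsFileStatusFlagsSketch {
      public static void main(String[] args) {
        HdfsFileStatus status = new HdfsFileStatus(
            1024L, false, 3, 128 * 1024 * 1024L,      // length, isdir, replication, blocksize
            12345678L, 87654321L,                      // mtime, atime (illustrative)
            FsPermission.getFileDefault(),
            EnumSet.of(HdfsFileStatus.Flags.HAS_ACL, HdfsFileStatus.Flags.HAS_CRYPT),
            "hadoop", "users",                         // owner, group (illustrative)
            null,                                      // not a symlink
            DFSUtilClient.string2Bytes("demo.bin"),    // local name bytes
            1L, 0, null,                               // fileId, childrenNum, feInfo
            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);

        // The flags now drive the standard FileStatus accessors.
        System.out.println(status.hasAcl());          // true
        System.out.println(status.isEncrypted());     // true
        System.out.println(status.isErasureCoded());  // false
      }
    }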
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index 0fd203936b6..b82a860cf4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.protocol;
import java.net.URI;
+import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -34,7 +35,14 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HdfsLocatedFileStatus extends HdfsFileStatus {
- private final LocatedBlocks locations;
+
+ private static final long serialVersionUID = 0x23c73328;
+
+ /**
+ * Left transient, because {@link #makeQualifiedLocated(URI,Path)}
+ * is the user-facing type.
+ */
+ private transient LocatedBlocks locations;
/**
* Constructor
@@ -56,12 +64,12 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
*/
public HdfsLocatedFileStatus(long length, boolean isdir,
int block_replication, long blocksize, long modification_time,
- long access_time, FsPermission permission, String owner, String group,
- byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
- int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
- ErasureCodingPolicy ecPolicy) {
+ long access_time, FsPermission permission, EnumSet<Flags> flags,
+ String owner, String group, byte[] symlink, byte[] path, long fileId,
+ LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo,
+ byte storagePolicy, ErasureCodingPolicy ecPolicy) {
super(length, isdir, block_replication, blocksize, modification_time,
- access_time, permission, owner, group, symlink, path, fileId,
+ access_time, permission, flags, owner, group, symlink, path, fileId,
childrenNum, feInfo, storagePolicy, ecPolicy);
this.locations = locations;
}
@@ -72,13 +80,21 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
Path path) {
- return new LocatedFileStatus(getLen(), isDir(), getReplication(),
- getBlockSize(), getModificationTime(),
- getAccessTime(),
- getPermission(), getOwner(), getGroup(),
- isSymlink() ? new Path(getSymlink()) : null,
- (getFullPath(path)).makeQualified(
- defaultUri, null), // fully-qualify path
+ makeQualified(defaultUri, path);
+ return new LocatedFileStatus(this,
DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
}
+
+ @Override
+ public boolean equals(Object o) {
+ // satisfy findbugs
+ return super.equals(o);
+ }
+
+ @Override
+ public int hashCode() {
+ // satisfy findbugs
+ return super.hashCode();
+ }
+
}
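A similar hedged sketch for the located variant: makeQualifiedLocated(URI, Path) qualifies the path and then materializes the transient block locations into the user-facing LocatedFileStatus. The wrapper class name is illustrative.

```java
import java.net.URI;

import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;

class LocatedStatusSketch {
  static LocatedFileStatus toUserFacing(HdfsLocatedFileStatus raw,
      URI fsUri, Path parent) {
    // Qualifies the path in place, then copies the (transient) block
    // locations into the user-facing LocatedFileStatus.
    return raw.makeQualifiedLocated(fsUri, parent);
  }
}
```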
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 583d02784ce..61e5316f80b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -21,6 +21,7 @@ import java.io.PrintStream;
import java.text.SimpleDateFormat;
import java.util.Comparator;
import java.util.Date;
+import java.util.EnumSet;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -57,11 +58,12 @@ public class SnapshottableDirectoryStatus {
private final byte[] parentFullPath;
public SnapshottableDirectoryStatus(long modification_time, long access_time,
- FsPermission permission, String owner, String group, byte[] localName,
- long inodeId, int childrenNum,
- int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
+ FsPermission permission, EnumSet<HdfsFileStatus.Flags> flags,
+ String owner, String group, byte[] localName, long inodeId,
+ int childrenNum, int snapshotNumber, int snapshotQuota,
+ byte[] parentFullPath) {
this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
- access_time, permission, owner, group, null, localName, inodeId,
+ access_time, permission, flags, owner, group, null, localName, inodeId,
childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
null);
this.snapshotNumber = snapshotNumber;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
index 2cd838b8fa7..f0efe762d1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java
@@ -68,6 +68,13 @@ public final class SystemErasureCodingPolicies {
new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA,
DEFAULT_CELLSIZE, RS_10_4_POLICY_ID);
+ // REPLICATION policy is always enabled.
+ private static final ErasureCodingPolicy REPLICATION_POLICY =
+ new ErasureCodingPolicy(ErasureCodeConstants.REPLICATION_POLICY_NAME,
+ ErasureCodeConstants.REPLICATION_1_2_SCHEMA,
+ DEFAULT_CELLSIZE,
+ ErasureCodeConstants.REPLICATION_POLICY_ID);
+
private static final List<ErasureCodingPolicy> SYS_POLICIES =
Collections.unmodifiableList(Arrays.asList(
SYS_POLICY1, SYS_POLICY2, SYS_POLICY3, SYS_POLICY4,
@@ -118,4 +125,11 @@ public final class SystemErasureCodingPolicies {
public static ErasureCodingPolicy getByName(String name) {
return SYSTEM_POLICIES_BY_NAME.get(name);
}
+
+ /**
+ * Get the special REPLICATION policy.
+ */
+ public static ErasureCodingPolicy getReplicationPolicy() {
+ return REPLICATION_POLICY;
+ }
}
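Usage sketch for the lookup surface touched above; the REPLICATION pseudo-policy stays out of the regular system-policy list and is reached through its own accessor. The "RS-6-3-1024k" name is only an illustrative assumption.

```java
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

class EcPolicyLookupSketch {
  public static void main(String[] args) {
    // Always-available REPLICATION policy, kept separate from SYS_POLICIES.
    ErasureCodingPolicy replication =
        SystemErasureCodingPolicies.getReplicationPolicy();
    // Name-based lookup of a regular system policy (name is illustrative).
    ErasureCodingPolicy byName =
        SystemErasureCodingPolicies.getByName("RS-6-3-1024k");
    System.out.println(replication.getName());
    System.out.println(byName == null ? "not a system policy" : byName.getName());
  }
}
```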
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 388788c89b9..ac06c1ade8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -26,6 +26,7 @@ import java.util.List;
import com.google.common.collect.Lists;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@@ -1518,7 +1519,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
final SetErasureCodingPolicyRequestProto.Builder builder =
SetErasureCodingPolicyRequestProto.newBuilder();
builder.setSrc(src);
- builder.setEcPolicyName(ecPolicyName);
+ if (ecPolicyName != null) {
+ builder.setEcPolicyName(ecPolicyName);
+ }
SetErasureCodingPolicyRequestProto req = builder.build();
try {
rpcProxy.setErasureCodingPolicy(null, req);
@@ -1758,11 +1761,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
}
@Override
- public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+ public Map<String, String> getErasureCodingCodecs() throws IOException {
try {
GetErasureCodingCodecsResponseProto response = rpcProxy
.getErasureCodingCodecs(null, VOID_GET_EC_CODEC_REQUEST);
- HashMap<String, String> ecCodecs = new HashMap<String, String>();
+ Map<String, String> ecCodecs = new HashMap<>();
for (CodecProto codec : response.getCodecList()) {
ecCodecs.put(codec.getCodec(), codec.getCoders());
}
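A hedged sketch against the ClientProtocol surface exercised above. That a null ecPolicyName causes the NameNode to fall back to its default erasure coding policy is an assumption; the hunk itself only shows that the field may now be omitted on the wire.

```java
import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;

class EcRpcSketch {
  static void demo(ClientProtocol namenode) throws IOException {
    // ecPolicyName may now be null since the proto field became optional
    // (assumed semantics: the NameNode applies its default EC policy).
    namenode.setErasureCodingPolicy("/data/cold", null);

    // getErasureCodingCodecs() now exposes the Map interface.
    Map<String, String> codecs = namenode.getErasureCodingCodecs();
    for (Map.Entry<String, String> e : codecs.entrySet()) {
      System.out.println(e.getKey() + " -> " + e.getValue());
    }
  }
}
```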
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index a16c679825c..f5bad296dc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -104,6 +104,7 @@ import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntrySco
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
@@ -149,7 +150,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Sto
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
@@ -1142,7 +1142,7 @@ public class PBHelperClient {
}
public static FsPermission convert(FsPermissionProto p) {
- return new FsPermissionExtension((short)p.getPerm());
+ return new FsPermission((short)p.getPerm());
}
private static Event.CreateEvent.INodeType createTypeConvert(
@@ -1501,10 +1501,14 @@ public class PBHelperClient {
return null;
}
final HdfsFileStatusProto status = sdirStatusProto.getDirStatus();
+ EnumSet<HdfsFileStatus.Flags> flags = status.hasFlags()
+ ? convertFlags(status.getFlags())
+ : convertFlags(status.getPermission());
return new SnapshottableDirectoryStatus(
status.getModificationTime(),
status.getAccessTime(),
convert(status.getPermission()),
+ flags,
status.getOwner(),
status.getGroup(),
status.getPath().toByteArray(),
@@ -1546,17 +1550,23 @@ public class PBHelperClient {
}
public static FsPermissionProto convert(FsPermission p) {
- return FsPermissionProto.newBuilder().setPerm(p.toExtendedShort()).build();
+ return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
}
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
- if (fs == null)
+ if (fs == null) {
return null;
+ }
+ EnumSet<HdfsFileStatus.Flags> flags = fs.hasFlags()
+ ? convertFlags(fs.getFlags())
+ : convertFlags(fs.getPermission());
return new HdfsLocatedFileStatus(
fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
fs.getBlockReplication(), fs.getBlocksize(),
fs.getModificationTime(), fs.getAccessTime(),
- convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
+ convert(fs.getPermission()),
+ flags,
+ fs.getOwner(), fs.getGroup(),
fs.getFileType().equals(FileType.IS_SYMLINK) ?
fs.getSymlink().toByteArray() : null,
fs.getPath().toByteArray(),
@@ -1569,6 +1579,47 @@ public class PBHelperClient {
fs.hasEcPolicy() ? convertErasureCodingPolicy(fs.getEcPolicy()) : null);
}
+ private static EnumSet<HdfsFileStatus.Flags> convertFlags(int flags) {
+ EnumSet<HdfsFileStatus.Flags> f =
+ EnumSet.noneOf(HdfsFileStatus.Flags.class);
+ for (HdfsFileStatusProto.Flags pbf : HdfsFileStatusProto.Flags.values()) {
+ if ((pbf.getNumber() & flags) != 0) {
+ switch (pbf) {
+ case HAS_ACL:
+ f.add(HdfsFileStatus.Flags.HAS_ACL);
+ break;
+ case HAS_CRYPT:
+ f.add(HdfsFileStatus.Flags.HAS_CRYPT);
+ break;
+ case HAS_EC:
+ f.add(HdfsFileStatus.Flags.HAS_EC);
+ break;
+ default:
+ // ignore unknown
+ break;
+ }
+ }
+ }
+ return f;
+ }
+
+ private static EnumSet<HdfsFileStatus.Flags> convertFlags(
+ FsPermissionProto pbp) {
+ EnumSet<HdfsFileStatus.Flags> f =
+ EnumSet.noneOf(HdfsFileStatus.Flags.class);
+ FsPermission p = new FsPermissionExtension((short)pbp.getPerm());
+ if (p.getAclBit()) {
+ f.add(HdfsFileStatus.Flags.HAS_ACL);
+ }
+ if (p.getEncryptedBit()) {
+ f.add(HdfsFileStatus.Flags.HAS_CRYPT);
+ }
+ if (p.getErasureCodedBit()) {
+ f.add(HdfsFileStatus.Flags.HAS_EC);
+ }
+ return f;
+ }
+
public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
if (c == null)
return null;
@@ -2044,7 +2095,7 @@ public class PBHelperClient {
if (fs == null)
return null;
FileType fType = FileType.IS_FILE;
- if (fs.isDir()) {
+ if (fs.isDirectory()) {
fType = FileType.IS_DIR;
} else if (fs.isSymlink()) {
fType = FileType.IS_SYMLINK;
@@ -2082,6 +2133,10 @@ public class PBHelperClient {
builder.setEcPolicy(convertErasureCodingPolicy(
fs.getErasureCodingPolicy()));
}
+ int flags = fs.hasAcl() ? HdfsFileStatusProto.Flags.HAS_ACL_VALUE : 0;
+ flags |= fs.isEncrypted() ? HdfsFileStatusProto.Flags.HAS_CRYPT_VALUE : 0;
+ flags |= fs.isErasureCoded() ? HdfsFileStatusProto.Flags.HAS_EC_VALUE : 0;
+ builder.setFlags(flags);
return builder.build();
}
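The new HdfsFileStatusProto.flags field is a plain bitmask (HAS_ACL=0x01, HAS_CRYPT=0x02, HAS_EC=0x04). The standalone sketch below mirrors the packing/unpacking convention of convertFlags() with a local enum so it runs without the generated protobuf classes.

```java
import java.util.EnumSet;

class FlagsPackingSketch {
  // Local mirror of the flag bits used by HdfsFileStatusProto.Flags.
  enum Flag {
    HAS_ACL(0x01), HAS_CRYPT(0x02), HAS_EC(0x04);
    final int bit;
    Flag(int bit) { this.bit = bit; }
  }

  static int pack(EnumSet<Flag> flags) {
    int packed = 0;
    for (Flag f : flags) {
      packed |= f.bit;
    }
    return packed;
  }

  static EnumSet<Flag> unpack(int packed) {
    EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
    for (Flag f : Flag.values()) {
      if ((packed & f.bit) != 0) {
        flags.add(f);           // unknown bits are simply ignored
      }
    }
    return flags;
  }

  public static void main(String[] args) {
    int wire = pack(EnumSet.of(Flag.HAS_ACL, Flag.HAS_EC));   // 0x05
    System.out.println(unpack(wire));                         // [HAS_ACL, HAS_EC]
  }
}
```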
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 5e9396e368d..7ec5fe5c15f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -61,6 +60,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.EnumSet;
import java.util.List;
import java.util.Map;
@@ -97,17 +97,8 @@ class JsonUtilClient {
}
/** Convert a string to a FsPermission object. */
- static FsPermission toFsPermission(
- final String s, Boolean aclBit, Boolean encBit, Boolean erasureBit) {
- FsPermission perm = new FsPermission(Short.parseShort(s, 8));
- final boolean aBit = (aclBit != null) ? aclBit : false;
- final boolean eBit = (encBit != null) ? encBit : false;
- final boolean ecBit = (erasureBit != null) ? erasureBit : false;
- if (aBit || eBit || ecBit) {
- return new FsPermissionExtension(perm, aBit, eBit, ecBit);
- } else {
- return perm;
- }
+ static FsPermission toFsPermission(final String s) {
+ return null == s ? null : new FsPermission(Short.parseShort(s, 8));
}
/** Convert a Json map to a HdfsFileStatus object. */
@@ -128,10 +119,23 @@ class JsonUtilClient {
final long len = ((Number) m.get("length")).longValue();
final String owner = (String) m.get("owner");
final String group = (String) m.get("group");
- final FsPermission permission = toFsPermission((String) m.get("permission"),
- (Boolean) m.get("aclBit"),
- (Boolean) m.get("encBit"),
- (Boolean) m.get("ecBit"));
+ final FsPermission permission = toFsPermission((String)m.get("permission"));
+
+ Boolean aclBit = (Boolean) m.get("aclBit");
+ Boolean encBit = (Boolean) m.get("encBit");
+ Boolean erasureBit = (Boolean) m.get("ecBit");
+ EnumSet<HdfsFileStatus.Flags> f =
+ EnumSet.noneOf(HdfsFileStatus.Flags.class);
+ if (aclBit != null && aclBit) {
+ f.add(HdfsFileStatus.Flags.HAS_ACL);
+ }
+ if (encBit != null && encBit) {
+ f.add(HdfsFileStatus.Flags.HAS_CRYPT);
+ }
+ if (erasureBit != null && erasureBit) {
+ f.add(HdfsFileStatus.Flags.HAS_EC);
+ }
+
final long aTime = ((Number) m.get("accessTime")).longValue();
final long mTime = ((Number) m.get("modificationTime")).longValue();
final long blockSize = ((Number) m.get("blockSize")).longValue();
@@ -143,11 +147,11 @@ class JsonUtilClient {
final byte storagePolicy = m.containsKey("storagePolicy") ?
(byte) ((Number) m.get("storagePolicy")).longValue() :
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
- return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY,
- replication, blockSize, mTime, aTime, permission, owner, group,
- symlink, DFSUtilClient.string2Bytes(localName),
- fileId, childrenNum, null,
- storagePolicy, null);
+ return new HdfsFileStatus(len,
+ type == WebHdfsConstants.PathType.DIRECTORY, replication, blockSize,
+ mTime, aTime, permission, f, owner, group, symlink,
+ DFSUtilClient.string2Bytes(localName), fileId, childrenNum,
+ null, storagePolicy, null);
}
static HdfsFileStatus[] toHdfsFileStatusArray(final Map<?, ?> json) {
@@ -465,9 +469,7 @@ class JsonUtilClient {
aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit"));
String permString = (String) m.get("permission");
if (permString != null) {
- final FsPermission permission = toFsPermission(permString,
- (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"),
- (Boolean) m.get("ecBit"));
+ final FsPermission permission = toFsPermission(permString);
aclStatusBuilder.setPermission(permission);
}
final List> entries = (List>) m.get("entries");
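A rough sketch of the parsing split above: the octal permission string becomes a plain FsPermission, while the optional aclBit/encBit/ecBit booleans are folded into an EnumSet of HdfsFileStatus.Flags instead of an FsPermissionExtension. The Map stands in for the parsed WebHDFS JSON.

```java
import java.util.EnumSet;
import java.util.Map;

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

class JsonPermSketch {
  static FsPermission perm(Map<?, ?> json) {
    String s = (String) json.get("permission");
    return s == null ? null : new FsPermission(Short.parseShort(s, 8));
  }

  static EnumSet<HdfsFileStatus.Flags> flags(Map<?, ?> json) {
    EnumSet<HdfsFileStatus.Flags> f =
        EnumSet.noneOf(HdfsFileStatus.Flags.class);
    if (Boolean.TRUE.equals(json.get("aclBit"))) {
      f.add(HdfsFileStatus.Flags.HAS_ACL);
    }
    if (Boolean.TRUE.equals(json.get("encBit"))) {
      f.add(HdfsFileStatus.Flags.HAS_CRYPT);
    }
    if (Boolean.TRUE.equals(json.get("ecBit"))) {
      f.add(HdfsFileStatus.Flags.HAS_EC);
    }
    return f;
  }
}
```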
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
index 50da8998264..f690dd0039d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
@@ -32,7 +32,13 @@ public class WebHdfsConstants {
FILE, DIRECTORY, SYMLINK;
static PathType valueOf(HdfsFileStatus status) {
- return status.isDir()? DIRECTORY: status.isSymlink()? SYMLINK: FILE;
+ if (status.isDirectory()) {
+ return DIRECTORY;
+ }
+ if (status.isSymlink()) {
+ return SYMLINK;
+ }
+ return FILE;
}
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 3861cbaf015..1159e50de7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1016,15 +1016,7 @@ public class WebHdfsFileSystem extends FileSystem
public FileStatus getFileStatus(Path f) throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_FILE_STATUS);
- return makeQualified(getHdfsFileStatus(f), f);
- }
-
- private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
- return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
- f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
- f.getPermission(), f.getOwner(), f.getGroup(),
- f.isSymlink() ? new Path(f.getSymlink()) : null,
- f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
+ return getHdfsFileStatus(f).makeQualified(getUri(), f);
}
@Override
@@ -1507,6 +1499,7 @@ public class WebHdfsFileSystem extends FileSystem
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.LIST_STATUS);
+ final URI fsUri = getUri();
final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
return new FsPathResponseRunner<FileStatus[]>(op, f) {
@Override
@@ -1515,7 +1508,7 @@ public class WebHdfsFileSystem extends FileSystem
JsonUtilClient.toHdfsFileStatusArray(json);
final FileStatus[] statuses = new FileStatus[hdfsStatuses.length];
for (int i = 0; i < hdfsStatuses.length; i++) {
- statuses[i] = makeQualified(hdfsStatuses[i], f);
+ statuses[i] = hdfsStatuses[i].makeQualified(fsUri, f);
}
return statuses;
@@ -1541,10 +1534,11 @@ public class WebHdfsFileSystem extends FileSystem
}
}.run();
// Qualify the returned FileStatus array
+ final URI fsUri = getUri();
final HdfsFileStatus[] statuses = listing.getPartialListing();
FileStatus[] qualified = new FileStatus[statuses.length];
for (int i = 0; i < statuses.length; i++) {
- qualified[i] = makeQualified(statuses[i], f);
+ qualified[i] = statuses[i].makeQualified(fsUri, f);
}
return new DirectoryEntries(qualified, listing.getLastName(),
listing.hasMore());
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
index bb7fdb0168f..c2529c90c32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/acl.proto
@@ -21,7 +21,12 @@ option java_outer_classname = "AclProtos";
option java_generate_equals_and_hash = true;
package hadoop.hdfs;
-import "hdfs.proto";
+/**
+ * File or Directory permission - same spec as posix
+ */
+message FsPermissionProto {
+ required uint32 perm = 1; // Actually a short - only 16bits used
+}
message AclEntryProto {
enum AclEntryScopeProto {
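Illustrative round trip through the relocated FsPermissionProto, mirroring the PBHelperClient.convert() changes earlier in this patch (toShort() instead of toExtendedShort()).

```java
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto;

class FsPermissionProtoSketch {
  static FsPermissionProto toProto(FsPermission p) {
    // Only the plain 16-bit permission is carried; ACL/crypt/EC state now
    // travels in the HdfsFileStatusProto flags field instead.
    return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
  }

  static FsPermission fromProto(FsPermissionProto proto) {
    return new FsPermission((short) proto.getPerm());
  }

  public static void main(String[] args) {
    FsPermission p = new FsPermission((short) 0644);
    System.out.println(fromProto(toProto(p)));   // rw-r--r--
  }
}
```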
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index 65baab65a47..9f803503c39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@ -25,7 +25,7 @@ import "hdfs.proto";
message SetErasureCodingPolicyRequestProto {
required string src = 1;
- required string ecPolicyName = 2;
+ optional string ecPolicyName = 2;
}
message SetErasureCodingPolicyResponseProto {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 497d734445d..465da854e4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true;
package hadoop.hdfs;
import "Security.proto";
+import "acl.proto";
/**
* Extended block idenfies a block
@@ -198,13 +199,6 @@ message CorruptFileBlocksProto {
required string cookie = 2;
}
-/**
- * File or Directory permision - same spec as posix
- */
-message FsPermissionProto {
- required uint32 perm = 1; // Actually a short - only 16bits used
-}
-
/**
* Types of recognized storage media.
*/
@@ -390,6 +384,11 @@ message HdfsFileStatusProto {
IS_FILE = 2;
IS_SYMLINK = 3;
}
+ enum Flags {
+ HAS_ACL = 0x01; // has ACLs
+ HAS_CRYPT = 0x02; // encrypted
+ HAS_EC = 0x04; // erasure coded
+ }
required FileType fileType = 1;
required bytes path = 2; // local name of inode encoded java UTF8
required uint64 length = 3;
@@ -417,6 +416,9 @@ message HdfsFileStatusProto {
// Optional field for erasure coding
optional ErasureCodingPolicyProto ecPolicy = 17;
+
+ // Set of flags
+ optional uint32 flags = 18 [default = 0];
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 1ab890f3f50..1059a02f127 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -124,6 +124,8 @@ public class HttpFSFileSystem extends FileSystem
public static final String POLICY_NAME_PARAM = "storagepolicy";
public static final String OFFSET_PARAM = "offset";
public static final String LENGTH_PARAM = "length";
+ public static final String SNAPSHOT_NAME_PARAM = "snapshotname";
+ public static final String OLD_SNAPSHOT_NAME_PARAM = "oldsnapshotname";
public static final Short DEFAULT_PERMISSION = 0755;
public static final String ACLSPEC_DEFAULT = "";
@@ -144,6 +146,8 @@ public class HttpFSFileSystem extends FileSystem
public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream";
+ public static final String SNAPSHOT_JSON = "Path";
+
public enum FILE_TYPE {
FILE, DIRECTORY, SYMLINK;
@@ -229,7 +233,9 @@ public class HttpFSFileSystem extends FileSystem
DELETE(HTTP_DELETE), SETXATTR(HTTP_PUT), GETXATTRS(HTTP_GET),
REMOVEXATTR(HTTP_PUT), LISTXATTRS(HTTP_GET), LISTSTATUS_BATCH(HTTP_GET),
GETALLSTORAGEPOLICY(HTTP_GET), GETSTORAGEPOLICY(HTTP_GET),
- SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST);
+ SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST),
+ CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
+ RENAMESNAPSHOT(HTTP_PUT);
private String httpMethod;
@@ -1047,18 +1053,7 @@ public class HttpFSFileSystem extends FileSystem
/** Convert a string to a FsPermission object. */
static FsPermission toFsPermission(JSONObject json) {
final String s = (String) json.get(PERMISSION_JSON);
- final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
- final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
- final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON);
- FsPermission perm = new FsPermission(Short.parseShort(s, 8));
- final boolean aBit = (aclBit != null) ? aclBit : false;
- final boolean eBit = (encBit != null) ? encBit : false;
- final boolean ecBit = (erasureBit != null) ? erasureBit : false;
- if (aBit || eBit || ecBit) {
- return new FsPermissionExtension(perm, aBit, eBit, ecBit);
- } else {
- return perm;
- }
+ return new FsPermission(Short.parseShort(s, 8));
}
private FileStatus createFileStatus(Path parent, JSONObject json) {
@@ -1073,23 +1068,23 @@ public class HttpFSFileSystem extends FileSystem
long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
- FileStatus fileStatus = null;
- switch (type) {
- case FILE:
- case DIRECTORY:
- fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY),
- replication, blockSize, mTime, aTime,
- permission, owner, group, path);
- break;
- case SYMLINK:
- Path symLink = null;
- fileStatus = new FileStatus(len, false,
- replication, blockSize, mTime, aTime,
- permission, owner, group, symLink,
- path);
+ final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
+ final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
+ final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON);
+ final boolean aBit = (aclBit != null) ? aclBit : false;
+ final boolean eBit = (encBit != null) ? encBit : false;
+ final boolean ecBit = (erasureBit != null) ? erasureBit : false;
+ if (aBit || eBit || ecBit) {
+ // include this for compatibility with 2.x
+ FsPermissionExtension deprecatedPerm =
+ new FsPermissionExtension(permission, aBit, eBit, ecBit);
+ return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
+ replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
+ null, path, aBit, eBit, ecBit);
}
- return fileStatus;
+ return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
+ replication, blockSize, mTime, aTime, permission, owner, group, path);
}
/**
@@ -1445,4 +1440,43 @@ public class HttpFSFileSystem extends FileSystem
Operation.UNSETSTORAGEPOLICY.getMethod(), params, src, true);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
}
+
+ @Override
+ public final Path createSnapshot(Path path, String snapshotName)
+ throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, Operation.CREATESNAPSHOT.toString());
+ if (snapshotName != null) {
+ params.put(SNAPSHOT_NAME_PARAM, snapshotName);
+ }
+ HttpURLConnection conn = getConnection(Operation.CREATESNAPSHOT.getMethod(),
+ params, path, true);
+ HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+ JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+ return new Path((String) json.get(SNAPSHOT_JSON));
+ }
+
+ @Override
+ public void renameSnapshot(Path path, String snapshotOldName,
+ String snapshotNewName) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, Operation.RENAMESNAPSHOT.toString());
+ params.put(SNAPSHOT_NAME_PARAM, snapshotNewName);
+ params.put(OLD_SNAPSHOT_NAME_PARAM, snapshotOldName);
+ HttpURLConnection conn = getConnection(Operation.RENAMESNAPSHOT.getMethod(),
+ params, path, true);
+ HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+ }
+
+ @Override
+ public void deleteSnapshot(Path path, String snapshotName)
+ throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, Operation.DELETESNAPSHOT.toString());
+ params.put(SNAPSHOT_NAME_PARAM, snapshotName);
+ HttpURLConnection conn = getConnection(Operation.DELETESNAPSHOT.getMethod(),
+ params, path, true);
+ HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+ }
+
}
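A hedged usage sketch for the new snapshot operations, written against the generic FileSystem API that HttpFSFileSystem implements; the webhdfs URI, port, and snapshot names are placeholders, and the target directory must already have snapshots allowed by an administrator.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class HttpFsSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder endpoint for an HttpFS/WebHDFS gateway.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://httpfs-host:14000"), conf);
    Path dir = new Path("/tmp/tmp-snap-test");   // must be snapshottable
    Path snap = fs.createSnapshot(dir, "nightly");
    System.out.println("created " + snap);       // .../.snapshot/nightly
    fs.renameSnapshot(dir, "nightly", "nightly-kept");
    fs.deleteSnapshot(dir, "nightly-kept");
  }
}
```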
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index f1615c3e678..4b5918abf50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -669,7 +669,7 @@ public class FSOperations {
/**
* Creates a list-status executor.
*
- * @param path the directory to retrieve the status of its contents.
+ * @param path the directory or file whose status is to be retrieved.
* @param filter glob filter to use.
*
* @throws IOException thrown if the filter expression is incorrect.
@@ -1492,4 +1492,109 @@ public class FSOperations {
return JsonUtil.toJsonMap(locations);
}
}
+
+ /**
+ * Executor that performs a createSnapshot FileSystemAccess operation.
+ */
+ @InterfaceAudience.Private
+ public static class FSCreateSnapshot implements
+ FileSystemAccess.FileSystemExecutor<String> {
+
+ private Path path;
+ private String snapshotName;
+
+ /**
+ * Creates a createSnapshot executor.
+ * @param path directory path to be snapshotted.
+ * @param snapshotName the snapshot name.
+ */
+ public FSCreateSnapshot(String path, String snapshotName) {
+ this.path = new Path(path);
+ this.snapshotName = snapshotName;
+ }
+
+ /**
+ * Executes the filesystem operation.
+ * @param fs filesystem instance to use.
+ * @return JSON string with the complete path of the newly created snapshot
+ * @throws IOException thrown if an IO error occurred.
+ */
+ @Override
+ public String execute(FileSystem fs) throws IOException {
+ Path snapshotPath = fs.createSnapshot(path, snapshotName);
+ JSONObject json = toJSON(HttpFSFileSystem.HOME_DIR_JSON,
+ snapshotPath.toString());
+ return json.toJSONString().replaceAll("\\\\", "");
+ }
+ }
+
+ /**
+ * Executor that performs a deleteSnapshot FileSystemAccess operation.
+ */
+ @InterfaceAudience.Private
+ public static class FSDeleteSnapshot implements
+ FileSystemAccess.FileSystemExecutor<Void> {
+
+ private Path path;
+ private String snapshotName;
+
+ /**
+ * Creates a deleteSnapshot executor.
+ * @param path path for the snapshot to be deleted.
+ * @param snapshotName snapshot name.
+ */
+ public FSDeleteSnapshot(String path, String snapshotName) {
+ this.path = new Path(path);
+ this.snapshotName = snapshotName;
+ }
+
+ /**
+ * Executes the filesystem operation.
+ * @param fs filesystem instance to use.
+ * @return void
+ * @throws IOException thrown if an IO error occurred.
+ */
+ @Override
+ public Void execute(FileSystem fs) throws IOException {
+ fs.deleteSnapshot(path, snapshotName);
+ return null;
+ }
+ }
+
+ /**
+ * Executor that performs a renameSnapshot FileSystemAccess operation.
+ */
+ @InterfaceAudience.Private
+ public static class FSRenameSnapshot implements
+ FileSystemAccess.FileSystemExecutor<Void> {
+ private Path path;
+ private String oldSnapshotName;
+ private String snapshotName;
+
+ /**
+ * Creates a renameSnapshot executor.
+ * @param path directory path of the snapshot to be renamed.
+ * @param oldSnapshotName current snapshot name.
+ * @param snapshotName new snapshot name to be set.
+ */
+ public FSRenameSnapshot(String path, String oldSnapshotName,
+ String snapshotName) {
+ this.path = new Path(path);
+ this.oldSnapshotName = oldSnapshotName;
+ this.snapshotName = snapshotName;
+ }
+
+ /**
+ * Executes the filesystem operation.
+ * @param fs filesystem instance to use.
+ * @return void
+ * @throws IOException thrown if an IO error occurred.
+ */
+ @Override
+ public Void execute(FileSystem fs) throws IOException {
+ fs.renameSnapshot(path, oldSnapshotName, snapshotName);
+ return null;
+ }
+ }
+
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 347a74750dc..5f265c09852 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -100,6 +100,13 @@ public class HttpFSParametersProvider extends ParametersProvider {
PARAMS_DEF.put(Operation.SETSTORAGEPOLICY,
new Class[] {PolicyNameParam.class});
PARAMS_DEF.put(Operation.UNSETSTORAGEPOLICY, new Class[] {});
+ PARAMS_DEF.put(Operation.CREATESNAPSHOT,
+ new Class[] {SnapshotNameParam.class});
+ PARAMS_DEF.put(Operation.DELETESNAPSHOT,
+ new Class[] {SnapshotNameParam.class});
+ PARAMS_DEF.put(Operation.RENAMESNAPSHOT,
+ new Class[] {OldSnapshotNameParam.class,
+ SnapshotNameParam.class});
}
public HttpFSParametersProvider() {
@@ -565,4 +572,42 @@ public class HttpFSParametersProvider extends ParametersProvider {
super(NAME, null);
}
}
+
+ /**
+ * Class for SnapshotName parameter.
+ */
+ public static class SnapshotNameParam extends StringParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.SNAPSHOT_NAME_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public SnapshotNameParam() {
+ super(NAME, null);
+ }
+
+ }
+
+ /**
+ * Class for OldSnapshotName parameter.
+ */
+ public static class OldSnapshotNameParam extends StringParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.OLD_SNAPSHOT_NAME_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public OldSnapshotNameParam() {
+ super(NAME, null);
+ }
+ }
+
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 5c0c9b5f967..03ccb4caa04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.NewLengthParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OldSnapshotNameParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
@@ -45,6 +46,7 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PolicyNameParam
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SourcesParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SnapshotNameParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrEncodingParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrNameParam;
import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrSetFlagParam;
@@ -430,6 +432,16 @@ public class HttpFSServer {
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
+ case DELETESNAPSHOT: {
+ String snapshotName = params.get(SnapshotNameParam.NAME,
+ SnapshotNameParam.class);
+ FSOperations.FSDeleteSnapshot command =
+ new FSOperations.FSDeleteSnapshot(path, snapshotName);
+ fsExecute(user, command);
+ AUDIT_LOG.info("[{}] deleted snapshot [{}]", path, snapshotName);
+ response = Response.ok().build();
+ break;
+ }
default: {
throw new IOException(
MessageFormat.format("Invalid HTTP DELETE operation [{0}]",
@@ -602,6 +614,16 @@ public class HttpFSServer {
}
break;
}
+ case CREATESNAPSHOT: {
+ String snapshotName = params.get(SnapshotNameParam.NAME,
+ SnapshotNameParam.class);
+ FSOperations.FSCreateSnapshot command =
+ new FSOperations.FSCreateSnapshot(path, snapshotName);
+ String json = fsExecute(user, command);
+ AUDIT_LOG.info("[{}] snapshot created as [{}]", path, snapshotName);
+ response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+ break;
+ }
case SETXATTR: {
String xattrName = params.get(XAttrNameParam.NAME,
XAttrNameParam.class);
@@ -617,6 +639,20 @@ public class HttpFSServer {
response = Response.ok().build();
break;
}
+ case RENAMESNAPSHOT: {
+ String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
+ OldSnapshotNameParam.class);
+ String snapshotName = params.get(SnapshotNameParam.NAME,
+ SnapshotNameParam.class);
+ FSOperations.FSRenameSnapshot command =
+ new FSOperations.FSRenameSnapshot(path, oldSnapshotName,
+ snapshotName);
+ fsExecute(user, command);
+ AUDIT_LOG.info("[{}] renamed snapshot [{}] to [{}]", path,
+ oldSnapshotName, snapshotName);
+ response = Response.ok().build();
+ break;
+ }
case REMOVEXATTR: {
String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr(
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh
index 6301e274866..85cbc6682c7 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh
@@ -16,7 +16,7 @@
# limitations under the License.
if [[ "${HADOOP_SHELL_EXECNAME}" = hdfs ]]; then
- hadoop_add_subcommand "httpfs" "run HttpFS server, the HDFS HTTP Gateway"
+ hadoop_add_subcommand "httpfs" daemon "run HttpFS server, the HDFS HTTP Gateway"
fi
## @description Command handler for httpfs subcommand
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index e23093e0721..2cd89344aa8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.UserGroupInformation;
@@ -74,6 +75,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.regex.Pattern;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
@@ -852,10 +854,12 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
}
}
- private static void assertSameAclBit(FileSystem expected, FileSystem actual,
+ private static void assertSameAcls(FileSystem expected, FileSystem actual,
Path path) throws IOException {
FileStatus expectedFileStatus = expected.getFileStatus(path);
FileStatus actualFileStatus = actual.getFileStatus(path);
+ assertEquals(actualFileStatus.hasAcl(), expectedFileStatus.hasAcl());
+ // backwards compat
assertEquals(actualFileStatus.getPermission().getAclBit(),
expectedFileStatus.getPermission().getAclBit());
}
@@ -888,31 +892,31 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
AclStatus proxyAclStat = proxyFs.getAclStatus(path);
AclStatus httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAclBit(httpfs, proxyFs, path);
+ assertSameAcls(httpfs, proxyFs, path);
httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true));
proxyAclStat = proxyFs.getAclStatus(path);
httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAclBit(httpfs, proxyFs, path);
+ assertSameAcls(httpfs, proxyFs, path);
httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
proxyAclStat = proxyFs.getAclStatus(path);
httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAclBit(httpfs, proxyFs, path);
+ assertSameAcls(httpfs, proxyFs, path);
httpfs.removeAclEntries(path, AclEntry.parseAclSpec(rmAclUser1, false));
proxyAclStat = proxyFs.getAclStatus(path);
httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAclBit(httpfs, proxyFs, path);
+ assertSameAcls(httpfs, proxyFs, path);
httpfs.removeAcl(path);
proxyAclStat = proxyFs.getAclStatus(path);
httpfsAclStat = httpfs.getAclStatus(path);
assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAclBit(httpfs, proxyFs, path);
+ assertSameAcls(httpfs, proxyFs, path);
}
/**
@@ -935,21 +939,21 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAclBit(httpfs, proxyFs, dir);
+ assertSameAcls(httpfs, proxyFs, dir);
/* Set a default ACL on the directory */
httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true)));
proxyAclStat = proxyFs.getAclStatus(dir);
httpfsAclStat = httpfs.getAclStatus(dir);
assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAclBit(httpfs, proxyFs, dir);
+ assertSameAcls(httpfs, proxyFs, dir);
/* Remove the default ACL */
httpfs.removeDefaultAcl(dir);
proxyAclStat = proxyFs.getAclStatus(dir);
httpfsAclStat = httpfs.getAclStatus(dir);
assertSameAcls(httpfsAclStat, proxyAclStat);
- assertSameAclBit(httpfs, proxyFs, dir);
+ assertSameAcls(httpfs, proxyFs, dir);
}
private void testEncryption() throws Exception {
@@ -1033,11 +1037,12 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
}
protected enum Operation {
- GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS,
+ GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS,
WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION, LIST_STATUS_BATCH,
- GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING, GETFILEBLOCKLOCATIONS
+ GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING, GETFILEBLOCKLOCATIONS,
+ CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT
}
private void operation(Operation op) throws Exception {
@@ -1129,6 +1134,15 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
case GETFILEBLOCKLOCATIONS:
testGetFileBlockLocations();
break;
+ case CREATE_SNAPSHOT:
+ testCreateSnapshot();
+ break;
+ case RENAME_SNAPSHOT:
+ testRenameSnapshot();
+ break;
+ case DELETE_SNAPSHOT:
+ testDeleteSnapshot();
+ break;
}
}
@@ -1256,4 +1270,98 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
location2.getTopologyPaths());
}
}
+
+ private void testCreateSnapshot(String snapshotName) throws Exception {
+ if (!this.isLocalFS()) {
+ Path snapshottablePath = new Path("/tmp/tmp-snap-test");
+ createSnapshotTestsPreconditions(snapshottablePath);
+ //Now get the FileSystem instance that's being tested
+ FileSystem fs = this.getHttpFSFileSystem();
+ if (snapshotName == null) {
+ fs.createSnapshot(snapshottablePath);
+ } else {
+ fs.createSnapshot(snapshottablePath, snapshotName);
+ }
+ Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
+ FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
+ assertTrue("Should have exactly one snapshot.",
+ snapshotItems.length == 1);
+ String resultingSnapName = snapshotItems[0].getPath().getName();
+ if (snapshotName == null) {
+ assertTrue("Snapshot auto generated name not matching pattern",
+ Pattern.matches("(s)(\\d{8})(-)(\\d{6})(\\.)(\\d{3})",
+ resultingSnapName));
+ } else {
+ assertTrue("Snapshot name is not same as passed name.",
+ snapshotName.equals(resultingSnapName));
+ }
+ cleanSnapshotTests(snapshottablePath, resultingSnapName);
+ }
+ }
+
+ private void testCreateSnapshot() throws Exception {
+ testCreateSnapshot(null);
+ testCreateSnapshot("snap-with-name");
+ }
+
+ private void createSnapshotTestsPreconditions(Path snapshottablePath)
+ throws Exception {
+ //Needed to get a DistributedFileSystem instance, in order to
+ //call allowSnapshot on the newly created directory
+ DistributedFileSystem distributedFs = (DistributedFileSystem)
+ FileSystem.get(snapshottablePath.toUri(), this.getProxiedFSConf());
+ distributedFs.mkdirs(snapshottablePath);
+ distributedFs.allowSnapshot(snapshottablePath);
+ Path subdirPath = new Path("/tmp/tmp-snap-test/subdir");
+ distributedFs.mkdirs(subdirPath);
+
+ }
+
+ private void cleanSnapshotTests(Path snapshottablePath,
+ String resultingSnapName) throws Exception {
+ DistributedFileSystem distributedFs = (DistributedFileSystem)
+ FileSystem.get(snapshottablePath.toUri(), this.getProxiedFSConf());
+ distributedFs.deleteSnapshot(snapshottablePath, resultingSnapName);
+ distributedFs.delete(snapshottablePath, true);
+ }
+
+ private void testRenameSnapshot() throws Exception {
+ if (!this.isLocalFS()) {
+ Path snapshottablePath = new Path("/tmp/tmp-snap-test");
+ createSnapshotTestsPreconditions(snapshottablePath);
+ //Now get the FileSystem instance that's being tested
+ FileSystem fs = this.getHttpFSFileSystem();
+ fs.createSnapshot(snapshottablePath, "snap-to-rename");
+ fs.renameSnapshot(snapshottablePath, "snap-to-rename",
+ "snap-new-name");
+ Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
+ FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
+ assertTrue("Should have exactly one snapshot.",
+ snapshotItems.length == 1);
+ String resultingSnapName = snapshotItems[0].getPath().getName();
+ assertTrue("Snapshot name is not same as passed name.",
+ "snap-new-name".equals(resultingSnapName));
+ cleanSnapshotTests(snapshottablePath, resultingSnapName);
+ }
+ }
+
+ private void testDeleteSnapshot() throws Exception {
+ if (!this.isLocalFS()) {
+ Path snapshottablePath = new Path("/tmp/tmp-snap-test");
+ createSnapshotTestsPreconditions(snapshottablePath);
+ //Now get the FileSystem instance that's being tested
+ FileSystem fs = this.getHttpFSFileSystem();
+ fs.createSnapshot(snapshottablePath, "snap-to-delete");
+ Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
+ FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
+ assertTrue("Should have exactly one snapshot.",
+ snapshotItems.length == 1);
+ fs.deleteSnapshot(snapshottablePath, "snap-to-delete");
+ snapshotItems = fs.listStatus(snapshotsDir);
+ assertTrue("There should be no snapshot anymore.",
+ snapshotItems.length == 0);
+ fs.delete(snapshottablePath, true);
+ }
+ }
+
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index 0e1cc20177e..60e70d2e6f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
@@ -71,6 +72,7 @@ import org.eclipse.jetty.webapp.WebAppContext;
import com.google.common.collect.Maps;
import java.util.Properties;
+import java.util.regex.Pattern;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
/**
@@ -465,6 +467,20 @@ public class TestHttpFSServer extends HFSTestCase {
*/
private void putCmd(String filename, String command,
String params) throws Exception {
+ Assert.assertEquals(HttpURLConnection.HTTP_OK,
+ putCmdWithReturn(filename, command, params).getResponseCode());
+ }
+
+ /**
+ * General-purpose HTTP PUT command to the httpfs server,
+ * which returns the related HttpURLConnection instance.
+ * @param filename The file to operate upon
+ * @param command The command to perform (SETACL, etc)
+ * @param params Parameters, like "aclspec=..."
+ * @return HttpURLConnection the HttpURLConnection instance for the given PUT
+ */
+ private HttpURLConnection putCmdWithReturn(String filename, String command,
+ String params) throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if (filename.charAt(0) == '/') {
@@ -478,7 +494,7 @@ public class TestHttpFSServer extends HFSTestCase {
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
- Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+ return conn;
}
/**
@@ -882,6 +898,108 @@ public class TestHttpFSServer extends HFSTestCase {
delegationTokenCommonTests(false);
}
+ private HttpURLConnection snapshotTestPreconditions(String httpMethod,
+ String snapOperation,
+ String additionalParams)
+ throws Exception {
+ String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+ URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
+ "/webhdfs/v1/tmp/tmp-snap-test/subdir?user.name={0}&op=MKDIRS",
+ user));
+ HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod("PUT");
+ conn.connect();
+
+ Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+
+ //needed to make the given dir snapshottable
+ Path snapshottablePath = new Path("/tmp/tmp-snap-test");
+ DistributedFileSystem dfs =
+ (DistributedFileSystem) FileSystem.get(snapshottablePath.toUri(),
+ TestHdfsHelper.getHdfsConf());
+ dfs.allowSnapshot(snapshottablePath);
+
+ //Perform the requested snapshot operation with the given parameters
+ url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
+ "/webhdfs/v1/tmp/tmp-snap-test?user.name={0}&op={1}&{2}", user,
+ snapOperation, additionalParams));
+ conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod(httpMethod);
+ conn.connect();
+ return conn;
+ }
+
+ @Test
+ @TestDir
+ @TestJetty
+ @TestHdfs
+ public void testCreateSnapshot() throws Exception {
+ createHttpFSServer(false, false);
+ final HttpURLConnection conn = snapshotTestPreconditions("PUT",
+ "CREATESNAPSHOT",
+ "snapshotname=snap-with-name");
+ Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+ final BufferedReader reader =
+ new BufferedReader(new InputStreamReader(conn.getInputStream()));
+ String result = reader.readLine();
+ //Validates if the content format is correct
+ Assert.assertTrue(result.
+ equals("{\"Path\":\"/tmp/tmp-snap-test/.snapshot/snap-with-name\"}"));
+ //Validates if the snapshot is properly created under .snapshot folder
+ result = getStatus("/tmp/tmp-snap-test/.snapshot",
+ "LISTSTATUS");
+ Assert.assertTrue(result.contains("snap-with-name"));
+ }
+
+ @Test
+ @TestDir
+ @TestJetty
+ @TestHdfs
+ public void testCreateSnapshotNoSnapshotName() throws Exception {
+ createHttpFSServer(false, false);
+ final HttpURLConnection conn = snapshotTestPreconditions("PUT",
+ "CREATESNAPSHOT",
+ "");
+ Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+ final BufferedReader reader = new BufferedReader(
+ new InputStreamReader(conn.getInputStream()));
+ String result = reader.readLine();
+ //Validates if the content format is correct
+ Assert.assertTrue(Pattern.matches(
+ "(\\{\\\"Path\\\"\\:\\\"/tmp/tmp-snap-test/.snapshot/s)" +
+ "(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\"\\})", result));
+ //Validates if the snapshot is properly created under .snapshot folder
+ result = getStatus("/tmp/tmp-snap-test/.snapshot",
+ "LISTSTATUS");
+
+ Assert.assertTrue(Pattern.matches("(.+)(\\\"pathSuffix\\\":\\\"s)" +
+ "(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\")(.+)",
+ result));
+ }
+
+ @Test
+ @TestDir
+ @TestJetty
+ @TestHdfs
+ public void testRenameSnapshot() throws Exception {
+ createHttpFSServer(false, false);
+ HttpURLConnection conn = snapshotTestPreconditions("PUT",
+ "CREATESNAPSHOT",
+ "snapshotname=snap-to-rename");
+ Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+ conn = snapshotTestPreconditions("PUT",
+ "RENAMESNAPSHOT",
+ "oldsnapshotname=snap-to-rename" +
+ "&snapshotname=snap-renamed");
+ Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+ //Validates the snapshot is properly renamed under .snapshot folder
+ String result = getStatus("/tmp/tmp-snap-test/.snapshot",
+ "LISTSTATUS");
+ Assert.assertTrue(result.contains("snap-renamed"));
+ //There should be no snapshot named snap-to-rename now
+ Assert.assertFalse(result.contains("snap-to-rename"));
+ }
+
@Test
@TestDir
@TestJetty
@@ -890,4 +1008,24 @@ public class TestHttpFSServer extends HFSTestCase {
createHttpFSServer(true, true);
delegationTokenCommonTests(true);
}
+
+ @Test
+ @TestDir
+ @TestJetty
+ @TestHdfs
+ public void testDeleteSnapshot() throws Exception {
+ createHttpFSServer(false, false);
+ HttpURLConnection conn = snapshotTestPreconditions("PUT",
+ "CREATESNAPSHOT",
+ "snapshotname=snap-to-delete");
+ Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+ conn = snapshotTestPreconditions("DELETE",
+ "DELETESNAPSHOT",
+ "snapshotname=snap-to-delete");
+ Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+ //Validates the snapshot is not under .snapshot folder anymore
+ String result = getStatus("/tmp/tmp-snap-test/.snapshot",
+ "LISTSTATUS");
+ Assert.assertFalse(result.contains("snap-to-delete"));
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
index cc17394197a..abaa5cad648 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
@@ -65,7 +65,9 @@ public class Nfs3Utils {
* client takes only the lower 32bit of the fileId and treats it as signed
* int. When the 32th bit is 1, the client considers it invalid.
*/
- NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
+ NfsFileType fileType = fs.isDirectory()
+ ? NfsFileType.NFSDIR
+ : NfsFileType.NFSREG;
fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
@@ -98,7 +100,7 @@ public class Nfs3Utils {
return null;
}
- long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat
+ long size = fstat.isDirectory() ? getDirSize(fstat.getChildrenNum()) : fstat
.getLen();
return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
new NfsTime(fstat.getModificationTime()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index d6bb71d0d9c..7a6aa89fde7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -1208,7 +1208,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
if (fstat == null) {
return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
}
- if (fstat.isDir()) {
+ if (fstat.isDirectory()) {
return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
}
@@ -1289,7 +1289,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
if (fstat == null) {
return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
}
- if (!fstat.isDir()) {
+ if (!fstat.isDirectory()) {
return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc);
}
@@ -1565,7 +1565,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
LOG.info("Can't get path for fileId: " + handle.getFileId());
return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
}
- if (!dirStatus.isDir()) {
+ if (!dirStatus.isDirectory()) {
LOG.error("Can't readdir for regular file, fileId: "
+ handle.getFileId());
return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
@@ -1732,7 +1732,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
LOG.info("Can't get path for fileId: " + handle.getFileId());
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
}
- if (!dirStatus.isDir()) {
+ if (!dirStatus.isDirectory()) {
LOG.error("Can't readdirplus for regular file, fileId: "
+ handle.getFileId());
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 83f3e9f368c..d5db8b3026c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -266,4 +266,37 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index b8364d8dcc3..bebb8d1ef82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -169,8 +169,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
compile
- xerces
- xercesImpl
+ io.netty
+ netty-allcompile
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 914635e6210..61e48088dea 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
@@ -31,45 +31,47 @@ function hadoop_usage
hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
hadoop_add_option "--workers" "turn on worker mode"
- hadoop_add_subcommand "balancer" "run a cluster balancing utility"
- hadoop_add_subcommand "cacheadmin" "configure the HDFS cache"
- hadoop_add_subcommand "cblock" "cblock CLI"
- hadoop_add_subcommand "cblockserver" "run cblock server"
- hadoop_add_subcommand "classpath" "prints the class path needed to get the hadoop jar and the required libraries"
- hadoop_add_subcommand "corona" "run an ozone data generator"
- hadoop_add_subcommand "crypto" "configure HDFS encryption zones"
- hadoop_add_subcommand "datanode" "run a DFS datanode"
- hadoop_add_subcommand "debug" "run a Debug Admin to execute HDFS debug commands"
- hadoop_add_subcommand "dfs" "run a filesystem command on the file system"
- hadoop_add_subcommand "dfsadmin" "run a DFS admin client"
- hadoop_add_subcommand "diskbalancer" "Distributes data evenly among disks on a given node"
- hadoop_add_subcommand "envvars" "display computed Hadoop environment variables"
- hadoop_add_subcommand "ec" "run a HDFS ErasureCoding CLI"
- hadoop_add_subcommand "fetchdt" "fetch a delegation token from the NameNode"
- hadoop_add_subcommand "fsck" "run a DFS filesystem checking utility"
- hadoop_add_subcommand "getconf" "get config values from configuration"
- hadoop_add_subcommand "groups" "get the groups which users belong to"
- hadoop_add_subcommand "haadmin" "run a DFS HA admin client"
- hadoop_add_subcommand "jmxget" "get JMX exported values from NameNode or DataNode."
- hadoop_add_subcommand "journalnode" "run the DFS journalnode"
- hadoop_add_subcommand "jscsi" "run cblock jscsi server"
- hadoop_add_subcommand "ksm" "Ozone keyspace manager"
- hadoop_add_subcommand "lsSnapshottableDir" "list all snapshottable dirs owned by the current user"
- hadoop_add_subcommand "mover" "run a utility to move block replicas across storage types"
- hadoop_add_subcommand "namenode" "run the DFS namenode"
- hadoop_add_subcommand "nfs3" "run an NFS version 3 gateway"
- hadoop_add_subcommand "oev" "apply the offline edits viewer to an edits file"
- hadoop_add_subcommand "oiv" "apply the offline fsimage viewer to an fsimage"
- hadoop_add_subcommand "oiv_legacy" "apply the offline fsimage viewer to a legacy fsimage"
- hadoop_add_subcommand "oz" "command line interface for ozone"
- hadoop_add_subcommand "oz_debug" "ozone debug tool, convert ozone meta data db into sqlite db"
- hadoop_add_subcommand "portmap" "run a portmap service"
- hadoop_add_subcommand "scm" "run the Storage Container Manager service"
- hadoop_add_subcommand "secondarynamenode" "run the DFS secondary namenode"
- hadoop_add_subcommand "snapshotDiff" "diff two snapshots of a directory or diff the current directory contents with a snapshot"
- hadoop_add_subcommand "storagepolicies" "list/get/set block storage policies"
- hadoop_add_subcommand "version" "print the version"
- hadoop_add_subcommand "zkfc" "run the ZK Failover Controller daemon"
+
+ hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility"
+ hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache"
+ hadoop_add_subcommand "cblock" admin "cblock CLI"
+ hadoop_add_subcommand "cblockserver" daemon "run cblock server"
+ hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
+ hadoop_add_subcommand "corona" client "run an ozone data generator"
+ hadoop_add_subcommand "crypto" admin "configure HDFS encryption zones"
+ hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
+ hadoop_add_subcommand "debug" admin "run a Debug Admin to execute HDFS debug commands"
+ hadoop_add_subcommand "dfs" client "run a filesystem command on the file system"
+ hadoop_add_subcommand "dfsadmin" admin "run a DFS admin client"
+ hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among disks on a given node"
+ hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
+ hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
+ hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode"
+ hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
+ hadoop_add_subcommand "getconf" client "get config values from configuration"
+ hadoop_add_subcommand "groups" client "get the groups which users belong to"
+ hadoop_add_subcommand "haadmin" admin "run a DFS HA admin client"
+ hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode."
+ hadoop_add_subcommand "journalnode" daemon "run the DFS journalnode"
+ hadoop_add_subcommand "jscsi" daemon "run cblock jscsi server"
+ hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager"
+ hadoop_add_subcommand "lsSnapshottableDir" client "list all snapshottable dirs owned by the current user"
+ hadoop_add_subcommand "mover" daemon "run a utility to move block replicas across storage types"
+ hadoop_add_subcommand "namenode" daemon "run the DFS namenode"
+ hadoop_add_subcommand "nfs3" daemon "run an NFS version 3 gateway"
+ hadoop_add_subcommand "oev" admin "apply the offline edits viewer to an edits file"
+ hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage"
+ hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage"
+ hadoop_add_subcommand "oz" client "command line interface for ozone"
+ hadoop_add_subcommand "oz_debug" client "ozone debug tool, convert ozone metadata into relational data"
+ hadoop_add_subcommand "portmap" daemon "run a portmap service"
+ hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
+ hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
+ hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
+ hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
+ hadoop_add_subcommand "version" client "print the version"
+ hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
+
hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1f60f3249c2..f4c383e84f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -269,7 +269,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY =
"dfs.namenode.posix.acl.inheritance.enabled";
public static final boolean
- DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_DEFAULT = false;
+ DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_DEFAULT = true;
public static final String DFS_NAMENODE_XATTRS_ENABLED_KEY = "dfs.namenode.xattrs.enabled";
public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true;
public static final String DFS_ADMIN = "dfs.cluster.administrators";
@@ -564,6 +564,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = "";
public static final String DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY = "dfs.namenode.ec.policies.max.cellsize";
public static final int DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT = 4 * 1024 * 1024;
+ public static final String DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY =
+ "dfs.namenode.ec.system.default.policy";
+ public static final String DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT =
+ "RS-6-3-64k";
public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = "dfs.datanode.ec.reconstruction.stripedread.threads";
public static final int DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = "dfs.datanode.ec.reconstruction.stripedread.buffer.size";
@@ -1031,7 +1035,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final boolean DFS_PIPELINE_ECN_ENABLED_DEFAULT = false;
// Key Provider Cache Expiry
- public static final String DFS_DATANODE_BLOCK_PINNING_ENABLED =
+ public static final String DFS_DATANODE_BLOCK_PINNING_ENABLED =
"dfs.datanode.block-pinning.enabled";
public static final boolean DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT =
false;
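The two constants added above introduce a cluster-wide default erasure coding policy key with a default of "RS-6-3-64k". A hedged sketch of how such a key would typically be read from hdfs-site.xml; the actual NameNode consumer is not part of this hunk:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

class EcDefaultPolicyConfigExample {
  static String resolveDefaultEcPolicy(Configuration conf) {
    // Falls back to "RS-6-3-64k" when dfs.namenode.ec.system.default.policy
    // is not set in hdfs-site.xml.
    return conf.getTrimmed(
        DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
        DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
  }
}
```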
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java
index e9c5b2a4b19..49f3eaaba2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java
@@ -30,4 +30,8 @@ public class SnapshotException extends IOException {
public SnapshotException(final Throwable cause) {
super(cause);
}
+
+ public SnapshotException(final String message, final Throwable cause) {
+ super(message, cause);
+ }
}
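The new constructor lets callers attach a snapshot-specific message while preserving the underlying cause. A small, hypothetical usage sketch:

```java
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.SnapshotException;

class SnapshotExceptionExample {
  static void wrap(String snapshotRoot, IOException ioe) throws SnapshotException {
    // The cause argument keeps the original stack trace attached.
    throw new SnapshotException(
        "Failed to process snapshot under " + snapshotRoot, ioe);
  }
}
```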
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
index 3ddfc85638a..676e8276f25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 4ac49fe12f7..a4462769020 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@@ -1488,7 +1487,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
RpcController controller, SetErasureCodingPolicyRequestProto req)
throws ServiceException {
try {
- server.setErasureCodingPolicy(req.getSrc(), req.getEcPolicyName());
+ String ecPolicyName = req.hasEcPolicyName() ?
+ req.getEcPolicyName() : null;
+ server.setErasureCodingPolicy(req.getSrc(), ecPolicyName);
return SetErasureCodingPolicyResponseProto.newBuilder().build();
} catch (IOException e) {
throw new ServiceException(e);
@@ -1662,7 +1663,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
RpcController controller, GetErasureCodingCodecsRequestProto request)
throws ServiceException {
try {
- HashMap codecs = server.getErasureCodingCodecs();
+ Map codecs = server.getErasureCodingCodecs();
GetErasureCodingCodecsResponseProto.Builder resBuilder =
GetErasureCodingCodecsResponseProto.newBuilder();
for (Map.Entry codec : codecs.entrySet()) {
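Two small API hardenings appear in this translator: the EC policy name is read only when the optional protobuf field is present (hasEcPolicyName()), and the codec listing is typed against the Map interface rather than HashMap. A self-contained, hedged imitation of both idioms (the map contents are made up for illustration):

```java
import java.util.Map;
import java.util.TreeMap;

class OptionalFieldIdiomExample {
  // Mimics the protobuf hasX()/getX() pair: generated getters return a
  // default ("" for strings) when the field is unset, never null, so
  // presence must be checked explicitly before mapping to null.
  static String ecPolicyNameOrNull(boolean hasEcPolicyName, String ecPolicyName) {
    return hasEcPolicyName ? ecPolicyName : null;
  }

  // Returning Map instead of HashMap keeps the RPC translator decoupled
  // from the concrete collection chosen by the server implementation.
  static Map<String, String> codecs() {
    Map<String, String> m = new TreeMap<>();
    m.put("rs", "rs_native,rs_java"); // hypothetical codec-to-coder mapping
    return m;
  }
}
```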
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/package-info.java
new file mode 100644
index 00000000000..6233024467d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 0041d5eda7b..0f4091dcb23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -286,8 +286,7 @@ public class Journal implements Closeable {
fjm.setLastReadableTxId(val);
}
- @VisibleForTesting
- JournalMetrics getMetricsForTests() {
+ JournalMetrics getMetrics() {
return metrics;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
index cffe2c1f55a..fcfd9016cd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
@@ -45,6 +45,9 @@ class JournalMetrics {
@Metric("Number of batches written where this node was lagging")
MutableCounterLong batchesWrittenWhileLagging;
+
+ @Metric("Number of edit logs downloaded by JournalNodeSyncer")
+ private MutableCounterLong numEditLogsSynced;
private final int[] QUANTILE_INTERVALS = new int[] {
1*60, // 1m
@@ -120,4 +123,12 @@ class JournalMetrics {
q.add(us);
}
}
+
+ public MutableCounterLong getNumEditLogsSynced() {
+ return numEditLogsSynced;
+ }
+
+ public void incrNumEditLogsSynced() {
+ numEditLogsSynced.incr();
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
index 479f6a00e2c..537ba0a0fd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
@@ -77,6 +77,7 @@ public class JournalNodeSyncer {
private final long journalSyncInterval;
private final int logSegmentTransferTimeout;
private final DataTransferThrottler throttler;
+ private final JournalMetrics metrics;
JournalNodeSyncer(JournalNode jouranlNode, Journal journal, String jid,
Configuration conf) {
@@ -93,6 +94,7 @@ public class JournalNodeSyncer {
DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_TIMEOUT_KEY,
DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_TIMEOUT_DEFAULT);
throttler = getThrottler(conf);
+ metrics = journal.getMetrics();
}
void stopSync() {
@@ -411,6 +413,8 @@ public class JournalNodeSyncer {
LOG.warn("Deleting " + tmpEditsFile + " has failed");
}
return false;
+ } else {
+ metrics.incrNumEditLogsSynced();
}
return true;
}
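With the counter wired into JournalNodeSyncer, every successfully downloaded edit-log segment bumps NumEditLogsSynced. The counter follows the standard metrics2 MutableCounterLong pattern: increment on success, read back via value(). A minimal, self-contained sketch of that pattern using a stand-alone MetricsRegistry rather than the package-private JournalMetrics:

```java
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

class CounterPatternExample {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("JournalNodeSyncerExample");
    MutableCounterLong editLogsSynced =
        registry.newCounter("NumEditLogsSynced",
            "Number of edit logs downloaded by JournalNodeSyncer", 0L);

    // JournalNodeSyncer does the equivalent of this after a segment is
    // successfully downloaded and moved into place.
    editLogsSynced.incr();

    System.out.println("NumEditLogsSynced = " + editLogsSynced.value());
  }
}
```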
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a5ee30bc1a5..6129db8a5a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -232,47 +232,47 @@ public class BlockManager implements BlockStatsMXBean {
}
/** Used by metrics. */
- public long getLowRedundancyBlocksStat() {
+ public long getLowRedundancyBlocks() {
return neededReconstruction.getLowRedundancyBlocksStat();
}
/** Used by metrics. */
- public long getCorruptBlocksStat() {
+ public long getCorruptBlocks() {
return corruptReplicas.getCorruptBlocksStat();
}
/** Used by metrics. */
- public long getMissingBlocksStat() {
+ public long getMissingBlocks() {
return neededReconstruction.getCorruptBlocksStat();
}
/** Used by metrics. */
- public long getMissingReplicationOneBlocksStat() {
+ public long getMissingReplicationOneBlocks() {
return neededReconstruction.getCorruptReplicationOneBlocksStat();
}
/** Used by metrics. */
- public long getPendingDeletionBlocksStat() {
+ public long getPendingDeletionReplicatedBlocks() {
return invalidateBlocks.getBlocksStat();
}
/** Used by metrics. */
- public long getLowRedundancyECBlockGroupsStat() {
+ public long getLowRedundancyECBlockGroups() {
return neededReconstruction.getLowRedundancyECBlockGroupsStat();
}
/** Used by metrics. */
- public long getCorruptECBlockGroupsStat() {
+ public long getCorruptECBlockGroups() {
return corruptReplicas.getCorruptECBlockGroupsStat();
}
/** Used by metrics. */
- public long getMissingECBlockGroupsStat() {
+ public long getMissingECBlockGroups() {
return neededReconstruction.getCorruptECBlockGroupsStat();
}
/** Used by metrics. */
- public long getPendingDeletionECBlockGroupsStat() {
+ public long getPendingDeletionECBlockGroups() {
return invalidateBlocks.getECBlockGroupsStat();
}
@@ -705,17 +705,36 @@ public class BlockManager implements BlockStatsMXBean {
datanodeManager.fetchDatanodes(live, dead, false);
out.println("Live Datanodes: " + live.size());
out.println("Dead Datanodes: " + dead.size());
+
//
- // Dump contents of neededReconstruction
+ // Need to iterate over all queues from neededReplications
+ // except for the QUEUE_WITH_CORRUPT_BLOCKS)
//
synchronized (neededReconstruction) {
out.println("Metasave: Blocks waiting for reconstruction: "
- + neededReconstruction.size());
- for (Block block : neededReconstruction) {
+ + neededReconstruction.getLowRedundancyBlockCount());
+ for (int i = 0; i < neededReconstruction.LEVEL; i++) {
+ if (i != neededReconstruction.QUEUE_WITH_CORRUPT_BLOCKS) {
+ for (Iterator it = neededReconstruction.iterator(i);
+ it.hasNext();) {
+ Block block = it.next();
+ dumpBlockMeta(block, out);
+ }
+ }
+ }
+ //
+ // Now prints corrupt blocks separately
+ //
+ out.println("Metasave: Blocks currently missing: " +
+ neededReconstruction.getCorruptBlockSize());
+ for (Iterator it = neededReconstruction.
+ iterator(neededReconstruction.QUEUE_WITH_CORRUPT_BLOCKS);
+ it.hasNext();) {
+ Block block = it.next();
dumpBlockMeta(block, out);
}
}
-
+
// Dump any postponed over-replicated blocks
out.println("Mis-replicated blocks that have been postponed:");
for (Block block : postponedMisreplicatedBlocks) {
@@ -2292,11 +2311,11 @@ public class BlockManager implements BlockStatsMXBean {
return bmSafeMode.getBytesInFuture();
}
- public long getBytesInFutureReplicatedBlocksStat() {
+ public long getBytesInFutureReplicatedBlocks() {
return bmSafeMode.getBytesInFutureBlocks();
}
- public long getBytesInFutureStripedBlocksStat() {
+ public long getBytesInFutureECBlockGroups() {
return bmSafeMode.getBytesInFutureECBlockGroups();
}
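The reworked metaSave walk above iterates every priority queue in neededReconstruction except the corrupt one, then dumps the corrupt queue separately. A hedged, self-contained sketch of that traversal shape; the LEVEL and QUEUE_WITH_CORRUPT_BLOCKS values mirror the constants used in the hunk, and the String elements stand in for the BlockInfo objects elided in the diff:

```java
import java.util.Iterator;
import java.util.List;

class PriorityQueueWalkExample {
  // Mirrors the shape of LowRedundancyBlocks: LEVEL priority lists, with the
  // last level reserved for corrupt blocks.
  static final int LEVEL = 5;
  static final int QUEUE_WITH_CORRUPT_BLOCKS = 4;

  static void dump(List<List<String>> queues, StringBuilder out) {
    // First pass: every queue except the corrupt one.
    for (int i = 0; i < LEVEL; i++) {
      if (i == QUEUE_WITH_CORRUPT_BLOCKS) {
        continue;
      }
      for (Iterator<String> it = queues.get(i).iterator(); it.hasNext();) {
        out.append("waiting for reconstruction: ").append(it.next()).append('\n');
      }
    }
    // Second pass: corrupt (missing) blocks are reported separately.
    for (String block : queues.get(QUEUE_WITH_CORRUPT_BLOCKS)) {
      out.append("currently missing: ").append(block).append('\n');
    }
  }
}
```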
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
similarity index 87%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
index ae7982628fa..928036af869 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
@@ -49,37 +49,47 @@ import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
- * Manages datanode decommissioning. A background monitor thread
- * periodically checks the status of datanodes that are in-progress of
- * decommissioning.
+ * Manages decommissioning and maintenance state for DataNodes. A background
+ * monitor thread periodically checks the status of DataNodes that are
+ * decommissioning or entering maintenance state.
*
- * A datanode can be decommissioned in a few situations:
+ * A DataNode can be decommissioned in a few situations:
*
*
If a DN is dead, it is decommissioned immediately.
- *
If a DN is alive, it is decommissioned after all of its blocks
- * are sufficiently replicated. Merely under-replicated blocks do not
- * block decommissioning as long as they are above a replication
+ *
If a DN is alive, it is decommissioned after all of its blocks
+ * are sufficiently replicated. Merely under-replicated blocks do not
+ * block decommissioning as long as they are above a replication
* threshold.
*
- * In the second case, the datanode transitions to a
- * decommission-in-progress state and is tracked by the monitor thread. The
- * monitor periodically scans through the list of insufficiently replicated
- * blocks on these datanodes to
- * determine if they can be decommissioned. The monitor also prunes this list
- * as blocks become replicated, so monitor scans will become more efficient
+ * In the second case, the DataNode transitions to a DECOMMISSION_INPROGRESS
+ * state and is tracked by the monitor thread. The monitor periodically scans
+ * through the list of insufficiently replicated blocks on these DataNodes to
+ * determine if they can be DECOMMISSIONED. The monitor also prunes this list
+ * as blocks become replicated, so monitor scans will become more efficient
* over time.
*
- * Decommission-in-progress nodes that become dead do not progress to
- * decommissioned until they become live again. This prevents potential
+ * DECOMMISSION_INPROGRESS nodes that become dead do not progress to
+ * DECOMMISSIONED until they become live again. This prevents potential
* durability loss for singly-replicated blocks (see HDFS-6791).
*
+ * DataNodes can also be put under maintenance state for any short duration
+ * maintenance operations. Unlike decommissioning, blocks are not always
+ * re-replicated for the DataNodes to enter maintenance state. When the
+ * blocks are replicated at least dfs.namenode.maintenance.replication.min,
+ * DataNodes transition to IN_MAINTENANCE state. Otherwise, just like
+ * decommissioning, DataNodes transition to ENTERING_MAINTENANCE state and
+ * wait for the blocks to be sufficiently replicated and then transition to
+ * IN_MAINTENANCE state. The block replication factor is relaxed for a maximum
+ * of maintenance expiry time. When DataNodes don't transition or join the
+ * cluster back by expiry time, blocks are re-replicated just as in
+ * decommissioning case as to avoid read or write performance degradation.
+ *
* This class depends on the FSNamesystem lock for synchronization.
*/
@InterfaceAudience.Private
-public class DecommissionManager {
- private static final Logger LOG = LoggerFactory.getLogger(DecommissionManager
- .class);
-
+public class DatanodeAdminManager {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(DatanodeAdminManager.class);
private final Namesystem namesystem;
private final BlockManager blockManager;
private final HeartbeatManager hbManager;
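The class-level javadoc above describes the decommission and maintenance state machine that the renamed DatanodeAdminManager drives. A rough, hedged summary of those states in code, using the public DatanodeInfo.AdminStates enum; the real transitions are performed by the admin manager and heartbeat manager, not by this sketch:

```java
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

class AdminStateExample {
  static String describe(AdminStates state) {
    switch (state) {
      case NORMAL:                  return "in service";
      case DECOMMISSION_INPROGRESS: return "waiting for blocks to be sufficiently replicated";
      case DECOMMISSIONED:          return "safe to remove from the cluster";
      case ENTERING_MAINTENANCE:    return "waiting for minimal replication before maintenance";
      case IN_MAINTENANCE:          return "temporarily out of service for maintenance";
      default:                      return "unknown";
    }
  }
}
```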
@@ -97,7 +107,7 @@ public class DecommissionManager {
* the node from being marked as decommissioned. During a monitor tick, this
* list is pruned as blocks becomes replicated.
*
- * Note also that the reference to the list of under-replicated blocks
+ * Note also that the reference to the list of under-replicated blocks
* will be null on initial add
*
* However, this map can become out-of-date since it is not updated by block
@@ -113,24 +123,23 @@ public class DecommissionManager {
* outOfServiceNodeBlocks. Additional nodes wait in pendingNodes.
*/
private final Queue pendingNodes;
-
private Monitor monitor = null;
- DecommissionManager(final Namesystem namesystem,
+ DatanodeAdminManager(final Namesystem namesystem,
final BlockManager blockManager, final HeartbeatManager hbManager) {
this.namesystem = namesystem;
this.blockManager = blockManager;
this.hbManager = hbManager;
executor = Executors.newScheduledThreadPool(1,
- new ThreadFactoryBuilder().setNameFormat("DecommissionMonitor-%d")
+ new ThreadFactoryBuilder().setNameFormat("DatanodeAdminMonitor-%d")
.setDaemon(true).build());
outOfServiceNodeBlocks = new TreeMap<>();
pendingNodes = new LinkedList<>();
}
/**
- * Start the decommission monitor thread.
+ * Start the DataNode admin monitor thread.
* @param conf
*/
void activate(Configuration conf) {
@@ -151,7 +160,7 @@ public class DecommissionManager {
if (strNodes != null) {
LOG.warn("Deprecated configuration key {} will be ignored.",
deprecatedKey);
- LOG.warn("Please update your configuration to use {} instead.",
+ LOG.warn("Please update your configuration to use {} instead.",
DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
}
@@ -161,7 +170,8 @@ public class DecommissionManager {
final int maxConcurrentTrackedNodes = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
- DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
+ DFSConfigKeys
+ .DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " +
"value for "
+ DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);
@@ -170,14 +180,14 @@ public class DecommissionManager {
executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs,
TimeUnit.SECONDS);
- LOG.debug("Activating DecommissionManager with interval {} seconds, " +
+ LOG.debug("Activating DatanodeAdminManager with interval {} seconds, " +
"{} max blocks per interval, " +
"{} max concurrently tracked nodes.", intervalSecs,
blocksPerInterval, maxConcurrentTrackedNodes);
}
/**
- * Stop the decommission monitor thread, waiting briefly for it to terminate.
+ * Stop the admin monitor thread, waiting briefly for it to terminate.
*/
void close() {
executor.shutdownNow();
@@ -187,7 +197,7 @@ public class DecommissionManager {
}
/**
- * Start decommissioning the specified datanode.
+ * Start decommissioning the specified datanode.
* @param node
*/
@VisibleForTesting
@@ -211,7 +221,7 @@ public class DecommissionManager {
}
/**
- * Stop decommissioning the specified datanode.
+ * Stop decommissioning the specified datanode.
* @param node
*/
@VisibleForTesting
@@ -224,7 +234,7 @@ public class DecommissionManager {
if (node.isAlive()) {
blockManager.processExtraRedundancyBlocksOnInService(node);
}
- // Remove from tracking in DecommissionManager
+ // Remove from tracking in DatanodeAdminManager
pendingNodes.remove(node);
outOfServiceNodeBlocks.remove(node);
} else {
@@ -303,7 +313,7 @@ public class DecommissionManager {
blockManager.processExtraRedundancyBlocksOnInService(node);
}
- // Remove from tracking in DecommissionManager
+ // Remove from tracking in DatanodeAdminManager
pendingNodes.remove(node);
outOfServiceNodeBlocks.remove(node);
} else {
@@ -324,8 +334,9 @@ public class DecommissionManager {
/**
* Checks whether a block is sufficiently replicated/stored for
- * decommissioning. For replicated blocks or striped blocks, full-strength
- * replication or storage is not always necessary, hence "sufficient".
+ * DECOMMISSION_INPROGRESS or ENTERING_MAINTENANCE datanodes. For replicated
+ * blocks or striped blocks, full-strength replication or storage is not
+ * always necessary, hence "sufficient".
* @return true if sufficient, else false.
*/
private boolean isSufficient(BlockInfo block, BlockCollection bc,
@@ -416,9 +427,10 @@ public class DecommissionManager {
}
/**
- * Checks to see if DNs have finished decommissioning.
+ * Checks to see if datanodes have finished DECOMMISSION_INPROGRESS or
+ * ENTERING_MAINTENANCE state.
*
- * Since this is done while holding the namesystem lock,
+ * Since this is done while holding the namesystem lock,
* the amount of work per monitor tick is limited.
*/
private class Monitor implements Runnable {
@@ -440,15 +452,15 @@ public class DecommissionManager {
*/
private int numBlocksCheckedPerLock = 0;
/**
- * The number of nodes that have been checked on this tick. Used for
+ * The number of nodes that have been checked on this tick. Used for
* statistics.
*/
private int numNodesChecked = 0;
/**
- * The last datanode in outOfServiceNodeBlocks that we've processed
+ * The last datanode in outOfServiceNodeBlocks that we've processed.
*/
- private DatanodeDescriptor iterkey = new DatanodeDescriptor(new
- DatanodeID("", "", "", 0, 0, 0, 0));
+ private DatanodeDescriptor iterkey = new DatanodeDescriptor(
+ new DatanodeID("", "", "", 0, 0, 0, 0));
Monitor(int numBlocksPerCheck, int maxConcurrentTrackedNodes) {
this.numBlocksPerCheck = numBlocksPerCheck;
@@ -463,8 +475,8 @@ public class DecommissionManager {
@Override
public void run() {
if (!namesystem.isRunning()) {
- LOG.info("Namesystem is not running, skipping decommissioning checks"
- + ".");
+ LOG.info("Namesystem is not running, skipping " +
+ "decommissioning/maintenance checks.");
return;
}
// Reset the checked count at beginning of each iteration
@@ -486,7 +498,7 @@ public class DecommissionManager {
}
/**
- * Pop datanodes off the pending list and into decomNodeBlocks,
+ * Pop datanodes off the pending list and into decomNodeBlocks,
* subject to the maxConcurrentTrackedNodes limit.
*/
private void processPendingNodes() {
@@ -522,8 +534,8 @@ public class DecommissionManager {
continue;
}
if (blocks == null) {
- // This is a newly added datanode, run through its list to schedule
- // under-replicated blocks for replication and collect the blocks
+ // This is a newly added datanode, run through its list to schedule
+ // under-replicated blocks for replication and collect the blocks
// that are insufficiently replicated for further tracking
LOG.debug("Newly-added node {}, doing full scan to find " +
"insufficiently-replicated blocks.", dn);
@@ -531,26 +543,27 @@ public class DecommissionManager {
outOfServiceNodeBlocks.put(dn, blocks);
fullScan = true;
} else {
- // This is a known datanode, check if its # of insufficiently
- // replicated blocks has dropped to zero and if it can be decommed
+ // This is a known datanode, check if its # of insufficiently
+ // replicated blocks has dropped to zero and if it can move
+ // to the next state.
LOG.debug("Processing {} node {}", dn.getAdminState(), dn);
pruneReliableBlocks(dn, blocks);
}
if (blocks.size() == 0) {
if (!fullScan) {
- // If we didn't just do a full scan, need to re-check with the
+ // If we didn't just do a full scan, need to re-check with the
// full block map.
//
- // We've replicated all the known insufficiently replicated
- // blocks. Re-check with the full block map before finally
- // marking the datanode as decommissioned
+ // We've replicated all the known insufficiently replicated
+ // blocks. Re-check with the full block map before finally
+ // marking the datanode as DECOMMISSIONED or IN_MAINTENANCE.
LOG.debug("Node {} has finished replicating current set of "
+ "blocks, checking with the full block map.", dn);
blocks = handleInsufficientlyStored(dn);
outOfServiceNodeBlocks.put(dn, blocks);
}
- // If the full scan is clean AND the node liveness is okay,
- // we can finally mark as decommissioned.
+ // If the full scan is clean AND the node liveness is okay,
+ // we can finally mark as DECOMMISSIONED or IN_MAINTENANCE.
final boolean isHealthy =
blockManager.isNodeHealthyForDecommissionOrMaintenance(dn);
if (blocks.size() == 0 && isHealthy) {
@@ -580,7 +593,7 @@ public class DecommissionManager {
}
iterkey = dn;
}
- // Remove the datanodes that are decommissioned or in service after
+ // Remove the datanodes that are DECOMMISSIONED or in service after
// maintenance expiration.
for (DatanodeDescriptor dn : toRemove) {
Preconditions.checkState(dn.isDecommissioned() || dn.isInService(),
@@ -598,9 +611,9 @@ public class DecommissionManager {
}
/**
- * Returns a list of blocks on a datanode that are insufficiently replicated
- * or require recovery, i.e. requiring recovery and should prevent
- * decommission.
+ * Returns a list of blocks on a datanode that are insufficiently
+ * replicated or require recovery, i.e. requiring recovery and
+ * should prevent decommission or maintenance.
*
* As part of this, it also schedules replication/recovery work.
*
@@ -615,9 +628,10 @@ public class DecommissionManager {
}
/**
- * Used while checking if decommission-in-progress datanodes can be marked
- * as decommissioned. Combines shared logic of
- * pruneReliableBlocks and handleInsufficientlyStored.
+ * Used while checking if DECOMMISSION_INPROGRESS datanodes can be
+ * marked as DECOMMISSIONED or ENTERING_MAINTENANCE datanodes can be
+ * marked as IN_MAINTENANCE. Combines shared logic of pruneReliableBlocks
+ * and handleInsufficientlyStored.
*
* @param datanode Datanode
* @param it Iterator over the blocks on the
@@ -652,7 +666,7 @@ public class DecommissionManager {
// configured per-iteration-limit.
namesystem.writeUnlock();
try {
- LOG.debug("Yielded lock during decommission check");
+ LOG.debug("Yielded lock during decommission/maintenance check");
Thread.sleep(0, 500);
} catch (InterruptedException ignored) {
return;
@@ -682,8 +696,8 @@ public class DecommissionManager {
final NumberReplicas num = blockManager.countNodes(block);
final int liveReplicas = num.liveReplicas();
- // Schedule low redundancy blocks for reconstruction if not already
- // pending
+ // Schedule low redundancy blocks for reconstruction
+ // if not already pending.
boolean isDecommission = datanode.isDecommissionInProgress();
boolean neededReconstruction = isDecommission ?
blockManager.isNeededReconstruction(block, num) :
@@ -701,7 +715,8 @@ public class DecommissionManager {
}
// Even if the block is without sufficient redundancy,
- // it doesn't block decommission if has sufficient redundancy
+ // it might not block decommission/maintenance if it
+ // has sufficient redundancy.
if (isSufficient(block, bc, num, isDecommission)) {
if (pruneReliableBlocks) {
it.remove();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 2bd4a203ff7..d35894caef3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -661,7 +661,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
return erasurecodeBlocks.size();
}
- public List getReplicationCommand(int maxTransfers) {
+ int getNumberOfReplicateBlocks() {
+ return replicateBlocks.size();
+ }
+
+ List getReplicationCommand(int maxTransfers) {
return replicateBlocks.poll(maxTransfers);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 1d0975111d9..c75bcea45b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -75,7 +75,7 @@ public class DatanodeManager {
private final Namesystem namesystem;
private final BlockManager blockManager;
- private final DecommissionManager decomManager;
+ private final DatanodeAdminManager datanodeAdminManager;
private final HeartbeatManager heartbeatManager;
private final FSClusterStats fsClusterStats;
@@ -212,8 +212,6 @@ public class DatanodeManager {
this.namesystem = namesystem;
this.blockManager = blockManager;
- // TODO: Enables DFSNetworkTopology by default after more stress
- // testings/validations.
this.useDfsNetworkTopology = conf.getBoolean(
DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY,
DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT);
@@ -223,9 +221,10 @@ public class DatanodeManager {
networktopology = NetworkTopology.getInstance(conf);
}
- this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);
- this.decomManager = new DecommissionManager(namesystem, blockManager,
- heartbeatManager);
+ this.heartbeatManager = new HeartbeatManager(namesystem,
+ blockManager, conf);
+ this.datanodeAdminManager = new DatanodeAdminManager(namesystem,
+ blockManager, heartbeatManager);
this.fsClusterStats = newFSClusterStats();
this.dataNodePeerStatsEnabled = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_PEER_STATS_ENABLED_KEY,
@@ -290,12 +289,19 @@ public class DatanodeManager {
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
+ 10 * 1000 * heartbeatIntervalSeconds;
- final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
+
+ // Effected block invalidate limit is the bigger value between
+ // value configured in hdfs-site.xml, and 20 * HB interval.
+ final int configuredBlockInvalidateLimit = conf.getInt(
+ DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY,
DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
- this.blockInvalidateLimit = conf.getInt(
- DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
+ final int countedBlockInvalidateLimit = 20*(int)(heartbeatIntervalSeconds);
+ this.blockInvalidateLimit = Math.max(countedBlockInvalidateLimit,
+ configuredBlockInvalidateLimit);
LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
- + "=" + this.blockInvalidateLimit);
+ + ": configured=" + configuredBlockInvalidateLimit
+ + ", counted=" + countedBlockInvalidateLimit
+ + ", effected=" + blockInvalidateLimit);
this.checkIpHostnameInRegistration = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
@@ -365,12 +371,12 @@ public class DatanodeManager {
}
void activate(final Configuration conf) {
- decomManager.activate(conf);
+ datanodeAdminManager.activate(conf);
heartbeatManager.activate();
}
void close() {
- decomManager.close();
+ datanodeAdminManager.close();
heartbeatManager.close();
}
@@ -385,8 +391,8 @@ public class DatanodeManager {
}
@VisibleForTesting
- public DecommissionManager getDecomManager() {
- return decomManager;
+ public DatanodeAdminManager getDatanodeAdminManager() {
+ return datanodeAdminManager;
}
public HostConfigManager getHostConfigManager() {
@@ -403,7 +409,8 @@ public class DatanodeManager {
return fsClusterStats;
}
- int getBlockInvalidateLimit() {
+ @VisibleForTesting
+ public int getBlockInvalidateLimit() {
return blockInvalidateLimit;
}
@@ -983,9 +990,9 @@ public class DatanodeManager {
hostConfigManager.getMaintenanceExpirationTimeInMS(nodeReg);
// If the registered node is in exclude list, then decommission it
if (getHostConfigManager().isExcluded(nodeReg)) {
- decomManager.startDecommission(nodeReg);
+ datanodeAdminManager.startDecommission(nodeReg);
} else if (nodeReg.maintenanceNotExpired(maintenanceExpireTimeInMS)) {
- decomManager.startMaintenance(nodeReg, maintenanceExpireTimeInMS);
+ datanodeAdminManager.startMaintenance(nodeReg, maintenanceExpireTimeInMS);
}
}
@@ -1211,12 +1218,13 @@ public class DatanodeManager {
long maintenanceExpireTimeInMS =
hostConfigManager.getMaintenanceExpirationTimeInMS(node);
if (node.maintenanceNotExpired(maintenanceExpireTimeInMS)) {
- decomManager.startMaintenance(node, maintenanceExpireTimeInMS);
+ datanodeAdminManager.startMaintenance(
+ node, maintenanceExpireTimeInMS);
} else if (hostConfigManager.isExcluded(node)) {
- decomManager.startDecommission(node);
+ datanodeAdminManager.startDecommission(node);
} else {
- decomManager.stopMaintenance(node);
- decomManager.stopDecommission(node);
+ datanodeAdminManager.stopMaintenance(node);
+ datanodeAdminManager.stopDecommission(node);
}
}
node.setUpgradeDomain(hostConfigManager.getUpgradeDomain(node));
@@ -1655,21 +1663,38 @@ public class DatanodeManager {
}
final List cmds = new ArrayList<>();
- // check pending replication
- List pendingList = nodeinfo.getReplicationCommand(
- maxTransfers);
- if (pendingList != null) {
- cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
- pendingList));
- maxTransfers -= pendingList.size();
- }
- // check pending erasure coding tasks
- List pendingECList = nodeinfo
- .getErasureCodeCommand(maxTransfers);
- if (pendingECList != null) {
- cmds.add(new BlockECReconstructionCommand(
- DNA_ERASURE_CODING_RECONSTRUCTION, pendingECList));
+ // Allocate _approximately_ maxTransfers pending tasks to DataNode.
+ // NN chooses pending tasks based on the ratio between the lengths of
+ // replication and erasure-coded block queues.
+ int totalReplicateBlocks = nodeinfo.getNumberOfReplicateBlocks();
+ int totalECBlocks = nodeinfo.getNumberOfBlocksToBeErasureCoded();
+ int totalBlocks = totalReplicateBlocks + totalECBlocks;
+ if (totalBlocks > 0) {
+ int numReplicationTasks = (int) Math.ceil(
+ (double) (totalReplicateBlocks * maxTransfers) / totalBlocks);
+ int numECTasks = (int) Math.ceil(
+ (double) (totalECBlocks * maxTransfers) / totalBlocks);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Pending replication tasks: " + numReplicationTasks
+ + " erasure-coded tasks: " + numECTasks);
+ }
+ // check pending replication tasks
+ List pendingList = nodeinfo.getReplicationCommand(
+ numReplicationTasks);
+ if (pendingList != null && !pendingList.isEmpty()) {
+ cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
+ pendingList));
+ }
+ // check pending erasure coding tasks
+ List pendingECList = nodeinfo
+ .getErasureCodeCommand(numECTasks);
+ if (pendingECList != null && !pendingECList.isEmpty()) {
+ cmds.add(new BlockECReconstructionCommand(
+ DNA_ERASURE_CODING_RECONSTRUCTION, pendingECList));
+ }
}
+
// check block invalidation
Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
if (blks != null) {
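The heartbeat-response change above splits maxTransfers between replication and EC reconstruction work in proportion to the two queue lengths. A self-contained worked example with hypothetical numbers:

```java
class PendingTaskSplitExample {
  public static void main(String[] args) {
    int maxTransfers = 20;           // transfer slots advertised by the DataNode
    int totalReplicateBlocks = 30;   // pending replication work for this node
    int totalECBlocks = 10;          // pending EC reconstruction work
    int totalBlocks = totalReplicateBlocks + totalECBlocks;

    // Same rounding as the patch: ceil() on each share, so the sum can
    // slightly exceed maxTransfers -- hence "_approximately_" in the comment.
    int numReplicationTasks = (int) Math.ceil(
        (double) (totalReplicateBlocks * maxTransfers) / totalBlocks);  // 15
    int numECTasks = (int) Math.ceil(
        (double) (totalECBlocks * maxTransfers) / totalBlocks);         // 5

    System.out.println(numReplicationTasks + " replication + "
        + numECTasks + " EC tasks for " + maxTransfers + " slots");
  }
}
```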
@@ -1911,7 +1936,7 @@ public class DatanodeManager {
this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000
* intervalSeconds;
this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds),
- DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+ blockInvalidateLimit);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index e1e642abe4a..5ae5b30fb3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2256,6 +2256,17 @@ public class DataNode extends ReconfigurableBase
xmitsInProgress.getAndIncrement();
}
+ /**
+ * Increments the xmitInProgress count by given value.
+ *
+ * @param delta the amount of xmitsInProgress to increase.
+ * @see #incrementXmitsInProgress()
+ */
+ public void incrementXmitsInProcess(int delta) {
+ Preconditions.checkArgument(delta >= 0);
+ xmitsInProgress.getAndAdd(delta);
+ }
+
/**
* Decrements the xmitsInProgress count
*/
@@ -2263,6 +2274,16 @@ public class DataNode extends ReconfigurableBase
xmitsInProgress.getAndDecrement();
}
+ /**
+ * Decrements the xmitsInProgress count by given value.
+ *
+ * @see #decrementXmitsInProgress()
+ */
+ public void decrementXmitsInProgress(int delta) {
+ Preconditions.checkArgument(delta >= 0);
+ xmitsInProgress.getAndAdd(0 - delta);
+ }
+
private void reportBadBlock(final BPOfferService bpos,
final ExtendedBlock block, final String msg) {
FsVolumeSpi volume = getFSDataset().getVolume(block);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 8ffd3a4f3fc..3216a78b7b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -811,20 +811,25 @@ class DataXceiver extends Receiver implements Runnable {
smallBufferSize));
mirrorIn = new DataInputStream(unbufMirrorIn);
+ String targetStorageId = null;
+ if (targetStorageIds.length > 0) {
+ // Older clients may not have provided any targetStorageIds
+ targetStorageId = targetStorageIds[0];
+ }
if (targetPinnings != null && targetPinnings.length > 0) {
new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
blockToken, clientname, targets, targetStorageTypes,
srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
latestGenerationStamp, requestedChecksum, cachingStrategy,
allowLazyPersist, targetPinnings[0], targetPinnings,
- targetStorageIds[0], targetStorageIds);
+ targetStorageId, targetStorageIds);
} else {
new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
blockToken, clientname, targets, targetStorageTypes,
srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
latestGenerationStamp, requestedChecksum, cachingStrategy,
allowLazyPersist, false, targetPinnings,
- targetStorageIds[0], targetStorageIds);
+ targetStorageId, targetStorageIds);
}
mirrorOut.flush();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index e076dda9809..72c224f2f77 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.util.Daemon;
import org.slf4j.Logger;
import java.util.Collection;
+import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -93,7 +94,8 @@ public final class ErasureCodingWorker {
LOG.debug("Using striped block reconstruction; pool threads={}",
numThreads);
stripedReconstructionPool = DFSUtilClient.getThreadPoolExecutor(2,
- numThreads, 60, "StripedBlockReconstruction-", false);
+ numThreads, 60, new LinkedBlockingQueue<>(),
+ "StripedBlockReconstruction-", false);
stripedReconstructionPool.allowCoreThreadTimeOut(true);
}
@@ -106,6 +108,7 @@ public final class ErasureCodingWorker {
public void processErasureCodingTasks(
Collection ecTasks) {
for (BlockECReconstructionInfo reconInfo : ecTasks) {
+ int xmitsSubmitted = 0;
try {
StripedReconstructionInfo stripedReconInfo =
new StripedReconstructionInfo(
@@ -113,15 +116,25 @@ public final class ErasureCodingWorker {
reconInfo.getLiveBlockIndices(), reconInfo.getSourceDnInfos(),
reconInfo.getTargetDnInfos(), reconInfo.getTargetStorageTypes(),
reconInfo.getTargetStorageIDs());
+ // It may throw IllegalArgumentException from task#stripedReader
+ // constructor.
final StripedBlockReconstructor task =
new StripedBlockReconstructor(this, stripedReconInfo);
if (task.hasValidTargets()) {
+ // See HDFS-12044. We increase xmitsInProgress even the task is only
+ // enqueued, so that
+ // 1) NN will not send more tasks than what DN can execute and
+ // 2) DN will not throw away reconstruction tasks, and instead keeps
+ // an unbounded number of tasks in the executor's task queue.
+ xmitsSubmitted = task.getXmits();
+ getDatanode().incrementXmitsInProcess(xmitsSubmitted);
stripedReconstructionPool.submit(task);
} else {
LOG.warn("No missing internal block. Skip reconstruction for task:{}",
reconInfo);
}
} catch (Throwable e) {
+ getDatanode().decrementXmitsInProgress(xmitsSubmitted);
LOG.warn("Failed to reconstruct striped block {}",
reconInfo.getExtendedBlock().getLocalBlock(), e);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
index 1119bbbd230..bac013aea29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
@@ -48,7 +48,6 @@ class StripedBlockReconstructor extends StripedReconstructor
@Override
public void run() {
- getDatanode().incrementXmitsInProgress();
try {
initDecoderIfNecessary();
@@ -66,7 +65,7 @@ class StripedBlockReconstructor extends StripedReconstructor
LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e);
getDatanode().getMetrics().incrECFailedReconstructionTasks();
} finally {
- getDatanode().decrementXmitsInProgress();
+ getDatanode().decrementXmitsInProgress(getXmits());
final DataNodeMetrics metrics = getDatanode().getMetrics();
metrics.incrECReconstructionTasks();
metrics.incrECReconstructionBytesRead(getBytesRead());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java
index f6f343a6bf3..96f97915455 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java
@@ -68,6 +68,8 @@ class StripedReader {
private int[] successList;
private final int minRequiredSources;
+ // the number of xmits used by the re-construction task.
+ private final int xmits;
// The buffers and indices for striped blocks whose length is 0
private ByteBuffer[] zeroStripeBuffers;
private short[] zeroStripeIndices;
@@ -107,6 +109,12 @@ class StripedReader {
zeroStripeIndices = new short[zeroStripNum];
}
+ // It is calculated by the maximum number of connections from either sources
+ // or targets.
+ xmits = Math.max(minRequiredSources,
+ stripedReconInfo.getTargets() != null ?
+ stripedReconInfo.getTargets().length : 0);
+
this.liveIndices = stripedReconInfo.getLiveIndices();
assert liveIndices != null;
this.sources = stripedReconInfo.getSources();
@@ -472,4 +480,16 @@ class StripedReader {
CachingStrategy getCachingStrategy() {
return reconstructor.getCachingStrategy();
}
+
+ /**
+ * Return the xmits of this EC reconstruction task.
+ *
+ * DN uses it to coordinate with NN to adjust the speed of scheduling the
+ * EC reconstruction tasks to this DN.
+ *
+ * @return the xmits of this reconstruction task.
+ */
+ int getXmits() {
+ return xmits;
+ }
}
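The xmits of a reconstruction task is the larger of the number of source reads and the number of target writes, so a single queued task can now account for several transfer slots. A worked example with a hypothetical RS-6-3 block group that lost one internal block; the source and target counts are assumptions for illustration:

```java
class EcXmitsExample {
  public static void main(String[] args) {
    // RS-6-3: six units are needed to decode, one lost block is rebuilt.
    int minRequiredSources = 6;
    int numTargets = 1;

    // Same formula as StripedReader: max(sources needed, targets written).
    int xmits = Math.max(minRequiredSources, numTargets);   // 6

    // StripedReconstructionInfo#getWeight() counts connections opened,
    // assuming exactly the six sources above are read from.
    int weight = minRequiredSources + numTargets;            // 7

    System.out.println("xmits=" + xmits + ", weight=" + weight);
  }
}
```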
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
index a619c34781c..0a3e12546df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructionInfo.java
@@ -103,5 +103,20 @@ public class StripedReconstructionInfo {
String[] getTargetStorageIds() {
return targetStorageIds;
}
+
+ /**
+ * Return the weight of this EC reconstruction task.
+ *
+ * DN uses it to coordinate with NN to adjust the speed of scheduling the
+ * reconstructions tasks to this DN.
+ *
+ * @return the weight of this reconstruction task.
+ * @see HDFS-12044
+ */
+ int getWeight() {
+ // See HDFS-12044. The weight of a RS(n, k) is calculated by the network
+ // connections it opens.
+ return sources.length + targets.length;
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
index b8433c7b6c3..3202121b62e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
@@ -133,7 +133,6 @@ abstract class StripedReconstructor {
}
blockGroup = stripedReconInfo.getBlockGroup();
stripedReader = new StripedReader(this, datanode, conf, stripedReconInfo);
-
cachingStrategy = CachingStrategy.newDefaultStrategy();
positionInBlock = 0L;
@@ -233,6 +232,13 @@ abstract class StripedReconstructor {
return blockGroup;
}
+ /**
+ * Get the xmits that _will_ be used for this reconstruction task.
+ */
+ int getXmits() {
+ return stripedReader.getXmits();
+ }
+
BitSet getLiveBitSet() {
return liveBitSet;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 6a61d31e26d..16df7091da7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -536,7 +536,7 @@ class FsDatasetImpl implements FsDatasetSpi {
ReplicaInfo block = it.next();
final StorageLocation blockStorageLocation =
block.getVolume().getStorageLocation();
- LOG.info("checking for block " + block.getBlockId() +
+ LOG.trace("checking for block " + block.getBlockId() +
" with storageLocation " + blockStorageLocation);
if (blockStorageLocation.equals(sdLocation)) {
blocks.add(block);
@@ -991,8 +991,7 @@ class FsDatasetImpl implements FsDatasetSpi {
replicaInfo, smallBufferSize, conf);
// Finalize the copied files
- newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo,
- false);
+ newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
try (AutoCloseableLock lock = datasetLock.acquire()) {
// Increment numBlocks here as this block moved without knowing to BPS
FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
@@ -1295,7 +1294,7 @@ class FsDatasetImpl implements FsDatasetSpi {
replicaInfo.bumpReplicaGS(newGS);
// finalize the replica if RBW
if (replicaInfo.getState() == ReplicaState.RBW) {
- finalizeReplica(b.getBlockPoolId(), replicaInfo, false);
+ finalizeReplica(b.getBlockPoolId(), replicaInfo);
}
return replicaInfo;
}
@@ -1625,23 +1624,39 @@ class FsDatasetImpl implements FsDatasetSpi {
@Override // FsDatasetSpi
public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
throws IOException {
+ ReplicaInfo replicaInfo = null;
+ ReplicaInfo finalizedReplicaInfo = null;
try (AutoCloseableLock lock = datasetLock.acquire()) {
if (Thread.interrupted()) {
// Don't allow data modifications from interrupted threads
throw new IOException("Cannot finalize block from Interrupted Thread");
}
- ReplicaInfo replicaInfo = getReplicaInfo(b);
+ replicaInfo = getReplicaInfo(b);
if (replicaInfo.getState() == ReplicaState.FINALIZED) {
// this is legal, when recovery happens on a file that has
// been opened for append but never modified
return;
}
- finalizeReplica(b.getBlockPoolId(), replicaInfo, fsyncDir);
+ finalizedReplicaInfo = finalizeReplica(b.getBlockPoolId(), replicaInfo);
+ }
+ /*
+ * Sync the directory after rename from tmp/rbw to Finalized if
+ * configured. Though rename should be an atomic operation, sync on both
+ * dest and src directories is done because IOUtils.fsync() calls the
+ * directory's channel sync, not the journal itself.
+ */
+ if (fsyncDir && finalizedReplicaInfo instanceof FinalizedReplica
+ && replicaInfo instanceof LocalReplica) {
+ FinalizedReplica finalizedReplica =
+ (FinalizedReplica) finalizedReplicaInfo;
+ finalizedReplica.fsyncDirectory();
+ LocalReplica localReplica = (LocalReplica) replicaInfo;
+ localReplica.fsyncDirectory();
}
}
- private ReplicaInfo finalizeReplica(String bpid,
- ReplicaInfo replicaInfo, boolean fsyncDir) throws IOException {
+ private ReplicaInfo finalizeReplica(String bpid, ReplicaInfo replicaInfo)
+ throws IOException {
try (AutoCloseableLock lock = datasetLock.acquire()) {
ReplicaInfo newReplicaInfo = null;
if (replicaInfo.getState() == ReplicaState.RUR &&
@@ -1656,19 +1671,6 @@ class FsDatasetImpl implements FsDatasetSpi {
newReplicaInfo = v.addFinalizedBlock(
bpid, replicaInfo, replicaInfo, replicaInfo.getBytesReserved());
- /*
- * Sync the directory after rename from tmp/rbw to Finalized if
- * configured. Though rename should be atomic operation, sync on both
- * dest and src directories are done because IOUtils.fsync() calls
- * directory's channel sync, not the journal itself.
- */
- if (fsyncDir && newReplicaInfo instanceof FinalizedReplica
- && replicaInfo instanceof LocalReplica) {
- FinalizedReplica finalizedReplica = (FinalizedReplica) newReplicaInfo;
- finalizedReplica.fsyncDirectory();
- LocalReplica localReplica = (LocalReplica) replicaInfo;
- localReplica.fsyncDirectory();
- }
if (v.isTransientStorage()) {
releaseLockedMemory(
replicaInfo.getOriginalBytesReserved()
@@ -2634,11 +2636,11 @@ class FsDatasetImpl implements FsDatasetSpi {
newReplicaInfo.setNumBytes(newlength);
volumeMap.add(bpid, newReplicaInfo.getReplicaInfo());
- finalizeReplica(bpid, newReplicaInfo.getReplicaInfo(), false);
+ finalizeReplica(bpid, newReplicaInfo.getReplicaInfo());
}
}
// finalize the block
- return finalizeReplica(bpid, rur, false);
+ return finalizeReplica(bpid, rur);
}
@Override // FsDatasetSpi
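The comment moved into finalizeBlock() above explains why both the source and destination directories are synced after the rename from rbw to finalized: IOUtils.fsync() forces the directory's FileChannel, not a journal. A self-contained sketch of that directory-sync idiom with plain java.nio (POSIX-only; the method name is illustrative, not the HDFS helper):

    import java.io.IOException;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;

    // Sketch: make a completed rename durable by opening the directory itself
    // and forcing its channel to disk (works on POSIX filesystems).
    public final class DirSyncSketch {
      static void fsyncDirectory(Path dir) throws IOException {
        try (FileChannel channel = FileChannel.open(dir, StandardOpenOption.READ)) {
          channel.force(true);
        }
      }

      public static void main(String[] args) throws IOException {
        fsyncDirectory(Paths.get("/tmp"));
      }
    }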
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 1a2c889e41d..b653f4fcccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -348,7 +348,7 @@ public class Mover {
private void processRecursively(String parent, HdfsFileStatus status,
Result result) {
String fullPath = status.getFullName(parent);
- if (status.isDir()) {
+ if (status.isDirectory()) {
if (!fullPath.endsWith(Path.SEPARATOR)) {
fullPath = fullPath + Path.SEPARATOR;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index b859148a443..318d8e011a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -471,7 +471,7 @@ public class BackupNode extends NameNode {
* {@link LeaseManager.Monitor} protected by SafeMode.
* {@link BlockManager.RedundancyMonitor} protected by SafeMode.
* {@link HeartbeatManager.Monitor} protected by SafeMode.
- * {@link DecommissionManager.Monitor} need to prohibit refreshNodes().
+ * {@link DatanodeAdminManager.Monitor} needs to prohibit refreshNodes().
* {@link PendingReconstructionBlocks.PendingReconstructionMonitor}
* harmless, because RedundancyMonitor is muted.
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 266d45cdc0b..404a0aab04b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -26,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -47,7 +49,6 @@ public final class ErasureCodingPolicyManager {
public static Logger LOG = LoggerFactory.getLogger(
ErasureCodingPolicyManager.class);
- private static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
private int maxCellSize =
DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT;
@@ -92,9 +93,14 @@ public final class ErasureCodingPolicyManager {
public void init(Configuration conf) {
// Populate the list of enabled policies from configuration
- final String[] policyNames = conf.getTrimmedStrings(
- DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
- DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT);
+ final String[] enablePolicyNames = conf.getTrimmedStrings(
+ DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+ DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT);
+ final String defaultPolicyName = conf.getTrimmed(
+ DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
+ DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
+ final String[] policyNames =
+ (String[]) ArrayUtils.add(enablePolicyNames, defaultPolicyName);
this.userPoliciesByID = new TreeMap<>();
this.userPoliciesByName = new TreeMap<>();
this.removedPoliciesByName = new TreeMap<>();
@@ -151,7 +157,13 @@ public final class ErasureCodingPolicyManager {
* Get enabled policy by policy name.
*/
public ErasureCodingPolicy getEnabledPolicyByName(String name) {
- return enabledPoliciesByName.get(name);
+ ErasureCodingPolicy ecPolicy = enabledPoliciesByName.get(name);
+ if (ecPolicy == null) {
+ if (name.equalsIgnoreCase(ErasureCodeConstants.REPLICATION_POLICY_NAME)) {
+ ecPolicy = SystemErasureCodingPolicies.getReplicationPolicy();
+ }
+ }
+ return ecPolicy;
}
/**
@@ -251,7 +263,8 @@ public final class ErasureCodingPolicyManager {
private byte getNextAvailablePolicyID() {
byte currentId = this.userPoliciesByID.keySet().stream()
- .max(Byte::compareTo).orElse(USER_DEFINED_POLICY_START_ID);
+ .max(Byte::compareTo).orElse(
+ ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
return (byte) (currentId + 1);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 486503cbf7f..426b42b4414 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -25,8 +25,8 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.stream.Collectors;
import com.google.common.base.Preconditions;
@@ -62,7 +62,7 @@ final class FSDirErasureCodingOp {
/**
* Check if the ecPolicyName is valid and enabled, return the corresponding
- * EC policy if is.
+ * EC policy if it is, including the REPLICATION EC policy.
* @param fsn namespace
* @param ecPolicyName name of EC policy to be checked
* @return an erasure coding policy if ecPolicyName is valid and enabled
@@ -295,7 +295,12 @@ final class FSDirErasureCodingOp {
if (iip.getLastINode() == null) {
throw new FileNotFoundException("Path not found: " + iip.getPath());
}
- return getErasureCodingPolicyForPath(fsd, iip);
+
+ ErasureCodingPolicy ecPolicy = getErasureCodingPolicyForPath(fsd, iip);
+ if (ecPolicy != null && ecPolicy.isReplicationPolicy()) {
+ ecPolicy = null;
+ }
+ return ecPolicy;
}
/**
@@ -312,7 +317,8 @@ final class FSDirErasureCodingOp {
}
/**
- * Get the erasure coding policy. This does not do any permission checking.
+ * Get the erasure coding policy, including the REPLICATION policy. This does
+ * not do any permission checking.
*
* @param fsn namespace
* @param iip inodes in the path containing the file
@@ -344,12 +350,13 @@ final class FSDirErasureCodingOp {
* @param fsn namespace
* @return {@link java.util.HashMap} array
*/
- static HashMap getErasureCodingCodecs(final FSNamesystem fsn)
+ static Map getErasureCodingCodecs(final FSNamesystem fsn)
throws IOException {
assert fsn.hasReadLock();
return CodecRegistry.getInstance().getCodec2CoderCompactMap();
}
+ // Return the erasure coding policy for the path, including the REPLICATION policy.
private static ErasureCodingPolicy getErasureCodingPolicyForPath(
FSDirectory fsd, INodesInPath iip) throws IOException {
Preconditions.checkNotNull(iip, "INodes cannot be null");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 4c9224908bc..3b3368d8e14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -47,6 +46,7 @@ import org.apache.hadoop.security.AccessControlException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
+import java.util.EnumSet;
import static org.apache.hadoop.util.Time.now;
@@ -384,7 +384,6 @@ class FSDirStatAndListingOp {
* @param child for a directory listing of the iip, else null
* @param storagePolicy for the path or closest ancestor
* @param needLocation if block locations need to be included or not
- * @param includeStoragePolicy if storage policy should be returned
* @return a file status
* @throws java.io.IOException if any error occurs
*/
@@ -439,7 +438,19 @@ class FSDirStatAndListingOp {
int childrenNum = node.isDirectory() ?
node.asDirectory().getChildrenNum(snapshot) : 0;
+ EnumSet<HdfsFileStatus.Flags> flags =
+ EnumSet.noneOf(HdfsFileStatus.Flags.class);
INodeAttributes nodeAttrs = fsd.getAttributes(iip);
+ boolean hasAcl = nodeAttrs.getAclFeature() != null;
+ if (hasAcl) {
+ flags.add(HdfsFileStatus.Flags.HAS_ACL);
+ }
+ if (isEncrypted) {
+ flags.add(HdfsFileStatus.Flags.HAS_CRYPT);
+ }
+ if (isErasureCoded) {
+ flags.add(HdfsFileStatus.Flags.HAS_EC);
+ }
return createFileStatus(
size,
node.isDirectory(),
@@ -447,7 +458,8 @@ class FSDirStatAndListingOp {
blocksize,
node.getModificationTime(snapshot),
node.getAccessTime(snapshot),
- getPermissionForFileStatus(nodeAttrs, isEncrypted, isErasureCoded),
+ nodeAttrs.getFsPermission(),
+ flags,
nodeAttrs.getUserName(),
nodeAttrs.getGroupName(),
node.isSymlink() ? node.asSymlink().getSymlink() : null,
@@ -460,44 +472,24 @@ class FSDirStatAndListingOp {
loc);
}
- private static HdfsFileStatus createFileStatus(long length, boolean isdir,
- int replication, long blocksize, long mtime,
- long atime, FsPermission permission, String owner, String group,
- byte[] symlink, byte[] path, long fileId, int childrenNum,
- FileEncryptionInfo feInfo, byte storagePolicy,
+ private static HdfsFileStatus createFileStatus(
+ long length, boolean isdir,
+ int replication, long blocksize, long mtime, long atime,
+ FsPermission permission, EnumSet<HdfsFileStatus.Flags> flags,
+ String owner, String group, byte[] symlink, byte[] path, long fileId,
+ int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
ErasureCodingPolicy ecPolicy, LocatedBlocks locations) {
if (locations == null) {
return new HdfsFileStatus(length, isdir, replication, blocksize,
- mtime, atime, permission, owner, group, symlink, path, fileId,
- childrenNum, feInfo, storagePolicy, ecPolicy);
+ mtime, atime, permission, flags, owner, group, symlink, path,
+ fileId, childrenNum, feInfo, storagePolicy, ecPolicy);
} else {
return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
- mtime, atime, permission, owner, group, symlink, path, fileId,
- locations, childrenNum, feInfo, storagePolicy, ecPolicy);
+ mtime, atime, permission, flags, owner, group, symlink, path,
+ fileId, locations, childrenNum, feInfo, storagePolicy, ecPolicy);
}
}
- /**
- * Returns an inode's FsPermission for use in an outbound FileStatus. If the
- * inode has an ACL or is for an encrypted file/dir, then this method will
- * return an FsPermissionExtension.
- *
- * @param node INode to check
- * @param isEncrypted boolean true if the file/dir is encrypted
- * @return FsPermission from inode, with ACL bit on if the inode has an ACL
- * and encrypted bit on if it represents an encrypted file/dir.
- */
- private static FsPermission getPermissionForFileStatus(
- INodeAttributes node, boolean isEncrypted, boolean isErasureCoded) {
- FsPermission perm = node.getFsPermission();
- boolean hasAcl = node.getAclFeature() != null;
- if (hasAcl || isEncrypted || isErasureCoded) {
- perm = new FsPermissionExtension(perm, hasAcl,
- isEncrypted, isErasureCoded);
- }
- return perm;
- }
-
private static ContentSummary getContentSummaryInt(FSDirectory fsd,
INodesInPath iip) throws IOException {
fsd.readLock();
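The hunk above drops FsPermissionExtension and instead carries the ACL, encryption, and erasure-coding attributes as an EnumSet of HdfsFileStatus.Flags. A small sketch of the same flag-building pattern, with a stand-in enum so it compiles on its own:

    import java.util.EnumSet;

    // Sketch: attribute bits that used to be folded into FsPermissionExtension
    // are collected into an EnumSet and passed alongside the plain permission.
    public final class StatusFlagsSketch {
      enum Flags { HAS_ACL, HAS_CRYPT, HAS_EC }   // stand-in for HdfsFileStatus.Flags

      static EnumSet<Flags> toFlags(boolean hasAcl, boolean encrypted, boolean ec) {
        EnumSet<Flags> flags = EnumSet.noneOf(Flags.class);
        if (hasAcl) {
          flags.add(Flags.HAS_ACL);
        }
        if (encrypted) {
          flags.add(Flags.HAS_CRYPT);
        }
        if (ec) {
          flags.add(Flags.HAS_EC);
        }
        return flags;
      }

      public static void main(String[] args) {
        System.out.println(toFlags(true, false, true)); // [HAS_ACL, HAS_EC]
      }
    }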
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index a62cddd097c..7ab05d78860 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -541,7 +541,7 @@ class FSDirWriteFileOp {
ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
fsd.getFSNamesystem(), existing);
}
- if (ecPolicy != null) {
+ if (ecPolicy != null && (!ecPolicy.isReplicationPolicy())) {
isStriped = true;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 07dc5c1fa17..87b11562dee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -72,12 +72,13 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.RecursiveAction;
+import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.RecursiveAction;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES;
@@ -135,11 +136,13 @@ public class FSDirectory implements Closeable {
public final static HdfsFileStatus DOT_RESERVED_STATUS =
new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
- null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
+ EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
+ HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
public final static HdfsFileStatus DOT_SNAPSHOT_DIR_STATUS =
- new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
+ new HdfsFileStatus(0, true, 0, 0, 0, 0, null,
+ EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
@@ -383,12 +386,15 @@ public class FSDirectory implements Closeable {
*/
void createReservedStatuses(long cTime) {
HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
- new FsPermission((short) 0770), null, supergroup, null,
+ new FsPermission((short) 0770),
+ EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
DOT_INODES, -1L, 0, null,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
- new FsPermission((short) 0770), null, supergroup, null, RAW, -1L,
- 0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
+ new FsPermission((short) 0770),
+ EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
+ RAW, -1L, 0, null,
+ HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
reservedStatuses = new HdfsFileStatus[] {inodes, raw};
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9872cd7720e..2313335d6ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -92,6 +92,7 @@ import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
import org.apache.hadoop.hdfs.protocol.BlocksStats;
import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksMBean;
import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
import static org.apache.hadoop.util.Time.now;
import static org.apache.hadoop.util.Time.monotonicNow;
@@ -176,6 +177,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException;
@@ -243,10 +245,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
-import org.apache.hadoop.hdfs.server.namenode.metrics.ECBlockGroupsStatsMBean;
+import org.apache.hadoop.hdfs.server.namenode.metrics.ECBlockGroupsMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksStatsMBean;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
@@ -340,7 +341,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
@InterfaceAudience.Private
@Metrics(context="dfs")
public class FSNamesystem implements Namesystem, FSNamesystemMBean,
- NameNodeMXBean, ReplicatedBlocksStatsMBean, ECBlockGroupsStatsMBean {
+ NameNodeMXBean, ReplicatedBlocksMBean, ECBlockGroupsMBean {
public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
private final MetricsRegistry registry = new MetricsRegistry("FSNamesystem");
@Metric final MutableRatesWithAggregation detailedLockHoldTimeMetrics =
@@ -371,9 +372,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
FileStatus status = null;
if (stat != null) {
- Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
+ Path symlink = stat.isSymlink()
+ ? new Path(DFSUtilClient.bytes2String(stat.getSymlinkInBytes()))
+ : null;
Path path = new Path(src);
- status = new FileStatus(stat.getLen(), stat.isDir(),
+ status = new FileStatus(stat.getLen(), stat.isDirectory(),
stat.getReplication(), stat.getBlockSize(),
stat.getModificationTime(),
stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
@@ -4076,10 +4079,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* @see ClientProtocol#getBlocksStats()
*/
BlocksStats getBlocksStats() {
- return new BlocksStats(getLowRedundancyBlocksStat(),
- getCorruptBlocksStat(), getMissingBlocksStat(),
- getMissingReplicationOneBlocksStat(), getBlocksBytesInFutureStat(),
- getPendingDeletionBlocksStat());
+ return new BlocksStats(getLowRedundancyReplicatedBlocks(),
+ getCorruptReplicatedBlocks(), getMissingReplicatedBlocks(),
+ getMissingReplicationOneBlocks(), getBytesInFutureReplicatedBlocks(),
+ getPendingDeletionReplicatedBlocks());
}
/**
@@ -4089,9 +4092,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* @see ClientProtocol#getECBlockGroupsStats()
*/
ECBlockGroupsStats getECBlockGroupsStats() {
- return new ECBlockGroupsStats(getLowRedundancyECBlockGroupsStat(),
- getCorruptECBlockGroupsStat(), getMissingECBlockGroupsStat(),
- getECBlocksBytesInFutureStat(), getPendingDeletionECBlockGroupsStat());
+ return new ECBlockGroupsStats(getLowRedundancyECBlockGroups(),
+ getCorruptECBlockGroups(), getMissingECBlockGroups(),
+ getBytesInFutureECBlockGroups(), getPendingDeletionECBlockGroups());
}
@Override // FSNamesystemMBean
@@ -4638,76 +4641,76 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
@Override // ReplicatedBlocksMBean
@Metric({"LowRedundancyReplicatedBlocks",
"Number of low redundancy replicated blocks"})
- public long getLowRedundancyBlocksStat() {
- return blockManager.getLowRedundancyBlocksStat();
+ public long getLowRedundancyReplicatedBlocks() {
+ return blockManager.getLowRedundancyBlocks();
}
@Override // ReplicatedBlocksMBean
@Metric({"CorruptReplicatedBlocks", "Number of corrupted replicated blocks"})
- public long getCorruptBlocksStat() {
- return blockManager.getCorruptBlocksStat();
+ public long getCorruptReplicatedBlocks() {
+ return blockManager.getCorruptBlocks();
}
@Override // ReplicatedBlocksMBean
@Metric({"MissingReplicatedBlocks", "Number of missing replicated blocks"})
- public long getMissingBlocksStat() {
- return blockManager.getMissingBlocksStat();
+ public long getMissingReplicatedBlocks() {
+ return blockManager.getMissingBlocks();
}
@Override // ReplicatedBlocksMBean
- @Metric({"MissingReplicatedOneBlocks", "Number of missing replicated blocks" +
- " with replication factor 1"})
- public long getMissingReplicationOneBlocksStat() {
- return blockManager.getMissingReplicationOneBlocksStat();
+ @Metric({"MissingReplicationOneBlocks", "Number of missing replicated " +
+ "blocks with replication factor 1"})
+ public long getMissingReplicationOneBlocks() {
+ return blockManager.getMissingReplicationOneBlocks();
}
@Override // ReplicatedBlocksMBean
- @Metric({"BytesReplicatedFutureBlocks", "Total bytes in replicated blocks " +
- "with future generation stamp"})
- public long getBlocksBytesInFutureStat() {
- return blockManager.getBytesInFutureReplicatedBlocksStat();
+ @Metric({"BytesInFutureReplicatedBlocks", "Total bytes in replicated " +
+ "blocks with future generation stamp"})
+ public long getBytesInFutureReplicatedBlocks() {
+ return blockManager.getBytesInFutureReplicatedBlocks();
}
@Override // ReplicatedBlocksMBean
@Metric({"PendingDeletionReplicatedBlocks", "Number of replicated blocks " +
"that are pending deletion"})
- public long getPendingDeletionBlocksStat() {
- return blockManager.getPendingDeletionBlocksStat();
+ public long getPendingDeletionReplicatedBlocks() {
+ return blockManager.getPendingDeletionReplicatedBlocks();
}
- @Override // ECBlockGroupsStatsMBean
+ @Override // ECBlockGroupsMBean
@Metric({"LowRedundancyECBlockGroups", "Number of erasure coded block " +
"groups with low redundancy"})
- public long getLowRedundancyECBlockGroupsStat() {
- return blockManager.getLowRedundancyECBlockGroupsStat();
+ public long getLowRedundancyECBlockGroups() {
+ return blockManager.getLowRedundancyECBlockGroups();
}
- @Override // ECBlockGroupsStatsMBean
+ @Override // ECBlockGroupsMBean
@Metric({"CorruptECBlockGroups", "Number of erasure coded block groups that" +
" are corrupt"})
- public long getCorruptECBlockGroupsStat() {
- return blockManager.getCorruptECBlockGroupsStat();
+ public long getCorruptECBlockGroups() {
+ return blockManager.getCorruptECBlockGroups();
}
- @Override // ECBlockGroupsStatsMBean
+ @Override // ECBlockGroupsMBean
@Metric({"MissingECBlockGroups", "Number of erasure coded block groups that" +
" are missing"})
- public long getMissingECBlockGroupsStat() {
- return blockManager.getMissingECBlockGroupsStat();
+ public long getMissingECBlockGroups() {
+ return blockManager.getMissingECBlockGroups();
}
- @Override // ECBlockGroupsStatsMBean
- @Metric({"BytesFutureECBlockGroups", "Total bytes in erasure coded block " +
+ @Override // ECBlockGroupsMBean
+ @Metric({"BytesInFutureECBlockGroups", "Total bytes in erasure coded block " +
"groups with future generation stamp"})
- public long getECBlocksBytesInFutureStat() {
- return blockManager.getBytesInFutureStripedBlocksStat();
+ public long getBytesInFutureECBlockGroups() {
+ return blockManager.getBytesInFutureECBlockGroups();
}
- @Override // ECBlockGroupsStatsMBean
+ @Override // ECBlockGroupsMBean
@Metric({"PendingDeletionECBlockGroups", "Number of erasure coded block " +
"groups that are pending deletion"})
- public long getPendingDeletionECBlockGroupsStat() {
- return blockManager.getPendingDeletionECBlockGroupsStat();
+ public long getPendingDeletionECBlockGroups() {
+ return blockManager.getPendingDeletionECBlockGroups();
}
@Override
@@ -4774,9 +4777,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* Register following MBeans with their respective names.
* FSNamesystemMBean:
* "hadoop:service=NameNode,name=FSNamesystemState"
- * ReplicatedBlocksStatsMBean:
+ * ReplicatedBlocksMBean:
* "hadoop:service=NameNode,name=ReplicatedBlocksState"
- * ECBlockGroupsStatsMBean:
+ * ECBlockGroupsMBean:
* "hadoop:service=NameNode,name=ECBlockGroupsState"
*/
private void registerMBean() {
@@ -4785,9 +4788,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
StandardMBean namesystemBean = new StandardMBean(
this, FSNamesystemMBean.class);
StandardMBean replicaBean = new StandardMBean(
- this, ReplicatedBlocksStatsMBean.class);
+ this, ReplicatedBlocksMBean.class);
StandardMBean ecBean = new StandardMBean(
- this, ECBlockGroupsStatsMBean.class);
+ this, ECBlockGroupsMBean.class);
namesystemMBeanName = MBeans.register(
"NameNode", "FSNamesystemState", namesystemBean);
replicatedBlocksMBeanName = MBeans.register(
@@ -4840,16 +4843,20 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
@Override // FSNamesystemMBean
+ @Metric({"NumLiveDataNodes", "Number of datanodes which are currently live"})
public int getNumLiveDataNodes() {
return getBlockManager().getDatanodeManager().getNumLiveDataNodes();
}
@Override // FSNamesystemMBean
+ @Metric({"NumDeadDataNodes", "Number of datanodes which are currently dead"})
public int getNumDeadDataNodes() {
return getBlockManager().getDatanodeManager().getNumDeadDataNodes();
}
@Override // FSNamesystemMBean
+ @Metric({"NumDecomLiveDataNodes",
+ "Number of datanodes which have been decommissioned and are now live"})
public int getNumDecomLiveDataNodes() {
final List live = new ArrayList();
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
@@ -4861,6 +4868,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
@Override // FSNamesystemMBean
+ @Metric({"NumDecomDeadDataNodes",
+ "Number of datanodes which have been decommissioned and are now dead"})
public int getNumDecomDeadDataNodes() {
final List dead = new ArrayList();
getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, false);
@@ -4872,6 +4881,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
@Override // FSNamesystemMBean
+ @Metric({"VolumeFailuresTotal",
+ "Total number of volume failures across all Datanodes"})
public int getVolumeFailuresTotal() {
List live = new ArrayList();
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
@@ -4883,6 +4894,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
@Override // FSNamesystemMBean
+ @Metric({"EstimatedCapacityLostTotal",
+ "An estimate of the total capacity lost due to volume failures"})
public long getEstimatedCapacityLostTotal() {
List live = new ArrayList();
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
@@ -4898,6 +4911,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
@Override // FSNamesystemMBean
+ @Metric({"NumDecommissioningDataNodes",
+ "Number of datanodes in decommissioning state"})
public int getNumDecommissioningDataNodes() {
return getBlockManager().getDatanodeManager().getDecommissioningNodes()
.size();
@@ -4915,6 +4930,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* before NN receives the first Heartbeat followed by the first Blockreport.
*/
@Override // FSNamesystemMBean
+ @Metric({"NumStaleStorages",
+ "Number of storages marked as content stale"})
public int getNumStaleStorages() {
return getBlockManager().getDatanodeManager().getNumStaleStorages();
}
@@ -4971,7 +4988,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return blockId;
}
- private boolean isFileDeleted(INodeFile file) {
+ boolean isFileDeleted(INodeFile file) {
// Not in the inodeMap or in the snapshot but marked deleted.
if (dir.getInode(file.getId()) == null) {
return true;
@@ -7038,18 +7055,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
resultingStat = FSDirErasureCodingOp.setErasureCodingPolicy(this,
srcArg, ecPolicyName, pc, logRetryCache);
success = true;
- } catch (AccessControlException ace) {
- logAuditEvent(success, operationName, srcArg, null,
- resultingStat);
- throw ace;
} finally {
writeUnlock(operationName);
if (success) {
getEditLog().logSync();
}
+ logAuditEvent(success, operationName, srcArg, null, resultingStat);
}
- logAuditEvent(success, operationName, srcArg, null,
- resultingStat);
}
/**
@@ -7057,9 +7069,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* @param policies The policies to add.
* @return The according result of add operation.
*/
- AddECPolicyResponse[] addECPolicies(ErasureCodingPolicy[] policies)
+ AddECPolicyResponse[] addErasureCodingPolicies(ErasureCodingPolicy[] policies)
throws IOException {
- final String operationName = "addECPolicies";
+ final String operationName = "addErasureCodingPolicies";
String addECPolicyName = "";
checkOperation(OperationCategory.WRITE);
List responses = new ArrayList<>();
@@ -7069,6 +7081,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
checkOperation(OperationCategory.WRITE);
for (ErasureCodingPolicy policy : policies) {
try {
+ checkOperation(OperationCategory.WRITE);
+ checkNameNodeSafeMode("Cannot add erasure coding policy");
ErasureCodingPolicy newPolicy =
FSDirErasureCodingOp.addErasureCodePolicy(this, policy);
addECPolicyName = newPolicy.getName();
@@ -7099,6 +7113,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
boolean success = false;
writeLock();
try {
+ checkOperation(OperationCategory.WRITE);
+ checkNameNodeSafeMode("Cannot remove erasure coding policy "
+ + ecPolicyName);
FSDirErasureCodingOp.removeErasureCodePolicy(this, ecPolicyName);
success = true;
} finally {
@@ -7184,18 +7201,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
resultingStat = FSDirErasureCodingOp.unsetErasureCodingPolicy(this,
srcArg, pc, logRetryCache);
success = true;
- } catch (AccessControlException ace) {
- logAuditEvent(success, operationName, srcArg, null,
- resultingStat);
- throw ace;
} finally {
writeUnlock(operationName);
if (success) {
getEditLog().logSync();
}
+ logAuditEvent(success, operationName, srcArg, null, resultingStat);
}
- logAuditEvent(success, operationName, srcArg, null,
- resultingStat);
}
/**
@@ -7203,14 +7215,20 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
*/
ErasureCodingPolicy getErasureCodingPolicy(String src)
throws AccessControlException, UnresolvedLinkException, IOException {
+ final String operationName = "getErasureCodingPolicy";
+ boolean success = false;
checkOperation(OperationCategory.READ);
FSPermissionChecker pc = getPermissionChecker();
readLock();
try {
checkOperation(OperationCategory.READ);
- return FSDirErasureCodingOp.getErasureCodingPolicy(this, src, pc);
+ final ErasureCodingPolicy ret =
+ FSDirErasureCodingOp.getErasureCodingPolicy(this, src, pc);
+ success = true;
+ return ret;
} finally {
- readUnlock("getErasureCodingPolicy");
+ readUnlock(operationName);
+ logAuditEvent(success, operationName, null);
}
}
@@ -7218,27 +7236,39 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* Get available erasure coding polices
*/
ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+ final String operationName = "getErasureCodingPolicies";
+ boolean success = false;
checkOperation(OperationCategory.READ);
readLock();
try {
checkOperation(OperationCategory.READ);
- return FSDirErasureCodingOp.getErasureCodingPolicies(this);
+ final ErasureCodingPolicy[] ret =
+ FSDirErasureCodingOp.getErasureCodingPolicies(this);
+ success = true;
+ return ret;
} finally {
- readUnlock("getErasureCodingPolicies");
+ readUnlock(operationName);
+ logAuditEvent(success, operationName, null);
}
}
/**
* Get available erasure coding codecs and corresponding coders.
*/
- HashMap getErasureCodingCodecs() throws IOException {
+ Map getErasureCodingCodecs() throws IOException {
+ final String operationName = "getErasureCodingCodecs";
+ boolean success = false;
checkOperation(OperationCategory.READ);
readLock();
try {
checkOperation(OperationCategory.READ);
- return FSDirErasureCodingOp.getErasureCodingCodecs(this);
+ final Map ret =
+ FSDirErasureCodingOp.getErasureCodingCodecs(this);
+ success = true;
+ return ret;
} finally {
- readUnlock("getErasureCodingCodecs");
+ readUnlock(operationName);
+ logAuditEvent(success, operationName, null);
}
}
@@ -7539,6 +7569,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
@Override // FSNamesystemMBean
+ @Metric({"NumInMaintenanceLiveDataNodes",
+ "Number of live Datanodes which are in maintenance state"})
public int getNumInMaintenanceLiveDataNodes() {
final List live = new ArrayList();
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
@@ -7550,6 +7582,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
@Override // FSNamesystemMBean
+ @Metric({"NumInMaintenanceDeadDataNodes",
+ "Number of dead Datanodes which are in maintenance state"})
public int getNumInMaintenanceDeadDataNodes() {
final List dead = new ArrayList();
getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true);
@@ -7561,6 +7595,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
@Override // FSNamesystemMBean
+ @Metric({"NumEnteringMaintenanceDataNodes",
+ "Number of Datanodes that are entering the maintenance state"})
public int getNumEnteringMaintenanceDataNodes() {
return getBlockManager().getDatanodeManager().getEnteringMaintenanceNodes()
.size();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 38cdbb30ef8..35ec063a1b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -26,10 +26,11 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
-import java.util.PriorityQueue;
+import java.util.NavigableSet;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
+import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -87,11 +88,15 @@ public class LeaseManager {
// Mapping: leaseHolder -> Lease
private final SortedMap leases = new TreeMap<>();
// Set of: Lease
- private final PriorityQueue<Lease> sortedLeases = new PriorityQueue<>(512,
+ private final NavigableSet<Lease> sortedLeases = new TreeSet<>(
new Comparator() {
@Override
public int compare(Lease o1, Lease o2) {
- return Long.signum(o1.getLastUpdate() - o2.getLastUpdate());
+ if (o1.getLastUpdate() != o2.getLastUpdate()) {
+ return Long.signum(o1.getLastUpdate() - o2.getLastUpdate());
+ } else {
+ return o1.holder.compareTo(o2.holder);
+ }
}
});
// INodeID -> Lease
@@ -162,18 +167,25 @@ public class LeaseManager {
*
* @return Set
*/
- public Set<INodesInPath> getINodeWithLeases() {
+ @VisibleForTesting
+ Set<INodesInPath> getINodeWithLeases() throws IOException {
return getINodeWithLeases(null);
}
private synchronized INode[] getINodesWithLease() {
- int inodeCount = 0;
- INode[] inodes = new INode[leasesById.size()];
+ List<INode> inodes = new ArrayList<>(leasesById.size());
+ INode currentINode;
for (long inodeId : leasesById.keySet()) {
- inodes[inodeCount] = fsnamesystem.getFSDirectory().getInode(inodeId);
- inodeCount++;
+ currentINode = fsnamesystem.getFSDirectory().getInode(inodeId);
+ // A file with an active lease could get deleted, or its
+ // parent directories could get recursively deleted.
+ if (currentINode != null &&
+ currentINode.isFile() &&
+ !fsnamesystem.isFileDeleted(currentINode.asFile())) {
+ inodes.add(currentINode);
+ }
}
- return inodes;
+ return inodes.toArray(new INode[0]);
}
/**
@@ -186,7 +198,7 @@ public class LeaseManager {
* @return Set
*/
public Set getINodeWithLeases(final INodeDirectory
- ancestorDir) {
+ ancestorDir) throws IOException {
assert fsnamesystem.hasReadLock();
final long startTimeMs = Time.monotonicNow();
Set iipSet = new HashSet<>();
@@ -233,7 +245,7 @@ public class LeaseManager {
try {
iipSet.addAll(f.get());
} catch (Exception e) {
- LOG.warn("INode filter task encountered exception: ", e);
+ throw new IOException("Failed to get files with active leases", e);
}
}
final long endTimeMs = Time.monotonicNow();
@@ -521,9 +533,10 @@ public class LeaseManager {
long start = monotonicNow();
- while(!sortedLeases.isEmpty() && sortedLeases.peek().expiredHardLimit()
- && !isMaxLockHoldToReleaseLease(start)) {
- Lease leaseToCheck = sortedLeases.peek();
+ while(!sortedLeases.isEmpty() &&
+ sortedLeases.first().expiredHardLimit()
+ && !isMaxLockHoldToReleaseLease(start)) {
+ Lease leaseToCheck = sortedLeases.first();
LOG.info(leaseToCheck + " has expired hard limit");
final List removing = new ArrayList<>();
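The comparator above gains a tie-break on the lease holder because the PriorityQueue was replaced by a TreeSet, and a TreeSet silently drops elements whose compare() result is 0. A compilable sketch of the effect, with a trimmed-down Lease class standing in for the HDFS one:

    import java.util.Comparator;
    import java.util.TreeSet;

    // Sketch: without the holder tie-break, two leases renewed in the same
    // millisecond would compare as equal and one of them would vanish.
    public final class LeaseOrderSketch {
      static final class Lease {
        final String holder;
        final long lastUpdate;
        Lease(String holder, long lastUpdate) {
          this.holder = holder;
          this.lastUpdate = lastUpdate;
        }
      }

      public static void main(String[] args) {
        TreeSet<Lease> sorted = new TreeSet<>(new Comparator<Lease>() {
          @Override
          public int compare(Lease o1, Lease o2) {
            if (o1.lastUpdate != o2.lastUpdate) {
              return Long.signum(o1.lastUpdate - o2.lastUpdate);
            }
            return o1.holder.compareTo(o2.holder);
          }
        });
        sorted.add(new Lease("client-1", 1000L));
        sorted.add(new Lease("client-2", 1000L)); // kept only because of the tie-break
        System.out.println(sorted.size());         // 2
        System.out.println(sorted.first().holder); // client-1
      }
    }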
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 39d93dff96b..78712020066 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -37,9 +37,9 @@ import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
@@ -80,6 +80,7 @@ import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.EventBatchList;
@@ -250,13 +251,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
private final String minimumDataNodeVersion;
+ private final String defaultECPolicyName;
+
public NameNodeRpcServer(Configuration conf, NameNode nn)
throws IOException {
this.nn = nn;
this.namesystem = nn.getNamesystem();
this.retryCache = namesystem.getRetryCache();
this.metrics = NameNode.getNameNodeMetrics();
-
+
int handlerCount =
conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
@@ -489,6 +492,10 @@ public class NameNodeRpcServer implements NamenodeProtocols {
DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY,
DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT);
+ defaultECPolicyName = conf.get(
+ DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY,
+ DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
+
// Set terse exception whose stack trace won't be logged
clientRpcServer.addTerseExceptions(SafeModeException.class,
FileNotFoundException.class,
@@ -1430,7 +1437,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
} else if (!stat.isSymlink()) {
throw new IOException("Path " + path + " is not a symbolic link");
}
- return stat.getSymlink();
+ return DFSUtilClient.bytes2String(stat.getSymlinkInBytes());
}
@@ -2054,6 +2061,12 @@ public class NameNodeRpcServer implements NamenodeProtocols {
}
boolean success = false;
try {
+ if (ecPolicyName == null) {
+ ecPolicyName = defaultECPolicyName;
+ LOG.trace("No policy name is specified, " +
+ "set the default policy name instead");
+ }
+ LOG.trace("Set erasure coding policy " + ecPolicyName + " on " + src);
namesystem.setErasureCodingPolicy(src, ecPolicyName, cacheEntry != null);
success = true;
} finally {
@@ -2265,7 +2278,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
}
@Override // ClientProtocol
- public HashMap getErasureCodingCodecs() throws IOException {
+ public Map getErasureCodingCodecs() throws IOException {
checkNNStartup();
return namesystem.getErasureCodingCodecs();
}
@@ -2297,13 +2310,14 @@ public class NameNodeRpcServer implements NamenodeProtocols {
ErasureCodingPolicy[] policies) throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
- return namesystem.addECPolicies(policies);
+ return namesystem.addErasureCodingPolicies(policies);
}
@Override
public void removeErasureCodingPolicy(String ecPolicyName)
throws IOException {
checkNNStartup();
+ namesystem.checkSuperuserPrivilege();
namesystem.removeErasureCodingPolicy(ecPolicyName);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index eddab3fa22a..5872955fc52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -471,7 +471,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
void check(String parent, HdfsFileStatus file, Result replRes, Result ecRes)
throws IOException {
String path = file.getFullName(parent);
- if (file.isDir()) {
+ if (file.isDirectory()) {
checkDir(path, replRes, ecRes);
return;
}
@@ -1115,7 +1115,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
if (lfStatus == null) { // not exists
lfInitedOk = dfs.mkdirs(lfName, null, true);
lostFound = lfName;
- } else if (!lfStatus.isDir()) { // exists but not a directory
+ } else if (!lfStatus.isDirectory()) { // exists but not a directory
LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
lfInitedOk = false;
} else { // exists and is a directory
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java
similarity index 87%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java
index f9fd416f8e7..5fa646a6c44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsStatsMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java
@@ -31,29 +31,29 @@ import org.apache.hadoop.classification.InterfaceAudience;
*
*/
@InterfaceAudience.Private
-public interface ECBlockGroupsStatsMBean {
+public interface ECBlockGroupsMBean {
/**
* Return count of erasure coded block groups with low redundancy.
*/
- long getLowRedundancyECBlockGroupsStat();
+ long getLowRedundancyECBlockGroups();
/**
* Return count of erasure coded block groups that are corrupt.
*/
- long getCorruptECBlockGroupsStat();
+ long getCorruptECBlockGroups();
/**
* Return count of erasure coded block groups that are missing.
*/
- long getMissingECBlockGroupsStat();
+ long getMissingECBlockGroups();
/**
* Return total bytes of erasure coded future block groups.
*/
- long getECBlocksBytesInFutureStat();
+ long getBytesInFutureECBlockGroups();
/**
* Return count of erasure coded block groups that are pending deletion.
*/
- long getPendingDeletionECBlockGroupsStat();
+ long getPendingDeletionECBlockGroups();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java
similarity index 85%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java
index 4643b803299..e2c924e56f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksStatsMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ReplicatedBlocksMBean.java
@@ -30,34 +30,34 @@ import org.apache.hadoop.classification.InterfaceAudience;
* @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics
*/
@InterfaceAudience.Private
-public interface ReplicatedBlocksStatsMBean {
+public interface ReplicatedBlocksMBean {
/**
* Return low redundancy blocks count.
*/
- long getLowRedundancyBlocksStat();
+ long getLowRedundancyReplicatedBlocks();
/**
* Return corrupt blocks count.
*/
- long getCorruptBlocksStat();
+ long getCorruptReplicatedBlocks();
/**
* Return missing blocks count.
*/
- long getMissingBlocksStat();
+ long getMissingReplicatedBlocks();
/**
* Return count of missing blocks with replication factor one.
*/
- long getMissingReplicationOneBlocksStat();
+ long getMissingReplicationOneBlocks();
/**
* Return total bytes of future blocks.
*/
- long getBlocksBytesInFutureStat();
+ long getBytesInFutureReplicatedBlocks();
/**
* Return count of blocks that are pending deletion.
*/
- long getPendingDeletionBlocksStat();
+ long getPendingDeletionReplicatedBlocks();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 0ab928d04dc..23dcbe8c9a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -195,11 +195,17 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
s.getRoot().setModificationTime(now, Snapshot.CURRENT_STATE_ID);
if (captureOpenFiles) {
- Set<INodesInPath> openFilesIIP =
- leaseManager.getINodeWithLeases(snapshotRoot);
- for (INodesInPath openFileIIP : openFilesIIP) {
- INodeFile openFile = openFileIIP.getLastINode().asFile();
- openFile.recordModification(openFileIIP.getLatestSnapshotId());
+ try {
+ Set<INodesInPath> openFilesIIP =
+ leaseManager.getINodeWithLeases(snapshotRoot);
+ for (INodesInPath openFileIIP : openFilesIIP) {
+ INodeFile openFile = openFileIIP.getLastINode().asFile();
+ openFile.recordModification(openFileIIP.getLatestSnapshotId());
+ }
+ } catch (Exception e) {
+ throw new SnapshotException("Failed to add snapshot: Unable to " +
+ "capture all open files under the snapshot dir " +
+ snapshotRoot.getFullPathName() + " for snapshot '" + name + "'", e);
}
}
return s;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index ffc203f9d93..4b479e04d8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -25,6 +25,7 @@ import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -35,6 +36,7 @@ import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
@@ -345,8 +347,9 @@ public class SnapshotManager implements SnapshotStatsMXBean {
if (userName == null || userName.equals(dir.getUserName())) {
SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
dir.getModificationTime(), dir.getAccessTime(),
- dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
- dir.getLocalNameBytes(), dir.getId(),
+ dir.getFsPermission(), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+ dir.getUserName(), dir.getGroupName(),
+ dir.getLocalNameBytes(), dir.getId(),
dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
dir.getDirectorySnapshottableFeature().getNumSnapshots(),
dir.getDirectorySnapshottableFeature().getSnapshotQuota(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 5006b5a20e0..55d85ff8d8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.util.ECPolicyLoader;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
@@ -33,7 +34,6 @@ import org.apache.hadoop.util.ToolRunner;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
-import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -310,7 +310,8 @@ public class ECAdmin extends Configured implements Tool {
@Override
public String getShortUsage() {
- return "[" + getName() + " -path -policy ]\n";
+ return "[" + getName() +
+ " -path [-policy ] [-replicate]]\n";
}
@Override
@@ -319,9 +320,13 @@ public class ECAdmin extends Configured implements Tool {
listing.addRow("", "The path of the file/directory to set " +
"the erasure coding policy");
listing.addRow("", "The name of the erasure coding policy");
+ listing.addRow("-replicate",
+ "force 3x replication scheme on the directory");
return getShortUsage() + "\n" +
"Set the erasure coding policy for a file/directory.\n\n" +
- listing.toString();
+ listing.toString() + "\n" +
+ "-replicate and -policy are optional arguments. They cannot been " +
+ "used at the same time";
}
@Override
@@ -333,28 +338,38 @@ public class ECAdmin extends Configured implements Tool {
return 1;
}
- final String ecPolicyName = StringUtils.popOptionWithArgument("-policy",
+ String ecPolicyName = StringUtils.popOptionWithArgument("-policy",
args);
- if (ecPolicyName == null) {
- System.err.println("Please specify the policy name.\nUsage: " +
- getLongUsage());
- return 1;
- }
+ final boolean replicate = StringUtils.popOption("-replicate", args);
if (args.size() > 0) {
System.err.println(getName() + ": Too many arguments");
return 1;
}
+ if (replicate) {
+ if (ecPolicyName != null) {
+ System.err.println(getName() +
+ ": -replicate and -policy cannot been used at the same time");
+ return 2;
+ }
+ ecPolicyName = ErasureCodeConstants.REPLICATION_POLICY_NAME;
+ }
+
final Path p = new Path(path);
final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
try {
dfs.setErasureCodingPolicy(p, ecPolicyName);
- System.out.println("Set erasure coding policy " + ecPolicyName +
- " on " + path);
+ if (ecPolicyName == null) {
+ System.out.println("Set default erasure coding policy" +
+ " on " + path);
+ } else {
+ System.out.println("Set erasure coding policy " + ecPolicyName +
+ " on " + path);
+ }
} catch (Exception e) {
System.err.println(AdminHelper.prettifyException(e));
- return 2;
+ return 3;
}
return 0;
}
@@ -441,7 +456,7 @@ public class ECAdmin extends Configured implements Tool {
final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
- HashMap codecs =
+ Map codecs =
dfs.getAllErasureCodingCodecs();
if (codecs.isEmpty()) {
System.out.println("No erasure coding codecs are supported on the " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
index 7a39ba6072b..ddf7933f032 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
@@ -20,17 +20,21 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.IOException;
import java.io.OutputStream;
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.sax.SAXTransformerFactory;
+import javax.xml.transform.sax.TransformerHandler;
+import javax.xml.transform.stream.StreamResult;
+import org.xml.sax.ContentHandler;
+import org.xml.sax.SAXException;
+import org.xml.sax.helpers.AttributesImpl;
+
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.xml.sax.ContentHandler;
-import org.xml.sax.SAXException;
-import org.xml.sax.helpers.AttributesImpl;
-import org.apache.xml.serialize.OutputFormat;
-import org.apache.xml.serialize.XMLSerializer;
/**
* An XmlEditsVisitor walks over an EditLog structure and writes out
@@ -41,26 +45,37 @@ import org.apache.xml.serialize.XMLSerializer;
public class XmlEditsVisitor implements OfflineEditsVisitor {
private final OutputStream out;
private ContentHandler contentHandler;
+ private final SAXTransformerFactory factory;
+  private static final String XML_INDENTATION_PROP = "{http://xml.apache.org/" +
+      "xslt}indent-amount";
+  private static final String XML_INDENTATION_NUM = "2";
/**
* Create a processor that writes to the file named and may or may not
* also output to the screen, as specified.
*
- * @param filename Name of file to write output to
- * @param printToScreen Mirror output to screen?
+ * @param out output stream to write
+ * @throws IOException on any error
*/
public XmlEditsVisitor(OutputStream out)
throws IOException {
this.out = out;
- OutputFormat outFormat = new OutputFormat("XML", "UTF-8", true);
- outFormat.setIndenting(true);
- outFormat.setIndent(2);
- outFormat.setDoctype(null, null);
- XMLSerializer serializer = new XMLSerializer(out, outFormat);
- contentHandler = serializer.asContentHandler();
+    factory = (SAXTransformerFactory) SAXTransformerFactory.newInstance();
try {
+ TransformerHandler handler = factory.newTransformerHandler();
+ handler.getTransformer().setOutputProperty(OutputKeys.METHOD, "xml");
+ handler.getTransformer().setOutputProperty(OutputKeys.ENCODING, "UTF-8");
+ handler.getTransformer().setOutputProperty(OutputKeys.INDENT, "yes");
+ handler.getTransformer().setOutputProperty(XML_INDENTATION_PROP,
+ XML_INDENTATION_NUM);
+ handler.getTransformer().setOutputProperty(OutputKeys.STANDALONE, "yes");
+ handler.setResult(new StreamResult(out));
+ contentHandler = handler;
+
contentHandler.startDocument();
contentHandler.startElement("", "", "EDITS", new AttributesImpl());
+ } catch (TransformerConfigurationException e) {
+ throw new IOException("SAXTransformer error: " + e.getMessage());
} catch (SAXException e) {
throw new IOException("SAX error: " + e.getMessage());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index a8861a8ccb8..94752f53576 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -17,10 +17,18 @@
*/
package org.apache.hadoop.hdfs.web;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.ipc.RemoteException;
@@ -110,21 +118,20 @@ public class JsonUtil {
m.put("pathSuffix", status.getLocalName());
m.put("type", WebHdfsConstants.PathType.valueOf(status));
if (status.isSymlink()) {
- m.put("symlink", status.getSymlink());
+ m.put("symlink", DFSUtilClient.bytes2String(status.getSymlinkInBytes()));
}
-
m.put("length", status.getLen());
m.put("owner", status.getOwner());
m.put("group", status.getGroup());
FsPermission perm = status.getPermission();
m.put("permission", toString(perm));
- if (perm.getAclBit()) {
+ if (status.hasAcl()) {
m.put("aclBit", true);
}
- if (perm.getEncryptedBit()) {
+ if (status.isEncrypted()) {
m.put("encBit", true);
}
- if (perm.getErasureCodedBit()) {
+ if (status.isErasureCoded()) {
m.put("ecBit", true);
}
m.put("accessTime", status.getAccessTime());
@@ -373,15 +380,6 @@ public class JsonUtil {
FsPermission perm = status.getPermission();
if (perm != null) {
m.put("permission", toString(perm));
- if (perm.getAclBit()) {
- m.put("aclBit", true);
- }
- if (perm.getEncryptedBit()) {
- m.put("encBit", true);
- }
- if (perm.getErasureCodedBit()) {
- m.put("ecBit", true);
- }
}
final Map> finalMap =
new TreeMap>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 4caee9e76f8..03becc96ea0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -459,7 +459,7 @@
dfs.namenode.posix.acl.inheritance.enabled
- false
+ true
Set to true to enable POSIX style ACL inheritance. When it is enabled
and the create request comes from a compatible client, the NameNode
@@ -960,17 +960,17 @@
dfs.namenode.decommission.interval30s
- Namenode periodicity in seconds to check if decommission is
- complete. Support multiple time unit suffix(case insensitive), as described
- in dfs.heartbeat.interval.
+    Namenode periodicity in seconds to check if
+    decommission or maintenance is complete. Supports multiple time unit
+    suffixes (case insensitive), as described in dfs.heartbeat.interval.
dfs.namenode.decommission.blocks.per.interval500000
- The approximate number of blocks to process per
- decommission interval, as defined in dfs.namenode.decommission.interval.
+ The approximate number of blocks to process per decommission
+ or maintenance interval, as defined in dfs.namenode.decommission.interval.
@@ -978,11 +978,12 @@
dfs.namenode.decommission.max.concurrent.tracked.nodes100
- The maximum number of decommission-in-progress datanodes nodes that will be
- tracked at one time by the namenode. Tracking a decommission-in-progress
- datanode consumes additional NN memory proportional to the number of blocks
- on the datnode. Having a conservative limit reduces the potential impact
- of decomissioning a large number of nodes at once.
+    The maximum number of decommission-in-progress or
+    entering-maintenance datanodes that will be tracked at one time by
+    the namenode. Tracking these datanodes consumes additional NN memory
+    proportional to the number of blocks on the datanode. Having a conservative
+    limit reduces the potential impact of decommissioning or maintenance of
+    a large number of nodes at once.
A value of 0 means no limit will be enforced.
@@ -2544,13 +2545,14 @@
dfs.client.socket.send.buffer.size
- 131072
+ 0
Socket send buffer size for a write pipeline in DFSClient side.
This may affect TCP connection throughput.
If it is set to zero or negative value,
no buffer size will be set explicitly,
thus enable tcp auto-tuning on some system.
+ The default value is 0.
@@ -2973,6 +2975,14 @@
+
+ dfs.namenode.ec.system.default.policy
+ RS-6-3-64k
+      The default erasure coding policy name that will be used
+      on a path when no policy name is passed.
+
+
+
dfs.namenode.ec.policies.max.cellsize4194304
@@ -3024,23 +3034,25 @@
dfs.datanode.transfer.socket.send.buffer.size
- 131072
+ 0
Socket send buffer size for DataXceiver (mirroring packets to downstream
in pipeline). This may affect TCP connection throughput.
If it is set to zero or negative value, no buffer size will be set
explicitly, thus enable tcp auto-tuning on some system.
+ The default value is 0.
dfs.datanode.transfer.socket.recv.buffer.size
- 131072
+ 0
Socket receive buffer size for DataXceiver (receiving packets from client
during block writing). This may affect TCP connection throughput.
If it is set to zero or negative value, no buffer size will be set
explicitly, thus enable tcp auto-tuning on some system.
+ The default value is 0.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 3e276a9a9f6..dae35196cfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -370,6 +370,12 @@
var b = function() { browse_directory($('#directory').val()); };
$('#btn-nav-directory').click(b);
+      // Also navigate to the directory when a user presses Enter.
+ $('#directory').on('keyup', function (e) {
+ if (e.which == 13) {
+ browse_directory($('#directory').val());
+ }
+ });
var dir = window.location.hash.slice(1);
if(dir == "") {
window.location.hash = "/";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 1c0a2de1ea9..786b51278d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -65,9 +65,11 @@ Architecture
2. _The size of a striping cell._ This determines the granularity of striped reads and writes, including buffer sizes and encoding work.
- Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell size*. Currently, five built-in policies are supported: `RS-3-2-64k`, `RS-6-3-64k`, `RS-10-4-64k`, `RS-LEGACY-6-3-64k`, and `XOR-2-1-64k`.
+ Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell size*. Currently, six built-in policies are supported: `RS-3-2-64k`, `RS-6-3-64k`, `RS-10-4-64k`, `RS-LEGACY-6-3-64k`, `XOR-2-1-64k` and `REPLICATION`.
- By default, all built-in erasure coding policies are disabled.
+  `REPLICATION` is a special policy. It can only be set on a directory, forcing the directory to adopt the 3x replication scheme instead of inheriting its ancestor's erasure coding policy. This policy makes it possible to interleave 3x replicated directories with erasure-coded directories.
+
+  The `REPLICATION` policy is always enabled. The other built-in policies are disabled by default, unless they are listed in the `dfs.namenode.ec.policies.enabled` property.
Similar to HDFS storage policies, erasure coding policies are set on a directory. When a file is created, it inherits the EC policy of its nearest ancestor directory.
@@ -112,11 +114,16 @@ Deployment
what EC policies can be set by clients. It does not affect the behavior of already set file or directory-level EC policies.
By default, all built-in erasure coding policies are disabled. Typically, the cluster administrator will enable set of policies by including them
- in the `dfs .namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance,
+ in the `dfs.namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance,
for a cluster with 9 racks, a policy like `RS-10-4-64k` will not preserve rack-level fault-tolerance, and `RS-6-3-64k` or `RS-3-2-64k` might
be more appropriate. If the administrator only cares about node-level fault-tolerance, `RS-10-4-64k` would still be appropriate as long as
there are at least 14 DataNodes in the cluster.
+  A system-wide default EC policy can be configured via the `dfs.namenode.ec.system.default.policy` property. With this configuration,
+  the default EC policy is used whenever no policy name is passed as an argument to the `-setPolicy` command.
+
+  By default, `dfs.namenode.ec.system.default.policy` is `RS-6-3-64k`.
+
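+  As a hedged, test-oriented sketch only (in a real deployment the key is simply set in the NameNode's `hdfs-site.xml`; the policy value below is an arbitrary example), the default can also be overridden in a `MiniDFSCluster` test:
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.hdfs.HdfsConfiguration;
+        import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+        Configuration conf = new HdfsConfiguration();
+        // Policy used by -setPolicy when no -policy argument is given.
+        conf.set("dfs.namenode.ec.system.default.policy", "RS-3-2-64k");
+        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+            .numDataNodes(5).build();
+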
The codec implementations for Reed-Solomon and XOR can be configured with the following client and DataNode configuration keys:
`io.erasurecode.codec.rs.rawcoders` for the default RS codec,
`io.erasurecode.codec.rs-legacy.rawcoders` for the legacy RS codec,
@@ -148,7 +155,7 @@ Deployment
HDFS provides an `ec` subcommand to perform administrative commands related to erasure coding.
hdfs ec [generic options]
- [-setPolicy -policy -path ]
+ [-setPolicy -path [-policy ] [-replicate]]
[-getPolicy -path ]
[-unsetPolicy -path ]
[-listPolicies]
@@ -160,13 +167,20 @@ Deployment
Below are the details about each command.
- * `[-setPolicy -policy -path ]`
+ * `[-setPolicy -path [-policy ] [-replicate]]`
Sets an erasure coding policy on a directory at the specified path.
`path`: An directory in HDFS. This is a mandatory parameter. Setting a policy only affects newly created files, and does not affect existing files.
`policyName`: The erasure coding policy to be used for files under this directory.
+  This parameter can be omitted if the `dfs.namenode.ec.system.default.policy` configuration is set,
+  in which case the default EC policy from the configuration is applied to the path.
+
+  `-replicate` applies the special `REPLICATION` policy on the directory, forcing the directory to adopt the 3x replication scheme.
+
+  `-replicate` and `-policy ` are optional arguments. They cannot be specified at the same time. A short programmatic sketch of these options is given after the command list below.
+
* `[-getPolicy -path ]`
@@ -199,3 +213,22 @@ Below are the details about each command.
* `[-disablePolicy -policy ]`
Disable an erasure coding policy.
+
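+A minimal programmatic sketch of the `-setPolicy` options above (the paths are illustrative; the command line remains the documented interface):
+
+        import org.apache.hadoop.fs.Path;
+        import org.apache.hadoop.hdfs.DistributedFileSystem;
+        import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+
+        // dfs is an already opened DistributedFileSystem instance.
+        dfs.setErasureCodingPolicy(new Path("/ec/cold"), "RS-6-3-64k");  // explicit -policy
+        dfs.setErasureCodingPolicy(new Path("/ec/any"), null);           // falls back to dfs.namenode.ec.system.default.policy
+        dfs.setErasureCodingPolicy(new Path("/ec/hot"),
+            ErasureCodeConstants.REPLICATION_POLICY_NAME);               // what -replicate does under the hood
+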
+Limitations
+-----------
+
+Certain HDFS file write operations, i.e., `hflush`, `hsync` and `append`,
+are not supported on erasure coded files due to substantial technical
+challenges.
+
+* `append()` on an erasure coded file will throw `IOException`.
+* `hflush()` and `hsync()` on `DFSStripedOutputStream` are no-ops. Thus calling
+`hflush()` or `hsync()` on an erasure coded file cannot guarantee that data is
+persisted.
+
+A client can use the [`StreamCapabilities`](../hadoop-common/filesystem/filesystem.html#interface_StreamCapabilities)
+API to query whether an `OutputStream` supports `hflush()` and `hsync()`.
+If the client desires data persistence via `hflush()` and `hsync()`, the current
+remedy is to create such files as regular 3x replicated files in a
+non-erasure-coded directory, or to use the `FSDataOutputStreamBuilder#replicate()`
+API to create 3x replicated files in an erasure-coded directory.
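+
+As a hedged illustration of that remedy (the path and payload are made up; this is a sketch against the `FSDataOutputStreamBuilder` and `StreamCapabilities` APIs referenced above, not documentation of record):
+
+        import org.apache.hadoop.conf.Configuration;
+        import org.apache.hadoop.fs.FSDataOutputStream;
+        import org.apache.hadoop.fs.FileSystem;
+        import org.apache.hadoop.fs.Path;
+
+        FileSystem fs = FileSystem.get(new Configuration());
+        // Create a 3x replicated file even though its parent directory is erasure coded.
+        try (FSDataOutputStream out =
+                 fs.createFile(new Path("/ecdir/needs-durability.log")).replicate().build()) {
+          out.writeBytes("audit record\n");
+          if (out.hasCapability("hflush")) {   // StreamCapabilities query
+            out.hflush();                      // durable only when the capability is reported
+          }
+        }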
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
index c50253459da..82b5cec09f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
@@ -322,7 +322,7 @@ Configuration Parameters
* `dfs.namenode.posix.acl.inheritance.enabled`
- Set to true to enable POSIX style ACL inheritance. Disabled by default.
+ Set to true to enable POSIX style ACL inheritance. Enabled by default.
When it is enabled and the create request comes from a compatible client,
the NameNode will apply default ACLs from the parent directory to
the create mode and ignore the client umask. If no default ACL is found,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 7544c80ae6c..03834ebf07d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -495,6 +495,45 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileSt
See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
+### List a File
+
+* Submit a HTTP GET request.
+
+ curl -i "http://:/webhdfs/v1/?op=LISTSTATUS"
+
+ The client receives a response with a [`FileStatuses` JSON object](#FileStatuses_JSON_Schema):
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+ Content-Length: 427
+
+ {
+ "FileStatuses":
+ {
+ "FileStatus":
+ [
+ {
+ "accessTime" : 1320171722771,
+ "blockSize" : 33554432,
+ "childrenNum" : 0,
+ "fileId" : 16390,
+ "group" : "supergroup",
+ "length" : 1366,
+ "modificationTime": 1501770633062,
+ "owner" : "webuser",
+ "pathSuffix" : "",
+ "permission" : "644",
+ "replication" : 1,
+ "storagePolicy" : 0,
+ "type" : "FILE"
+ }
+ ]
+ }
+ }
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
+
+
### Iteratively List a Directory
* Submit a HTTP GET request.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
index 3c73c28c2a6..dcd91c7d848 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
@@ -72,16 +72,16 @@ public class TestGenericRefresh {
public void setUp() throws Exception {
// Register Handlers, first one just sends an ok response
firstHandler = Mockito.mock(RefreshHandler.class);
- Mockito.stub(firstHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toReturn(RefreshResponse.successResponse());
+ Mockito.when(firstHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenReturn(RefreshResponse.successResponse());
RefreshRegistry.defaultRegistry().register("firstHandler", firstHandler);
// Second handler has conditional response for testing args
secondHandler = Mockito.mock(RefreshHandler.class);
- Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one", "two"}))
- .toReturn(new RefreshResponse(3, "three"));
- Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one"}))
- .toReturn(new RefreshResponse(2, "two"));
+ Mockito.when(secondHandler.handleRefresh("secondHandler", new String[]{"one", "two"}))
+ .thenReturn(new RefreshResponse(3, "three"));
+ Mockito.when(secondHandler.handleRefresh("secondHandler", new String[]{"one"}))
+ .thenReturn(new RefreshResponse(2, "two"));
RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
}
@@ -181,12 +181,12 @@ public class TestGenericRefresh {
public void testMultipleReturnCodeMerging() throws Exception {
// Two handlers which return two non-zero values
RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
- Mockito.stub(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toReturn(new RefreshResponse(23, "Twenty Three"));
+ Mockito.when(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenReturn(new RefreshResponse(23, "Twenty Three"));
RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
- Mockito.stub(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toReturn(new RefreshResponse(10, "Ten"));
+ Mockito.when(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenReturn(new RefreshResponse(10, "Ten"));
// Then registered to the same ID
RefreshRegistry.defaultRegistry().register("shared", handlerOne);
@@ -210,12 +210,12 @@ public class TestGenericRefresh {
public void testExceptionResultsInNormalError() throws Exception {
// In this test, we ensure that all handlers are called even if we throw an exception in one
RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
- Mockito.stub(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toThrow(new RuntimeException("Exceptional Handler Throws Exception"));
+ Mockito.when(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenThrow(new RuntimeException("Exceptional Handler Throws Exception"));
RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
- Mockito.stub(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
- .toThrow(new RuntimeException("More Exceptions"));
+ Mockito.when(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+ .thenThrow(new RuntimeException("More Exceptions"));
RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
index 75111bb4844..9cf2180ff59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
@@ -34,6 +34,8 @@ public class TestAclCLI extends CLITestHelperDFS {
protected void initConf() {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ conf.setBoolean(
+ DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY, false);
}
@Before
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
index 6fc97a2948d..5a04f67846b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import java.io.File;
import java.io.IOException;
@@ -32,6 +33,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.PathUtils;
+
+import org.junit.BeforeClass;
import org.junit.Test;
/**
@@ -39,8 +42,22 @@ import org.junit.Test;
*/
public class TestUrlStreamHandler {
- private static final File TEST_ROOT_DIR = PathUtils.getTestDir(TestUrlStreamHandler.class);
-
+ private static final File TEST_ROOT_DIR =
+ PathUtils.getTestDir(TestUrlStreamHandler.class);
+
+ private static final FsUrlStreamHandlerFactory HANDLER_FACTORY
+ = new FsUrlStreamHandlerFactory();
+
+ @BeforeClass
+ public static void setupHandler() {
+
+ // Setup our own factory
+    // setURLStreamHandlerFactory can be called at most once per JVM;
+    // the new URLStreamHandler is valid for all test cases
+    // in TestUrlStreamHandler
+ URL.setURLStreamHandlerFactory(HANDLER_FACTORY);
+ }
+
/**
* Test opening and reading from an InputStream through a hdfs:// URL.
*
@@ -55,15 +72,6 @@ public class TestUrlStreamHandler {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
-
- // Setup our own factory
- // setURLSteramHandlerFactor is can be set at most once in the JVM
- // the new URLStreamHandler is valid for all tests cases
- // in TestStreamHandler
- FsUrlStreamHandlerFactory factory =
- new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
- java.net.URL.setURLStreamHandlerFactory(factory);
-
Path filePath = new Path("/thefile");
try {
@@ -156,4 +164,22 @@ public class TestUrlStreamHandler {
}
+ @Test
+ public void testHttpDefaultHandler() throws Throwable {
+ assertNull("Handler for HTTP is the Hadoop one",
+ HANDLER_FACTORY.createURLStreamHandler("http"));
+ }
+
+ @Test
+ public void testHttpsDefaultHandler() throws Throwable {
+ assertNull("Handler for HTTPS is the Hadoop one",
+ HANDLER_FACTORY.createURLStreamHandler("https"));
+ }
+
+ @Test
+ public void testUnknownProtocol() throws Throwable {
+ assertNull("Unknown protocols are not handled",
+ HANDLER_FACTORY.createURLStreamHandler("gopher"));
+ }
+
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 820917f6727..ba9c436831e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -61,6 +61,7 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.google.common.base.Supplier;
@@ -148,6 +149,8 @@ public class MiniDFSCluster implements AutoCloseable {
public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
= DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
+ public static final String DFS_NAMENODE_DECOMMISSION_INTERVAL_TESTING_KEY
+ = DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY + ".testing";
// Changing this default may break some tests that assume it is 2.
private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
@@ -826,7 +829,10 @@ public class MiniDFSCluster implements AutoCloseable {
int safemodeExtension = conf.getInt(
DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
- conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
+ long decommissionInterval = conf.getTimeDuration(
+ DFS_NAMENODE_DECOMMISSION_INTERVAL_TESTING_KEY, 3, TimeUnit.SECONDS);
+ conf.setTimeDuration(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
+ decommissionInterval, TimeUnit.SECONDS);
if (!useConfiguredTopologyMappingClass) {
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 7a71df8d90b..5b16f4c0388 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -259,12 +259,14 @@ public class TestDFSClientRetries {
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
- (short) 777), "owner", "group", new byte[0], new byte[0],
+ (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+ "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
- (short) 777), "owner", "group", new byte[0], new byte[0],
+ (short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
+ "owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null))
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
index fa12f34af25..40cd676f3ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
@@ -30,7 +30,6 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.Socket;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY;
import static org.junit.Assert.assertTrue;
@@ -42,15 +41,16 @@ public class TestDFSClientSocketSize {
}
/**
- * The setting of socket send buffer size in
- * {@link java.net.Socket#setSendBufferSize(int)} is only a hint. Actual
- * value may differ. We just sanity check that it is somewhere close.
+ * Test that the send buffer size default value is 0, in which case the socket
+ * will use a TCP auto-tuned value.
*/
@Test
public void testDefaultSendBufferSize() throws IOException {
- assertTrue("Send buffer size should be somewhere near default.",
- getSendBufferSize(new Configuration()) >=
- DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT / 2);
+ final int sendBufferSize = getSendBufferSize(new Configuration());
+ LOG.info("If not specified, the auto tuned send buffer size is: {}",
+ sendBufferSize);
+ assertTrue("Send buffer size should be non-negative value which is " +
+ "determined by system (kernel).", sendBufferSize > 0);
}
/**
@@ -73,6 +73,10 @@ public class TestDFSClientSocketSize {
sendBufferSize1 > sendBufferSize2);
}
+ /**
+ * Test that if the send buffer size is 0, the socket will use a TCP
+ * auto-tuned value.
+ */
@Test
public void testAutoTuningSendBufferSize() throws IOException {
final Configuration conf = new Configuration();
@@ -80,7 +84,7 @@ public class TestDFSClientSocketSize {
final int sendBufferSize = getSendBufferSize(conf);
LOG.info("The auto tuned send buffer size is: {}", sendBufferSize);
assertTrue("Send buffer size should be non-negative value which is " +
- "determined by system (kernel).", sendBufferSize > 0);
+ "determined by system (kernel).", sendBufferSize > 0);
}
private int getSendBufferSize(Configuration conf) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index c82c0455eee..9ae49aa9ef5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -36,12 +36,12 @@ import java.util.zip.GZIPOutputStream;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
+
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.log4j.Level;
import org.junit.Test;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
@@ -65,6 +65,7 @@ import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.ToolRunner;
import org.junit.rules.Timeout;
import org.junit.AfterClass;
@@ -115,6 +116,7 @@ public class TestDFSShell {
GenericTestUtils.getTestDir("TestDFSShell").getAbsolutePath());
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+ conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1000);
miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
miniCluster.waitActive();
@@ -2002,8 +2004,12 @@ public class TestDFSShell {
DFSTestUtil.createFile(dfs, testFile2, 2 * BLOCK_SIZE, (short) 3, 0);
final FileStatus status1 = dfs.getFileStatus(testDir1);
final String mtime1 = fmt.format(new Date(status1.getModificationTime()));
+ final String atime1 = fmt.format(new Date(status1.getAccessTime()));
+ long now = Time.now();
+ dfs.setTimes(testFile2, now + 3000, now + 6000);
final FileStatus status2 = dfs.getFileStatus(testFile2);
final String mtime2 = fmt.format(new Date(status2.getModificationTime()));
+ final String atime2 = fmt.format(new Date(status2.getAccessTime()));
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
@@ -2036,17 +2042,19 @@ public class TestDFSShell {
out.toString().contains(String.valueOf(octal)));
out.reset();
- doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %y %n", testDir1, testFile2);
+ doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %x %y %n", testDir1, testFile2);
n = status2.getPermission().toShort();
octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
assertTrue(out.toString(), out.toString().contains(mtime1));
+ assertTrue(out.toString(), out.toString().contains(atime1));
assertTrue(out.toString(), out.toString().contains("regular file"));
assertTrue(out.toString(),
out.toString().contains(status2.getPermission().toString()));
assertTrue(out.toString(),
out.toString().contains(String.valueOf(octal)));
assertTrue(out.toString(), out.toString().contains(mtime2));
+ assertTrue(out.toString(), out.toString().contains(atime2));
}
private static void doFsStat(Configuration conf, String format, Path... files)
@@ -2181,7 +2189,7 @@ public class TestDFSShell {
assertTrue(xattrs.isEmpty());
List acls = dfs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
- assertFalse(targetPerm.getAclBit());
+ assertFalse(targetStatus.hasAcl());
// -ptop
Path target2 = new Path(hdfsTestDir, "targetfile2");
@@ -2200,7 +2208,7 @@ public class TestDFSShell {
assertTrue(xattrs.isEmpty());
acls = dfs.getAclStatus(target2).getEntries();
assertTrue(acls.isEmpty());
- assertFalse(targetPerm.getAclBit());
+ assertFalse(targetStatus.hasAcl());
// -ptopx
Path target3 = new Path(hdfsTestDir, "targetfile3");
@@ -2221,7 +2229,7 @@ public class TestDFSShell {
assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
acls = dfs.getAclStatus(target3).getEntries();
assertTrue(acls.isEmpty());
- assertFalse(targetPerm.getAclBit());
+ assertFalse(targetStatus.hasAcl());
// -ptopa
Path target4 = new Path(hdfsTestDir, "targetfile4");
@@ -2240,7 +2248,7 @@ public class TestDFSShell {
assertTrue(xattrs.isEmpty());
acls = dfs.getAclStatus(target4).getEntries();
assertFalse(acls.isEmpty());
- assertTrue(targetPerm.getAclBit());
+ assertTrue(targetStatus.hasAcl());
assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target4));
// -ptoa (verify -pa option will preserve permissions also)
@@ -2260,7 +2268,7 @@ public class TestDFSShell {
assertTrue(xattrs.isEmpty());
acls = dfs.getAclStatus(target5).getEntries();
assertFalse(acls.isEmpty());
- assertTrue(targetPerm.getAclBit());
+ assertTrue(targetStatus.hasAcl());
assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target5));
} finally {
if (null != shell) {
@@ -2472,7 +2480,7 @@ public class TestDFSShell {
assertTrue(xattrs.isEmpty());
List acls = dfs.getAclStatus(targetDir1).getEntries();
assertTrue(acls.isEmpty());
- assertFalse(targetPerm.getAclBit());
+ assertFalse(targetStatus.hasAcl());
// -ptop
Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
@@ -2491,7 +2499,7 @@ public class TestDFSShell {
assertTrue(xattrs.isEmpty());
acls = dfs.getAclStatus(targetDir2).getEntries();
assertTrue(acls.isEmpty());
- assertFalse(targetPerm.getAclBit());
+ assertFalse(targetStatus.hasAcl());
// -ptopx
Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
@@ -2512,7 +2520,7 @@ public class TestDFSShell {
assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
acls = dfs.getAclStatus(targetDir3).getEntries();
assertTrue(acls.isEmpty());
- assertFalse(targetPerm.getAclBit());
+ assertFalse(targetStatus.hasAcl());
// -ptopa
Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
@@ -2531,7 +2539,7 @@ public class TestDFSShell {
assertTrue(xattrs.isEmpty());
acls = dfs.getAclStatus(targetDir4).getEntries();
assertFalse(acls.isEmpty());
- assertTrue(targetPerm.getAclBit());
+ assertTrue(targetStatus.hasAcl());
assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir4));
// -ptoa (verify -pa option will preserve permissions also)
@@ -2551,7 +2559,7 @@ public class TestDFSShell {
assertTrue(xattrs.isEmpty());
acls = dfs.getAclStatus(targetDir5).getEntries();
assertFalse(acls.isEmpty());
- assertTrue(targetPerm.getAclBit());
+ assertTrue(targetStatus.hasAcl());
assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir5));
} finally {
if (shell != null) {
@@ -2607,7 +2615,7 @@ public class TestDFSShell {
assertTrue(perm.equals(targetPerm));
List acls = dfs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
- assertFalse(targetPerm.getAclBit());
+ assertFalse(targetStatus.hasAcl());
// -ptopa preserves both sticky bit and ACL
Path target2 = new Path(hdfsTestDir, "targetfile2");
@@ -2624,7 +2632,7 @@ public class TestDFSShell {
assertTrue(perm.equals(targetPerm));
acls = dfs.getAclStatus(target2).getEntries();
assertFalse(acls.isEmpty());
- assertTrue(targetPerm.getAclBit());
+ assertTrue(targetStatus.hasAcl());
assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target2));
} finally {
if (null != shell) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 5dee6e0f08c..e42e08cf77e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Logger;
import org.junit.Test;
-import static org.apache.hadoop.hdfs.inotify.Event.CreateEvent;
import static org.junit.Assert.*;
/**
@@ -572,7 +571,7 @@ public class TestDFSUpgradeFromImage {
Path path) throws IOException {
String pathStr = path.toString();
HdfsFileStatus status = dfs.getFileInfo(pathStr);
- if (!status.isDir()) {
+ if (!status.isDirectory()) {
for (int retries = 10; retries > 0; retries--) {
if (dfs.recoverLease(pathStr)) {
return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index c2c6be12d1a..ac14a2a41ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -256,9 +256,10 @@ public class TestDecommission extends AdminStatesBaseTest {
startSimpleHACluster(3);
- // Step 1, create a cluster with 4 DNs. Blocks are stored on the first 3 DNs.
- // The last DN is empty. Also configure the last DN to have slow heartbeat
- // so that it will be chosen as excess replica candidate during recommission.
+ // Step 1, create a cluster with 4 DNs. Blocks are stored on the
+ // first 3 DNs. The last DN is empty. Also configure the last DN to have
+ // slow heartbeat so that it will be chosen as excess replica candidate
+ // during recommission.
// Step 1.a, copy blocks to the first 3 DNs. Given the replica count is the
// same as # of DNs, each DN will have a replica for any block.
@@ -290,9 +291,9 @@ public class TestDecommission extends AdminStatesBaseTest {
// Step 3, recommission the first DN on SBN and ANN to create excess replica
// It recommissions the node on SBN first to create potential
- // inconsistent state. In production cluster, such insistent state can happen
- // even if recommission command was issued on ANN first given the async nature
- // of the system.
+    // inconsistent state. In a production cluster, such an inconsistent state
+    // can happen even if the recommission command was issued on ANN first,
+    // given the async nature of the system.
// Step 3.a, ask SBN to recomm the first DN.
// SBN has been fixed so that it no longer invalidates excess replica during
@@ -301,10 +302,10 @@ public class TestDecommission extends AdminStatesBaseTest {
// 1. the last DN would have been chosen as excess replica, given its
// heartbeat is considered old.
// Please refer to BlockPlacementPolicyDefault#chooseReplicaToDelete
- // 2. After recommissionNode finishes, SBN has 3 live replicas ( 0, 1, 2 )
+ // 2. After recommissionNode finishes, SBN has 3 live replicas (0, 1, 2)
// and one excess replica ( 3 )
// After the fix,
- // After recommissionNode finishes, SBN has 4 live replicas ( 0, 1, 2, 3 )
+ // After recommissionNode finishes, SBN has 4 live replicas (0, 1, 2, 3)
Thread.sleep(slowHeartbeatDNwaitTime);
putNodeInService(1, decomNodeFromSBN);
@@ -561,7 +562,8 @@ public class TestDecommission extends AdminStatesBaseTest {
* federated cluster.
*/
@Test(timeout=360000)
- public void testHostsFileFederation() throws IOException, InterruptedException {
+ public void testHostsFileFederation()
+ throws IOException, InterruptedException {
// Test for 3 namenode federated cluster
testHostsFile(3);
}
@@ -598,7 +600,8 @@ public class TestDecommission extends AdminStatesBaseTest {
}
@Test(timeout=120000)
- public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
+ public void testDecommissionWithOpenfile()
+ throws IOException, InterruptedException {
LOG.info("Starting test testDecommissionWithOpenfile");
//At most 4 nodes will be decommissioned
@@ -742,14 +745,15 @@ public class TestDecommission extends AdminStatesBaseTest {
// make sure the two datanodes remain in decomm in progress state
BlockManagerTestUtil.recheckDecommissionState(dm);
- assertTrackedAndPending(dm.getDecomManager(), 2, 0);
+ assertTrackedAndPending(dm.getDatanodeAdminManager(), 2, 0);
}
/**
* Tests restart of namenode while datanode hosts are added to exclude file
**/
@Test(timeout=360000)
- public void testDecommissionWithNamenodeRestart()throws IOException, InterruptedException {
+ public void testDecommissionWithNamenodeRestart()
+ throws IOException, InterruptedException {
LOG.info("Starting test testDecommissionWithNamenodeRestart");
int numNamenodes = 1;
int numDatanodes = 1;
@@ -914,7 +918,7 @@ public class TestDecommission extends AdminStatesBaseTest {
@Test(timeout=120000)
public void testBlocksPerInterval() throws Exception {
- org.apache.log4j.Logger.getLogger(DecommissionManager.class)
+ org.apache.log4j.Logger.getLogger(DatanodeAdminManager.class)
.setLevel(Level.TRACE);
// Turn the blocks per interval way down
getConf().setInt(
@@ -927,7 +931,8 @@ public class TestDecommission extends AdminStatesBaseTest {
final FileSystem fs = getCluster().getFileSystem();
final DatanodeManager datanodeManager =
getCluster().getNamesystem().getBlockManager().getDatanodeManager();
- final DecommissionManager decomManager = datanodeManager.getDecomManager();
+ final DatanodeAdminManager decomManager =
+ datanodeManager.getDatanodeAdminManager();
// Write a 3 block file, so each node has one block. Should scan 3 nodes.
DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
@@ -944,7 +949,7 @@ public class TestDecommission extends AdminStatesBaseTest {
}
private void doDecomCheck(DatanodeManager datanodeManager,
- DecommissionManager decomManager, int expectedNumCheckedNodes)
+ DatanodeAdminManager decomManager, int expectedNumCheckedNodes)
throws IOException, ExecutionException, InterruptedException {
// Decom all nodes
ArrayList decommissionedNodes = Lists.newArrayList();
@@ -965,7 +970,7 @@ public class TestDecommission extends AdminStatesBaseTest {
@Test(timeout=120000)
public void testPendingNodes() throws Exception {
- org.apache.log4j.Logger.getLogger(DecommissionManager.class)
+ org.apache.log4j.Logger.getLogger(DatanodeAdminManager.class)
.setLevel(Level.TRACE);
// Only allow one node to be decom'd at a time
getConf().setInt(
@@ -978,7 +983,8 @@ public class TestDecommission extends AdminStatesBaseTest {
final FileSystem fs = getCluster().getFileSystem();
final DatanodeManager datanodeManager =
getCluster().getNamesystem().getBlockManager().getDatanodeManager();
- final DecommissionManager decomManager = datanodeManager.getDecomManager();
+ final DatanodeAdminManager decomManager =
+ datanodeManager.getDatanodeAdminManager();
// Keep a file open to prevent decom from progressing
HdfsDataOutputStream open1 =
@@ -1014,7 +1020,7 @@ public class TestDecommission extends AdminStatesBaseTest {
assertTrackedAndPending(decomManager, 1, 0);
}
- private void assertTrackedAndPending(DecommissionManager decomManager,
+ private void assertTrackedAndPending(DatanodeAdminManager decomManager,
int tracked, int pending) {
assertEquals("Unexpected number of tracked nodes", tracked,
decomManager.getNumTrackedNodes());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index b35d3747868..95256096e73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.net.StaticMapping;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DataChecksum;
@@ -1561,6 +1562,27 @@ public class TestDistributedFileSystem {
fs.removeErasureCodingPolicy(policyName);
assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
getRemovedPolicies().get(0).getName());
+
+ // remove erasure coding policy as a user without privilege
+ UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
+ "ProbablyNotARealUserName", new String[] {"ShangriLa"});
+ final MiniDFSCluster finalCluster = cluster;
+ fakeUGI.doAs(new PrivilegedExceptionAction