diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java index 6becedc48e5..96b560c649a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java @@ -24,7 +24,7 @@ import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDE import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE_DEFAULT; -import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.isParentEntry; +import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry; import java.io.IOException; import java.util.Collection; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java index df22c4a8050..d2eec04cd4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java @@ -27,7 +27,6 @@ import java.net.URLConnection; import java.util.EnumSet; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; @@ -224,25 +223,6 @@ public final class FederationUtil { return newInstance(conf, null, null, clazz); } - /** - * Check if the given path is the child of parent path. - * @param path Path to be check. - * @param parent Parent path. - * @return True if parent path is parent entry for given path. - */ - public static boolean isParentEntry(final String path, final String parent) { - if (!path.startsWith(parent)) { - return false; - } - - if (path.equals(parent)) { - return true; - } - - return path.charAt(parent.length()) == Path.SEPARATOR_CHAR - || parent.equals(Path.SEPARATOR); - } - /** * Add the the number of children for an existing HdfsFileStatus object. * @param dirStatus HdfsfileStatus object. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java index ee656083c7e..0a08f72a538 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -217,7 +219,7 @@ public class Quota { boolean isChildPath = false; for (RemoteLocation d : dests) { - if (FederationUtil.isParentEntry(loc.getDest(), d.getDest())) { + if (isParentEntry(loc.getDest(), d.getDest())) { isChildPath = true; break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java index f061cc58d9a..c1a51463e1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.federation.router; -import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.isParentEntry; +import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry; import java.util.HashSet; import java.util.Map.Entry; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java index 8682a3d5e4b..fb9f6a3c7fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.federation.store.impl; +import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry; + import java.io.IOException; import java.util.Collections; import java.util.Iterator; @@ -25,7 +27,6 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.hdfs.server.federation.router.FederationUtil; import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer; import org.apache.hadoop.hdfs.server.federation.router.RouterPermissionChecker; import 
org.apache.hadoop.hdfs.server.federation.store.MountTableStore; @@ -140,7 +141,7 @@ public class MountTableStoreImpl extends MountTableStore { while (it.hasNext()) { MountTable record = it.next(); String srcPath = record.getSourcePath(); - if (!FederationUtil.isParentEntry(srcPath, reqSrcPath)) { + if (!isParentEntry(srcPath, reqSrcPath)) { it.remove(); } else if (pc != null) { // do the READ permission check diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 8ad11f805eb..1968e329cee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -1816,4 +1816,22 @@ public class DFSUtil { return flags; } + /** + * Check if the given path is the child of parent path. + * @param path Path to be checked. + * @param parent Parent path. + * @return True if parent path is parent entry for given path. 
+ */ + public static boolean isParentEntry(final String path, final String parent) { + if (!path.startsWith(parent)) { + return false; + } + + if (path.equals(parent)) { + return true; + } + + return path.charAt(parent.length()) == Path.SEPARATOR_CHAR + || parent.equals(Path.SEPARATOR); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 0bf21665458..8d1884e41ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -89,6 +89,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_LISTING_LIMIT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_LISTING_LIMIT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY; @@ -5665,7 +5666,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, skip++; if (inode != null) { String src = inode.getFullPathName(); - if (src.startsWith(path)){ + if (isParentEntry(src, path)) { corruptFiles.add(new CorruptFileBlockInfo(src, blk)); count++; if (count >= maxCorruptFileBlocksReturn) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index ad8aaaab3f8..69b1555802d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -94,7 +94,8 @@ import com.google.common.annotations.VisibleForTesting; * This class provides rudimentary checking of DFS volumes for errors and * sub-optimal conditions. * <p>The tool scans all files and directories, starting from an indicated - * root path. The following abnormal conditions are detected and handled:</p> + * root path and its descendants. The following abnormal conditions are + * detected and handled:</p> *