From 24080666e5e2214d4a362c889cd9aa617be5de81 Mon Sep 17 00:00:00 2001
From: Inigo Goiri <inigoiri@apache.org>
Date: Mon, 16 Dec 2019 18:41:45 -0800
Subject: [PATCH] HDFS-14908. LeaseManager should check parent-child
 relationship when filter open files. Contributed by Jinglun.

---
 .../hdfs/server/namenode/FSNamesystem.java    |  7 +--
 .../hdfs/server/namenode/LeaseManager.java    |  4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java   | 21 +++++++-
 .../server/namenode/TestListOpenFiles.java    | 48 +++++++++++++++++--
 4 files changed, 71 insertions(+), 9 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8d1884e41ef..b626271ebf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1826,16 +1826,17 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.READ);
     BatchedListEntries<OpenFileEntry> batchedListEntries;
+    String normalizedPath = new Path(path).toString(); // normalize path.
     try {
       readLock();
       try {
         checkOperation(OperationCategory.READ);
         if (openFilesTypes.contains(OpenFilesType.ALL_OPEN_FILES)) {
           batchedListEntries = leaseManager.getUnderConstructionFiles(prevId,
-              path);
+              normalizedPath);
         } else {
           if (openFilesTypes.contains(OpenFilesType.BLOCKING_DECOMMISSION)) {
-            batchedListEntries = getFilesBlockingDecom(prevId, path);
+            batchedListEntries = getFilesBlockingDecom(prevId, normalizedPath);
           } else {
             throw new IllegalArgumentException("Unknown OpenFileType: "
                 + openFilesTypes);
@@ -1874,7 +1875,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         String fullPathName = inodeFile.getFullPathName();
 
         if (org.apache.commons.lang3.StringUtils.isEmpty(path)
-            || fullPathName.startsWith(path)) {
+            || DFSUtil.isParentEntry(fullPathName, path)) {
           openFileEntries.add(new OpenFileEntry(inodeFile.getId(),
               inodeFile.getFullPathName(),
               inodeFile.getFileUnderConstructionFeature().getClientName(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 996144073ab..2efadfc4ec3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -42,6 +42,7 @@ import com.google.common.collect.Lists;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
@@ -315,7 +316,8 @@ public class LeaseManager {
       }
       fullPathName = inodeFile.getFullPathName();
-      if (StringUtils.isEmpty(path) || fullPathName.startsWith(path)) {
+      if (StringUtils.isEmpty(path) ||
+          DFSUtil.isParentEntry(fullPathName, path)) {
        openFileEntries.add(new OpenFileEntry(inodeFile.getId(),
             fullPathName,
             inodeFile.getFileUnderConstructionFeature().getClientName(),
             inodeFile.getFileUnderConstructionFeature().getClientMachine()));
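Why the change matters: the previous filter used a plain String prefix test, so a filter path of "/base" also matched files under the sibling directory "/base-open". A parent-child check must additionally require a path-separator boundary right after the parent. The patch does not include the body of DFSUtil.isParentEntry; the standalone sketch below is a hypothetical re-implementation of that boundary condition for illustration only, not the authoritative code in org.apache.hadoop.hdfs.DFSUtil.

    // Sketch only: illustrates the prefix-collision bug fixed above.
    public class ParentEntrySketch {
      private static final char SEPARATOR = '/';

      static boolean isParentEntry(String path, String parent) {
        if (!path.startsWith(parent)) {
          return false;
        }
        if (path.length() == parent.length()) {
          return true; // the path is the parent itself
        }
        // Require a separator right after the parent (or a root parent), so
        // "/base" matches "/base/f" but not the sibling "/base-open/f".
        return (parent.length() == 1 && parent.charAt(0) == SEPARATOR)
            || path.charAt(parent.length()) == SEPARATOR;
      }

      public static void main(String[] args) {
        System.out.println("/base-open/f".startsWith("/base"));     // true: the old bug
        System.out.println(isParentEntry("/base-open/f", "/base")); // false: fixed
        System.out.println(isParentEntry("/base/f", "/base"));      // true
      }
    }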
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 36f2eb2d805..96466763174 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -2392,13 +2392,32 @@ public class DFSTestUtil {
     }
   }
 
+  /**
+   * Create open files under root path.
+   * @param fs the filesystem.
+   * @param filePrefix the prefix of the files.
+   * @param numFilesToCreate the number of files to create.
+   */
   public static Map<Path, FSDataOutputStream> createOpenFiles(FileSystem fs,
       String filePrefix, int numFilesToCreate) throws IOException {
+    return createOpenFiles(fs, new Path("/"), filePrefix, numFilesToCreate);
+  }
+
+  /**
+   * Create open files.
+   * @param fs the filesystem.
+   * @param baseDir the base path of the files.
+   * @param filePrefix the prefix of the files.
+   * @param numFilesToCreate the number of files to create.
+   */
+  public static Map<Path, FSDataOutputStream> createOpenFiles(FileSystem fs,
+      Path baseDir, String filePrefix, int numFilesToCreate)
+      throws IOException {
     final Map<Path, FSDataOutputStream> filesCreated = new HashMap<>();
     final byte[] buffer = new byte[(int) (1024 * 1.75)];
     final Random rand = new Random(0xFEED0BACL);
     for (int i = 0; i < numFilesToCreate; i++) {
-      Path file = new Path("/" + filePrefix + "-" + i);
+      Path file = new Path(baseDir, filePrefix + "-" + i);
       FSDataOutputStream stm = fs.create(file, true, 1024, (short) 1, 1024);
       rand.nextBytes(buffer);
       stm.write(buffer);
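Usage note: a minimal sketch of how tests call the new overload, assuming a FileSystem handle from a running MiniDFSCluster; the class and method names below are illustrative, not part of the patch.

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;

    public class CreateOpenFilesSketch {
      // "fs" is assumed to come from a running MiniDFSCluster.
      static void demo(FileSystem fs) throws IOException {
        // New overload: create files held open under an arbitrary base directory.
        Map<Path, FSDataOutputStream> underBase =
            DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open", 2);
        // The existing form is unchanged for callers: it now delegates to the
        // overload with baseDir = new Path("/").
        Map<Path, FSDataOutputStream> underRoot =
            DFSTestUtil.createOpenFiles(fs, "open", 2);
        // The returned streams stay open; tests close them later, e.g. via
        // DFSTestUtil.closeOpenFiles(...) as the new test below does.
      }
    }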
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
index 337e3728a91..2158bc7a44c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
@@ -157,13 +157,22 @@ public class TestListOpenFiles {
         remainingFiles.size() == 0);
   }
 
+  /**
+   * Verify all open files.
+   */
   private void verifyOpenFiles(Map<Path, FSDataOutputStream> openFiles)
       throws IOException {
-    verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
-        OpenFilesIterator.FILTER_PATH_DEFAULT);
+    verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
+  }
+
+  /**
+   * Verify open files with specified filter path.
+   */
+  private void verifyOpenFiles(Map<Path, FSDataOutputStream> openFiles,
+      String path) throws IOException {
+    verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES), path);
     verifyOpenFiles(new HashMap<>(),
-        EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
-        OpenFilesIterator.FILTER_PATH_DEFAULT);
+        EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION), path);
   }
 
   private Set<Path> createFiles(FileSystem fileSystem, String fileNamePrefix,
@@ -255,4 +264,35 @@ public class TestListOpenFiles {
       }
     }
   }
+
+  @Test(timeout = 120000)
+  public void testListOpenFilesWithFilterPath() throws IOException {
+    HashMap<Path, FSDataOutputStream> openFiles = new HashMap<>();
+    createFiles(fs, "closed", 10);
+    verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
+
+    BatchedEntries<OpenFileEntry> openFileEntryBatchedEntries = nnRpc
+        .listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+            OpenFilesIterator.FILTER_PATH_DEFAULT);
+    assertTrue("Open files list should be empty!",
+        openFileEntryBatchedEntries.size() == 0);
+    BatchedEntries<OpenFileEntry> openFilesBlockingDecomEntries = nnRpc
+        .listOpenFiles(0, EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
+            OpenFilesIterator.FILTER_PATH_DEFAULT);
+    assertTrue("Open files list blocking decommission should be empty!",
+        openFilesBlockingDecomEntries.size() == 0);
+
+    openFiles.putAll(
+        DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open-1", 1));
+    Map<Path, FSDataOutputStream> baseOpen =
+        DFSTestUtil.createOpenFiles(fs, new Path("/base-open"), "open-1", 1);
+    verifyOpenFiles(openFiles, "/base");
+    verifyOpenFiles(openFiles, "/base/");
+
+    openFiles.putAll(baseOpen);
+    while (openFiles.size() > 0) {
+      DFSTestUtil.closeOpenFiles(openFiles, 1);
+      verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
+    }
+  }
 }
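A closing note on the FSNamesystem change at the top of the patch: the filter is normalized with new Path(path).toString() before the lock is taken, which is why the new test can pass "/base/" and still match files under "/base". A small standalone sketch of that behavior, relying on Hadoop Path's normalization of trailing and duplicate slashes:

    import org.apache.hadoop.fs.Path;

    public class PathNormalizationSketch {
      public static void main(String[] args) {
        // Path normalizes its string form, so a trailing slash is dropped...
        System.out.println(new Path("/base/"));      // prints "/base"
        // ...and duplicate separators are collapsed.
        System.out.println(new Path("/base//sub/")); // prints "/base/sub"
      }
    }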