HDFS-14595. HDFS-11848 breaks API compatibility. Contributed by Siyao Meng.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
Reviewed-by: Ayush Saxena <ayushsaxena@apache.org>
(cherry picked from commit 3c0382f1b9)
(cherry picked from commit 136a97a74dbc12f05b88d0abda101690e7c727d9)
Author:    Siyao Meng
Date:      2019-08-14 07:24:22 -07:00
Committer: Wei-Chiu Chuang
Parent:    6966b76230
Commit:    224643a58c

4 changed files with 60 additions and 7 deletions

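For context, the compatibility break looks like this from a downstream project's point of view. A minimal sketch (the OpenFileAudit class is hypothetical, not part of this change): code compiled against a pre-HDFS-11848 hadoop-hdfs-client resolves the one-argument listOpenFiles(EnumSet<>); HDFS-11848 replaced that signature with a two-argument overload, so such binaries fail with NoSuchMethodError on newer client jars until this commit restores the old signature as @Deprecated.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

public class OpenFileAudit {
  // Compiled against a pre-HDFS-11848 client, this call binds to
  // listOpenFiles(EnumSet). HDFS-11848 removed that signature, so the same
  // bytecode throws NoSuchMethodError on newer jars; this commit restores it.
  public static void printOpenFiles(HdfsAdmin admin) throws IOException {
    RemoteIterator<OpenFileEntry> it =
        admin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
    while (it.hasNext()) {
      System.out.println(it.next().getFilePath());
    }
  }
}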
DistributedFileSystem.java

@@ -3299,6 +3299,12 @@ public class DistributedFileSystem extends FileSystem
     return dfs.listOpenFiles();
   }
 
+  @Deprecated
+  public RemoteIterator<OpenFileEntry> listOpenFiles(
+      EnumSet<OpenFilesType> openFilesTypes) throws IOException {
+    return dfs.listOpenFiles(openFilesTypes);
+  }
+
   public RemoteIterator<OpenFileEntry> listOpenFiles(
       EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
     return dfs.listOpenFiles(openFilesTypes, path);

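The restored overload simply delegates to DFSClient, so its behavior matches the old API. A hedged usage sketch (assumes a reachable HDFS whose default filesystem is a DistributedFileSystem and sufficient privileges to list open files; ListOpenFilesExample is an invented name) showing the deprecated call next to its HDFS-11848 replacement:

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

public class ListOpenFilesExample {
  @SuppressWarnings("deprecation")
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (DistributedFileSystem dfs =
             (DistributedFileSystem) FileSystem.get(conf)) {
      // Deprecated overload kept for compatibility: no path filter.
      RemoteIterator<OpenFileEntry> all =
          dfs.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
      int total = 0;
      while (all.hasNext()) {
        all.next();
        ++total;
      }
      System.out.println(total + " open file(s) in total");
      // HDFS-11848 replacement: restricts the listing to a subtree.
      RemoteIterator<OpenFileEntry> underTmp =
          dfs.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "/tmp");
      while (underTmp.hasNext()) {
        System.out.println(underTmp.next().getFilePath());
      }
    }
  }
}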
HdfsAdmin.java

@@ -616,6 +616,12 @@ public class HdfsAdmin {
     return dfs.listOpenFiles();
   }
 
+  @Deprecated
+  public RemoteIterator<OpenFileEntry> listOpenFiles(
+      EnumSet<OpenFilesType> openFilesTypes) throws IOException {
+    return dfs.listOpenFiles(openFilesTypes);
+  }
+
   public RemoteIterator<OpenFileEntry> listOpenFiles(
       EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
     return dfs.listOpenFiles(openFilesTypes, path);

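HdfsAdmin mirrors the same pair of overloads, delegating to DistributedFileSystem. In the client, the one-argument form is equivalent to passing the default path filter OpenFilesIterator.FILTER_PATH_DEFAULT ("/"), which is why legacy callers keep the old list-everything semantics. A small sketch of that equivalence (the OpenFilesEquivalence class and listBothWays method are invented):

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

public class OpenFilesEquivalence {
  @SuppressWarnings("deprecation")
  static void listBothWays(HdfsAdmin admin) throws IOException {
    // Deprecated one-argument overload restored by this commit.
    RemoteIterator<OpenFileEntry> legacy =
        admin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
    // Expected to match the two-argument call with the default filter.
    RemoteIterator<OpenFileEntry> viaDefault =
        admin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
            OpenFilesIterator.FILTER_PATH_DEFAULT);
    while (legacy.hasNext()) {
      System.out.println(legacy.next().getFilePath());
    }
    while (viaDefault.hasNext()) {
      System.out.println(viaDefault.next().getFilePath());
    }
  }
}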
TestDistributedFileSystem.java

@@ -86,6 +86,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -183,8 +185,10 @@ public class TestDistributedFileSystem {
    * Tests DFSClient.close throws no ConcurrentModificationException if
    * multiple files are open.
    * Also tests that any cached sockets are closed. (HDFS-3359)
+   * Also tests deprecated listOpenFiles(EnumSet<>). (HDFS-14595)
    */
   @Test
+  @SuppressWarnings("deprecation") // call to listOpenFiles(EnumSet<>)
   public void testDFSClose() throws Exception {
     Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = null;
@@ -196,6 +200,19 @@ public class TestDistributedFileSystem {
       fileSys.create(new Path("/test/dfsclose/file-0"));
       fileSys.create(new Path("/test/dfsclose/file-1"));
 
+      // Test listOpenFiles(EnumSet<>)
+      List<OpenFilesIterator.OpenFilesType> types = new ArrayList<>();
+      types.add(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES);
+      RemoteIterator<OpenFileEntry> listOpenFiles =
+          fileSys.listOpenFiles(EnumSet.copyOf(types));
+      assertTrue("Two files should be open", listOpenFiles.hasNext());
+      int countOpenFiles = 0;
+      while (listOpenFiles.hasNext()) {
+        listOpenFiles.next();
+        ++countOpenFiles;
+      }
+      assertEquals("Mismatch of open files count", 2, countOpenFiles);
+
       // create another file, close it, and read it, so
       // the client gets a socket in its SocketCache
       Path p = new Path("/non-empty-file");

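One detail of the new test block: EnumSet.copyOf(Collection) infers the enum type from the collection's first element, so the plain List it is given must be non-empty or it throws IllegalArgumentException. A standalone illustration (EnumSetCopyDemo is an invented name):

import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

public class EnumSetCopyDemo {
  public static void main(String[] args) {
    List<OpenFilesType> types = new ArrayList<>();
    types.add(OpenFilesType.ALL_OPEN_FILES);
    // Fine: the element type is inferred from the first element.
    EnumSet<OpenFilesType> set = EnumSet.copyOf(types);
    System.out.println(set);
    // By contrast, EnumSet.copyOf(new ArrayList<OpenFilesType>()) throws
    // IllegalArgumentException: an empty plain Collection carries no
    // element to infer the enum type from.
  }
}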
TestHdfsAdmin.java

@@ -233,6 +233,9 @@ public class TestHdfsAdmin {
       closedFileSet.add(filePath);
     }
     verifyOpenFiles(closedFileSet, openFileMap);
+    // Verify again with the old listOpenFiles(EnumSet<>) API
+    // Just to verify old API's validity
+    verifyOpenFilesOld(closedFileSet, openFileMap);
 
     openFileMap.putAll(
         DFSTestUtil.createOpenFiles(fs, "open-file-1", numOpenFiles));
@@ -252,13 +255,10 @@ public class TestHdfsAdmin {
     }
   }
 
-  private void verifyOpenFiles(HashSet<Path> closedFiles,
-      HashMap<Path, FSDataOutputStream> openFileMap) throws IOException {
-    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
-    HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet());
-    RemoteIterator<OpenFileEntry> openFilesRemoteItr =
-        hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
-            OpenFilesIterator.FILTER_PATH_DEFAULT);
+  private void verifyOpenFilesHelper(
+      RemoteIterator<OpenFileEntry> openFilesRemoteItr,
+      HashSet<Path> closedFiles,
+      HashSet<Path> openFiles) throws IOException {
     while (openFilesRemoteItr.hasNext()) {
       String filePath = openFilesRemoteItr.next().getFilePath();
       assertFalse(filePath + " should not be listed under open files!",
@@ -266,6 +266,30 @@ public class TestHdfsAdmin {
       assertTrue(filePath + " is not listed under open files!",
           openFiles.remove(new Path(filePath)));
     }
+  }
+
+  private void verifyOpenFiles(HashSet<Path> closedFiles,
+      HashMap<Path, FSDataOutputStream> openFileMap) throws IOException {
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet());
+    RemoteIterator<OpenFileEntry> openFilesRemoteItr =
+        hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+            OpenFilesIterator.FILTER_PATH_DEFAULT);
+    verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
+    assertTrue("Not all open files are listed!", openFiles.isEmpty());
+  }
+
+  /**
+   * Using deprecated HdfsAdmin#listOpenFiles(EnumSet<>) to verify open files.
+   */
+  @SuppressWarnings("deprecation") // call to listOpenFiles(EnumSet<>)
+  private void verifyOpenFilesOld(HashSet<Path> closedFiles,
+      HashMap<Path, FSDataOutputStream> openFileMap) throws IOException {
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet());
+    RemoteIterator<OpenFileEntry> openFilesRemoteItr =
+        hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+    verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
     assertTrue("Not all open files are listed!", openFiles.isEmpty());
   }
 }
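Taken together, the refactor funnels both API paths through the shared verifyOpenFilesHelper, so the deprecated and current listOpenFiles overloads are checked by identical assertions; when the deprecated overload is eventually removed for good, only verifyOpenFilesOld and the @SuppressWarnings("deprecation") annotations should need to be deleted with it.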