HDFS-14595. HDFS-11848 breaks API compatibility. Contributed by Siyao Meng.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
Reviewed-by: Ayush Saxena <ayushsaxena@apache.org>
This commit is contained in:
Siyao Meng 2019-08-14 07:24:22 -07:00 committed by Wei-Chiu Chuang
parent 83e452ecea
commit 3c0382f1b9
4 changed files with 60 additions and 7 deletions

View File

@@ -3380,6 +3380,12 @@ public class DistributedFileSystem extends FileSystem
return dfs.listOpenFiles(); return dfs.listOpenFiles();
} }
@Deprecated
public RemoteIterator<OpenFileEntry> listOpenFiles(
EnumSet<OpenFilesType> openFilesTypes) throws IOException {
return dfs.listOpenFiles(openFilesTypes);
}
public RemoteIterator<OpenFileEntry> listOpenFiles( public RemoteIterator<OpenFileEntry> listOpenFiles(
EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException { EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
return dfs.listOpenFiles(openFilesTypes, path); return dfs.listOpenFiles(openFilesTypes, path);

View File

@@ -626,6 +626,12 @@ public class HdfsAdmin {
return dfs.listOpenFiles(); return dfs.listOpenFiles();
} }
@Deprecated
public RemoteIterator<OpenFileEntry> listOpenFiles(
EnumSet<OpenFilesType> openFilesTypes) throws IOException {
return dfs.listOpenFiles(openFilesTypes);
}
public RemoteIterator<OpenFileEntry> listOpenFiles( public RemoteIterator<OpenFileEntry> listOpenFiles(
EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException { EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
return dfs.listOpenFiles(openFilesTypes, path); return dfs.listOpenFiles(openFilesTypes, path);

View File

@@ -94,6 +94,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode; import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -194,8 +196,10 @@ public class TestDistributedFileSystem {
* Tests DFSClient.close throws no ConcurrentModificationException if * Tests DFSClient.close throws no ConcurrentModificationException if
* multiple files are open. * multiple files are open.
* Also tests that any cached sockets are closed. (HDFS-3359) * Also tests that any cached sockets are closed. (HDFS-3359)
* Also tests deprecated listOpenFiles(EnumSet<>). (HDFS-14595)
*/ */
@Test @Test
@SuppressWarnings("deprecation") // call to listOpenFiles(EnumSet<>)
public void testDFSClose() throws Exception { public void testDFSClose() throws Exception {
Configuration conf = getTestConfiguration(); Configuration conf = getTestConfiguration();
MiniDFSCluster cluster = null; MiniDFSCluster cluster = null;
@ -207,6 +211,19 @@ public class TestDistributedFileSystem {
fileSys.create(new Path("/test/dfsclose/file-0")); fileSys.create(new Path("/test/dfsclose/file-0"));
fileSys.create(new Path("/test/dfsclose/file-1")); fileSys.create(new Path("/test/dfsclose/file-1"));
// Test listOpenFiles(EnumSet<>)
List<OpenFilesIterator.OpenFilesType> types = new ArrayList<>();
types.add(OpenFilesIterator.OpenFilesType.ALL_OPEN_FILES);
RemoteIterator<OpenFileEntry> listOpenFiles =
fileSys.listOpenFiles(EnumSet.copyOf(types));
assertTrue("Two files should be open", listOpenFiles.hasNext());
int countOpenFiles = 0;
while (listOpenFiles.hasNext()) {
listOpenFiles.next();
++countOpenFiles;
}
assertEquals("Mismatch of open files count", 2, countOpenFiles);
// create another file, close it, and read it, so // create another file, close it, and read it, so
// the client gets a socket in its SocketCache // the client gets a socket in its SocketCache
Path p = new Path("/non-empty-file"); Path p = new Path("/non-empty-file");

View File

@@ -233,6 +233,9 @@ public class TestHdfsAdmin {
closedFileSet.add(filePath); closedFileSet.add(filePath);
} }
verifyOpenFiles(closedFileSet, openFileMap); verifyOpenFiles(closedFileSet, openFileMap);
// Verify again with the old listOpenFiles(EnumSet<>) API
// Just to verify old API's validity
verifyOpenFilesOld(closedFileSet, openFileMap);
openFileMap.putAll( openFileMap.putAll(
DFSTestUtil.createOpenFiles(fs, "open-file-1", numOpenFiles)); DFSTestUtil.createOpenFiles(fs, "open-file-1", numOpenFiles));
@ -252,13 +255,10 @@ public class TestHdfsAdmin {
} }
} }
private void verifyOpenFiles(HashSet<Path> closedFiles, private void verifyOpenFilesHelper(
HashMap<Path, FSDataOutputStream> openFileMap) throws IOException { RemoteIterator<OpenFileEntry> openFilesRemoteItr,
HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); HashSet<Path> closedFiles,
HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet()); HashSet<Path> openFiles) throws IOException {
RemoteIterator<OpenFileEntry> openFilesRemoteItr =
hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
OpenFilesIterator.FILTER_PATH_DEFAULT);
while (openFilesRemoteItr.hasNext()) { while (openFilesRemoteItr.hasNext()) {
String filePath = openFilesRemoteItr.next().getFilePath(); String filePath = openFilesRemoteItr.next().getFilePath();
assertFalse(filePath + " should not be listed under open files!", assertFalse(filePath + " should not be listed under open files!",
@ -266,6 +266,30 @@ public class TestHdfsAdmin {
assertTrue(filePath + " is not listed under open files!", assertTrue(filePath + " is not listed under open files!",
openFiles.remove(new Path(filePath))); openFiles.remove(new Path(filePath)));
} }
}
private void verifyOpenFiles(HashSet<Path> closedFiles,
HashMap<Path, FSDataOutputStream> openFileMap) throws IOException {
HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet());
RemoteIterator<OpenFileEntry> openFilesRemoteItr =
hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
OpenFilesIterator.FILTER_PATH_DEFAULT);
verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
assertTrue("Not all open files are listed!", openFiles.isEmpty());
}
/**
* Using deprecated HdfsAdmin#listOpenFiles(EnumSet<>) to verify open files.
*/
@SuppressWarnings("deprecation") // call to listOpenFiles(EnumSet<>)
private void verifyOpenFilesOld(HashSet<Path> closedFiles,
HashMap<Path, FSDataOutputStream> openFileMap) throws IOException {
HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
HashSet<Path> openFiles = new HashSet<>(openFileMap.keySet());
RemoteIterator<OpenFileEntry> openFilesRemoteItr =
hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
assertTrue("Not all open files are listed!", openFiles.isEmpty()); assertTrue("Not all open files are listed!", openFiles.isEmpty());
} }
} }