diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 60f82c8ff94..37e4a8b97d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -997,6 +997,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9797. Log Standby exceptions thrown by RequestHedgingProxyProvider at
     DEBUG Level (Inigo Goiri via asuresh)
 
+    HDFS-9773. Remove dead code related to SimulatedFSDataset in tests.
+    (Brahma Reddy Battula via aajisaka)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 84699a95171..40589f5e9ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -43,7 +43,6 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -59,8 +58,6 @@ public class TestFileAppend{
   private static final long RANDOM_TEST_RUNTIME = 10000;
 
-  final boolean simulatedStorage = false;
-
   private static byte[] fileContents = null;
 
   //
@@ -101,13 +98,7 @@ private void checkFile(DistributedFileSystem fileSys, Path name, int repl)
     }
     byte[] expected =
         new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
-    if (simulatedStorage) {
-      LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(name.toString(),
-          0, AppendTestUtil.FILE_SIZE);
-      DFSTestUtil.fillExpectedBuf(lbs, expected);
-    } else {
-      System.arraycopy(fileContents, 0, expected, 0, expected.length);
-    }
+    System.arraycopy(fileContents, 0, expected, 0, expected.length);
     // do a sanity check. Read the file
     // do not check file status since the file is not yet closed.
     AppendTestUtil.checkFullFile(fileSys, name,
@@ -118,9 +109,6 @@ private void checkFile(DistributedFileSystem fileSys, Path name, int repl)
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
@@ -186,9 +174,6 @@ public void testBreakHardlinksIfNeeded() throws IOException {
   @Test
   public void testSimpleFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     DistributedFileSystem fs = cluster.getFileSystem();
@@ -242,9 +227,6 @@ public void testSimpleFlush() throws IOException {
   @Test
   public void testComplexFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     DistributedFileSystem fs = cluster.getFileSystem();
@@ -293,9 +275,6 @@ public void testComplexFlush() throws IOException {
   @Test(expected = FileNotFoundException.class)
   public void testFileNotFound() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index 3c72db35bd2..cd1b8510e95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -38,7 +38,6 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -59,7 +58,6 @@ public class TestFileAppend2 {
   }
 
   static final int numBlocks = 5;
-  final boolean simulatedStorage = false;
 
   private byte[] fileContents = null;
 
@@ -81,9 +79,6 @@ public class TestFileAppend2 {
   @Test
   public void testSimpleAppend() throws IOException {
     final Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -233,9 +228,6 @@ public void testSimpleAppend() throws IOException {
   @Test
   public void testSimpleAppend2() throws Exception {
     final Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 62f6cd2ee4d..265b510ec10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -43,7 +43,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
@@ -67,7 +66,6 @@ public class TestFileAppend4 {
   MiniDFSCluster cluster;
   Path file1;
   FSDataOutputStream stm;
-  final boolean simulatedStorage = false;
 
   {
     DFSTestUtil.setNameNodeLogLevel(Level.ALL);
@@ -78,9 +76,6 @@ public class TestFileAppend4 {
   @Before
   public void setUp() throws Exception {
     this.conf = new Configuration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
 
     // lower heartbeat interval for fast recognition of DN death
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
index 08961202653..a37da35a893 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.junit.Test;
 
 /**
@@ -51,7 +50,6 @@ public class TestLargeBlock {
   // should we verify the data read back from the file? (slow)
   static final boolean verifyData = true;
   static final byte[] pattern = { 'D', 'E', 'A', 'D', 'B', 'E', 'E', 'F'};
-  static final boolean simulatedStorage = false;
 
   // creates a file
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl,
@@ -177,9 +175,6 @@ public void runTest(final long blockSize) throws IOException {
     final long fileSize = blockSize + 1L;
 
     Configuration conf = new Configuration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index a0690039093..f4fbebcfc29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -97,7 +97,6 @@ public void before() {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 5120;
-  final boolean simulatedStorage = false;
 
   // creates a file but does not close it
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
@@ -268,9 +267,6 @@ public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
       conf.setBoolean(
           HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
     }
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FileSystem fs = cluster.getFileSystem();
@@ -399,9 +395,6 @@ public void testSkipWithVerifyChecksum() throws IOException {
         new File(sockDir.getDir(),
             "testSkipWithVerifyChecksum._PORT.sock").getAbsolutePath());
     DomainSocket.disableBindPathValidation();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FileSystem fs = cluster.getFileSystem();
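
The removed branches were dead code: in each of these test classes, simulatedStorage is a constant initialized to false, so the guarded SimulatedFSDataset.setFactory(conf) call could never execute. For context, a test that genuinely wants the in-memory dataset enables it unconditionally before building the mini cluster. The snippet below is a minimal illustrative sketch, not part of this patch, and assumes the standard hadoop-hdfs test classpath (MiniDFSCluster and SimulatedFSDataset from the test jar), with imports taken from the surrounding test class:

    // Sketch: opting in to simulated (in-memory) DataNode storage in a test.
    Configuration conf = new HdfsConfiguration();
    // Make every DataNode in the mini cluster use SimulatedFSDataset
    // instead of writing real block files to local disks.
    SimulatedFSDataset.setFactory(conf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      // ... exercise HDFS against the simulated storage here ...
    } finally {
      cluster.shutdown();
    }

Since none of the tests touched here ever flip the flag, keeping the guard added no coverage, which is why the patch deletes it along with the now-unused imports and fields.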