HDFS-9773. Remove dead code related to SimulatedFSDataset in tests. Contributed by Brahma Reddy Battula.

(cherry picked from commit b0738ae673)
(cherry picked from commit 5e78264cde)
Akira Ajisaka 2016-02-15 15:35:06 +09:00
parent bb02c3c803
commit 4d1c23f55c
6 changed files with 4 additions and 47 deletions
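Why the code was dead: each of the five touched test classes declared a constant flag, final boolean simulatedStorage = false, and wrapped SimulatedFSDataset setup in a guard on that flag, so the guarded branch could never execute. A minimal Java sketch of the removed pattern (illustrative only; the class and method names below are made up, not taken from any one file):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

    public class DeadGuardSketch {
      // Hardcoded to false and never flipped anywhere in the suite,
      // so the guarded block below is unreachable dead code.
      private final boolean simulatedStorage = false;

      void configure(Configuration conf) {
        if (simulatedStorage) {
          // Dead branch: the pattern this commit deletes from five tests.
          SimulatedFSDataset.setFactory(conf);
        }
        // ... live test setup continues unconditionally ...
      }
    }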

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -913,6 +913,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9797. Log Standby exceptions thrown by RequestHedgingProxyProvider
     at DEBUG Level (Inigo Goiri via asuresh)
 
+    HDFS-9773. Remove dead code related to SimulatedFSDataset in tests.
+    (Brahma Reddy Battula via aajisaka)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

TestFileAppend.java

@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -59,8 +58,6 @@ import org.junit.Test;
 
 public class TestFileAppend{
   private static final long RANDOM_TEST_RUNTIME = 10000;
-  final boolean simulatedStorage = false;
-
   private static byte[] fileContents = null;
 
   //
@@ -101,13 +98,7 @@ public class TestFileAppend{
       }
       byte[] expected =
           new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
-      if (simulatedStorage) {
-        LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(name.toString(),
-            0, AppendTestUtil.FILE_SIZE);
-        DFSTestUtil.fillExpectedBuf(lbs, expected);
-      } else {
-        System.arraycopy(fileContents, 0, expected, 0, expected.length);
-      }
+      System.arraycopy(fileContents, 0, expected, 0, expected.length);
       // do a sanity check. Read the file
       // do not check file status since the file is not yet closed.
       AppendTestUtil.checkFullFile(fileSys, name,
@@ -118,9 +109,6 @@ public class TestFileAppend{
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
@@ -186,9 +174,6 @@ public class TestFileAppend{
   @Test
   public void testSimpleFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     DistributedFileSystem fs = cluster.getFileSystem();
@@ -242,9 +227,6 @@ public class TestFileAppend{
   @Test
   public void testComplexFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     DistributedFileSystem fs = cluster.getFileSystem();
@@ -293,9 +275,6 @@ public class TestFileAppend{
   @Test(expected = FileNotFoundException.class)
   public void testFileNotFound() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
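A test that actually wants the in-memory dataset calls SimulatedFSDataset.setFactory(conf) unconditionally before building the cluster, which is why the always-false guards above were safe to delete. A hedged sketch of that opt-in path (the class and method names are illustrative assumptions, not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

    public class SimulatedStorageSketch {
      void runAgainstSimulatedStorage() throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Opt in up front instead of behind a constant-false flag.
        SimulatedFSDataset.setFactory(conf);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          // ... exercise reads/writes against simulated block storage ...
        } finally {
          cluster.shutdown();
        }
      }
    }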

TestFileAppend2.java

@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -59,7 +58,6 @@ public class TestFileAppend2 {
   }
 
   static final int numBlocks = 5;
-  final boolean simulatedStorage = false;
 
   private byte[] fileContents = null;
@@ -81,9 +79,6 @@ public class TestFileAppend2 {
   @Test
   public void testSimpleAppend() throws IOException {
     final Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -233,9 +228,6 @@ public class TestFileAppend2 {
   @Test
   public void testSimpleAppend2() throws Exception {
     final Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

TestFileAppend4.java

@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
@@ -67,7 +66,6 @@ public class TestFileAppend4 {
   MiniDFSCluster cluster;
   Path file1;
   FSDataOutputStream stm;
-  final boolean simulatedStorage = false;
 
   {
     DFSTestUtil.setNameNodeLogLevel(Level.ALL);
@@ -78,9 +76,6 @@ public class TestFileAppend4 {
   @Before
   public void setUp() throws Exception {
     this.conf = new Configuration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     // lower heartbeat interval for fast recognition of DN death
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,

TestLargeBlock.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.junit.Test;
 
 /**
@@ -51,7 +50,6 @@ public class TestLargeBlock {
   // should we verify the data read back from the file? (slow)
   static final boolean verifyData = true;
   static final byte[] pattern = { 'D', 'E', 'A', 'D', 'B', 'E', 'E', 'F'};
-  static final boolean simulatedStorage = false;
 
   // creates a file
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl,
@@ -177,9 +175,6 @@ public class TestLargeBlock {
     final long fileSize = blockSize + 1L;
 
     Configuration conf = new Configuration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {

TestShortCircuitLocalRead.java

@@ -97,7 +97,6 @@ public class TestShortCircuitLocalRead {
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 5120;
-  final boolean simulatedStorage = false;
 
   // creates a file but does not close it
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
@@ -268,9 +267,6 @@ public class TestShortCircuitLocalRead {
       conf.setBoolean(
           HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
     }
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FileSystem fs = cluster.getFileSystem();
@@ -399,9 +395,6 @@ public class TestShortCircuitLocalRead {
         new File(sockDir.getDir(),
             "testSkipWithVerifyChecksum._PORT.sock").getAbsolutePath());
     DomainSocket.disableBindPathValidation();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FileSystem fs = cluster.getFileSystem();