HDFS-9773. Remove dead code related to SimulatedFSDataset in tests. Contributed by Brahma Reddy Battula.
parent ac5da11399
commit b0738ae673
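The change is mechanical: every test below declared a simulatedStorage flag hard-coded to false, so the guarded SimulatedFSDataset setup could never execute and the supporting import was unused. A minimal sketch of the pattern being deleted, assembled from the hunks below (illustrative, not verbatim source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

public class DeadSimulatedStorageExample {
  // Always false and never reassigned anywhere in the test,
  // which makes the branch below unreachable dead code.
  final boolean simulatedStorage = false;

  void setUp() {
    Configuration conf = new HdfsConfiguration();
    if (simulatedStorage) {
      // Unreachable: would make DataNodes keep block data in memory
      // instead of on real disks.
      SimulatedFSDataset.setFactory(conf);
    }
  }
}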
CHANGES.txt
@@ -1925,6 +1925,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9797. Log Standby exceptions thrown by RequestHedgingProxyProvider
     at DEBUG Level (Inigo Goiri via asuresh)
 
+    HDFS-9773. Remove dead code related to SimulatedFSDataset in tests.
+    (Brahma Reddy Battula via aajisaka)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
TestFileAppend.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -59,8 +58,6 @@ import org.junit.Test;
 public class TestFileAppend{
   private static final long RANDOM_TEST_RUNTIME = 10000;
 
-  final boolean simulatedStorage = false;
-
   private static byte[] fileContents = null;
 
   //
@@ -101,13 +98,7 @@ public class TestFileAppend{
     }
     byte[] expected =
         new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
-    if (simulatedStorage) {
-      LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(name.toString(),
-          0, AppendTestUtil.FILE_SIZE);
-      DFSTestUtil.fillExpectedBuf(lbs, expected);
-    } else {
-      System.arraycopy(fileContents, 0, expected, 0, expected.length);
-    }
+    System.arraycopy(fileContents, 0, expected, 0, expected.length);
     // do a sanity check. Read the file
     // do not check file status since the file is not yet closed.
     AppendTestUtil.checkFullFile(fileSys, name,
@@ -118,9 +109,6 @@ public class TestFileAppend{
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
@@ -186,9 +174,6 @@ public class TestFileAppend{
   @Test
   public void testSimpleFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     DistributedFileSystem fs = cluster.getFileSystem();
@@ -242,9 +227,6 @@ public class TestFileAppend{
   @Test
   public void testComplexFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     DistributedFileSystem fs = cluster.getFileSystem();
@@ -293,9 +275,6 @@ public class TestFileAppend{
   @Test(expected = FileNotFoundException.class)
   public void testFileNotFound() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
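For contrast, a test that actually wanted in-memory block storage would call the factory setter unconditionally. A minimal hypothetical sketch (the class name and structure are illustrative, not from this patch; the HDFS calls are the ones visible in the hunks above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

public class SimulatedStorageExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Opt in unconditionally: DataNodes back their blocks with an
    // in-memory fake dataset rather than real disk storage.
    SimulatedFSDataset.setFactory(conf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      // ... exercise reads and writes against simulated storage ...
    } finally {
      cluster.shutdown();
    }
  }
}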
TestFileAppend2.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -59,7 +58,6 @@ public class TestFileAppend2 {
   }
 
   static final int numBlocks = 5;
-  final boolean simulatedStorage = false;
 
   private byte[] fileContents = null;
 
@@ -81,9 +79,6 @@ public class TestFileAppend2 {
   @Test
   public void testSimpleAppend() throws IOException {
     final Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -233,9 +228,6 @@ public class TestFileAppend2 {
   @Test
   public void testSimpleAppend2() throws Exception {
     final Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
TestFileAppend4.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
@@ -67,7 +66,6 @@ public class TestFileAppend4 {
   MiniDFSCluster cluster;
   Path file1;
   FSDataOutputStream stm;
-  final boolean simulatedStorage = false;
 
   {
     DFSTestUtil.setNameNodeLogLevel(Level.ALL);
@@ -78,9 +76,6 @@ public class TestFileAppend4 {
   @Before
   public void setUp() throws Exception {
     this.conf = new Configuration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
 
     // lower heartbeat interval for fast recognition of DN death
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
TestLargeBlock.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.junit.Test;
 
 /**
@@ -51,7 +50,6 @@ public class TestLargeBlock {
   // should we verify the data read back from the file? (slow)
   static final boolean verifyData = true;
   static final byte[] pattern = { 'D', 'E', 'A', 'D', 'B', 'E', 'E', 'F'};
-  static final boolean simulatedStorage = false;
 
   // creates a file
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl,
@@ -177,9 +175,6 @@ public class TestLargeBlock {
     final long fileSize = blockSize + 1L;
 
     Configuration conf = new Configuration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
TestShortCircuitLocalRead.java
@@ -97,7 +97,6 @@ public class TestShortCircuitLocalRead {
 
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 5120;
-  final boolean simulatedStorage = false;
 
   // creates a file but does not close it
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
@@ -268,9 +267,6 @@ public class TestShortCircuitLocalRead {
       conf.setBoolean(
           HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
     }
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FileSystem fs = cluster.getFileSystem();
@@ -399,9 +395,6 @@ public class TestShortCircuitLocalRead {
         new File(sockDir.getDir(),
             "testSkipWithVerifyChecksum._PORT.sock").getAbsolutePath());
     DomainSocket.disableBindPathValidation();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FileSystem fs = cluster.getFileSystem();
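For reference, SimulatedFSDataset.setFactory(conf), the call every deleted branch guarded, works by registering the simulated dataset's factory in the DataNode configuration. A hedged sketch of the 2.8-era implementation, reconstructed from memory rather than from this patch; consult SimulatedFSDataset.java for the exact form:

// Assumed implementation sketch (verify against the real source):
// point the DataNode's FsDatasetSpi factory key at SimulatedFSDataset's
// nested Factory class, so MiniDFSCluster DataNodes construct in-memory
// datasets instead of disk-backed ones.
public static void setFactory(Configuration conf) {
  conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
      Factory.class.getName());
}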