HDFS-9238. Update TestFileCreation.testLeaseExpireHardLimit() to avoid using DataNodeTestUtils.getFile(). (Tony Wu via lei)

Author: Lei Xu
Date: 2015-10-14 13:10:26 -07:00
parent 0e4fb329ed
commit 8651677446
2 changed files with 6 additions and 11 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -694,6 +694,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-1172. Blocks in newly completed files are considered under-replicated
     too quickly. (Masatake Iwasaki via jing9)

+    HDFS-9238. Update TestFileCreation.testLeaseExpireHardLimit() to avoid using
+    DataNodeTestUtils.getFile(). (Tony Wu via lei)
+
   OPTIMIZATIONS

     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -40,10 +40,9 @@ import static org.junit.Assume.assumeTrue;
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
-import java.io.File;
 import java.io.FileNotFoundException;
-import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
 import java.net.InetSocketAddress;
@@ -66,7 +65,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -1000,15 +998,9 @@ public class TestFileCreation {
     for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
       DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
       ExtendedBlock blk = locatedblock.getBlock();
-      Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
-          blk.getBlockPoolId(), blk.getBlockId());
-      final File blockfile = DataNodeTestUtils.getFile(datanode,
-          blk.getBlockPoolId(), b.getBlockId());
-      System.out.println("blockfile=" + blockfile);
-      if (blockfile != null) {
-        BufferedReader in = new BufferedReader(new FileReader(blockfile));
+      try (BufferedReader in = new BufferedReader(new InputStreamReader(
+          datanode.getFSDataset().getBlockInputStream(blk, 0)))) {
         assertEquals("something", in.readLine());
-        in.close();
         successcount++;
       }
     }
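
For reference, here is the new read path in isolation. This is a minimal sketch, not code from the commit: the helper name assertFirstLineOnEveryReplica is hypothetical, and the MiniDFSCluster, LocatedBlock, and expected first line "something" are taken from the surrounding test. Reading through FsDatasetSpi#getBlockInputStream() instead of DataNodeTestUtils.getFile() avoids assuming that replicas are stored as plain local files, and the try-with-resources block closes the reader even when the assertion fails, which the old FileReader path only did on success.

import static org.junit.Assert.assertEquals;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Sketch (hypothetical helper): verify the first line of a block on every
// datanode that holds a replica, going through the dataset interface.
private static void assertFirstLineOnEveryReplica(MiniDFSCluster cluster,
    LocatedBlock locatedblock) throws IOException {
  for (DatanodeInfo info : locatedblock.getLocations()) {
    DataNode datanode = cluster.getDataNode(info.getIpcPort());
    ExtendedBlock blk = locatedblock.getBlock();
    // getBlockInputStream(block, seekOffset) streams the replica's bytes
    // from the given offset, regardless of how the FsDatasetSpi
    // implementation stores them on disk.
    try (BufferedReader in = new BufferedReader(new InputStreamReader(
        datanode.getFSDataset().getBlockInputStream(blk, 0)))) {
      assertEquals("something", in.readLine());
    }
  }
}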