HDFS-4681. TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks fails using IBM java (Ayappan via aw)
parent b01d3433ae
commit dbc9b6433e
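
Context for the fix: the test compared raw block bytes after decoding them to a Java String, and that decode uses the JVM's default charset, which can differ between IBM Java and other JDKs on the same platform. Decoding arbitrary binary data through a charset is lossy, so the String comparison is unreliable; the patch moves the helpers and assertions to byte[]. A minimal standalone sketch of the pitfall (the class name is illustrative, not part of the patch):

    import java.nio.charset.Charset;
    import java.util.Arrays;

    public class CharsetRoundTrip {
      public static void main(String[] args) {
        // Arbitrary binary data, like the raw bytes of an HDFS block file.
        byte[] raw = { 0x00, (byte) 0x9F, (byte) 0xC3, 0x28 };

        // String-based reading decodes with the platform default charset;
        // bytes that are invalid in that charset become replacement chars.
        String decoded = new String(raw, Charset.defaultCharset());
        byte[] reEncoded = decoded.getBytes(Charset.defaultCharset());

        System.out.println("default charset: " + Charset.defaultCharset());
        // false under UTF-8, true under some single-byte charsets -- exactly
        // the vendor/platform dependence the patch removes.
        System.out.println("lossless: " + Arrays.equals(raw, reEncoded));
      }
    }
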
CHANGES.txt
@@ -306,6 +306,9 @@ Trunk (Unreleased)
 
     HDFS-7803. Wrong command mentioned in HDFSHighAvailabilityWithQJM
     documentation (Arshad Mohammad via aw)
 
+    HDFS-4681. TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks
+    fails using IBM java (Ayappan via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
DFSTestUtil.java
@@ -251,6 +251,12 @@ public class DFSTestUtil {
   public void createFiles(FileSystem fs, String topdir) throws IOException {
     createFiles(fs, topdir, (short)3);
   }
 
+  public static byte[] readFileAsBytes(FileSystem fs, Path fileName) throws IOException {
+    ByteArrayOutputStream os = new ByteArrayOutputStream();
+    IOUtils.copyBytes(fs.open(fileName), os, 1024, true);
+    return os.toByteArray();
+  }
+
   /** create nFiles with random names and directory hierarchies
    * with random (but reproducible) data in them.
@@ -723,6 +729,12 @@ public class DFSTestUtil {
     return b.toString();
   }
 
+  public static byte[] readFileAsBytes(File f) throws IOException {
+    ByteArrayOutputStream os = new ByteArrayOutputStream();
+    IOUtils.copyBytes(new FileInputStream(f), os, 1024, true);
+    return os.toByteArray();
+  }
+
   /* Write the given string to the given file */
   public static void writeFile(FileSystem fs, Path p, String s)
       throws IOException {
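
A hedged usage sketch of the new FileSystem-based helper (the path and contents are illustrative only; writeFile is the existing DFSTestUtil helper visible in the trailing context above):

    // Assumes a FileSystem fs from a running MiniDFSCluster.
    Path p = new Path("/test/readFileAsBytes");
    DFSTestUtil.writeFile(fs, p, "some test data");   // existing helper
    byte[] bytes = DFSTestUtil.readFileAsBytes(fs, p);
    assertArrayEquals("round trip", "some test data".getBytes(), bytes);
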
MiniDFSCluster.java
@@ -1869,6 +1869,16 @@ public class MiniDFSCluster {
     return null;
   }
 
+  public byte[] readBlockOnDataNodeAsBytes(int i, ExtendedBlock block)
+      throws IOException {
+    assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode "+i;
+    File blockFile = getBlockFile(i, block);
+    if (blockFile != null && blockFile.exists()) {
+      return DFSTestUtil.readFileAsBytes(blockFile);
+    }
+    return null;
+  }
+
   /**
    * Corrupt a block on a particular datanode.
    *
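
The datanode-side helper returns null when datanode i stores no replica of the block, so callers filter before asserting, as the test below does. A hedged sketch along the same lines (fs, filePath, fileContent, and cluster are assumed from a surrounding test):

    // Verify every on-disk replica of the first block byte-for-byte.
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, filePath);
    for (int dn = 0; dn < cluster.getDataNodes().size(); dn++) {
      byte[] replica = cluster.readBlockOnDataNodeAsBytes(dn, blk);
      if (replica != null) {  // null: datanode dn holds no replica of blk
        assertArrayEquals("replica on datanode " + dn, fileContent, replica);
      }
    }
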
TestBlocksWithNotEnoughRacks.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
 
 import java.util.ArrayList;
@@ -202,7 +203,7 @@ public class TestBlocksWithNotEnoughRacks {
     final FileSystem fs = cluster.getFileSystem();
 
     DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
-    final String fileContent = DFSTestUtil.readFile(fs, filePath);
+    final byte[] fileContent = DFSTestUtil.readFileAsBytes(fs, filePath);
 
     ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
     DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
@@ -224,9 +225,9 @@ public class TestBlocksWithNotEnoughRacks {
       // Ensure all replicas are valid (the corrupt replica may not
       // have been cleaned up yet).
       for (int i = 0; i < racks.length; i++) {
-        String blockContent = cluster.readBlockOnDataNode(i, b);
+        byte[] blockContent = cluster.readBlockOnDataNodeAsBytes(i, b);
         if (blockContent != null && i != dnToCorrupt) {
-          assertEquals("Corrupt replica", fileContent, blockContent);
+          assertArrayEquals("Corrupt replica", fileContent, blockContent);
         }
       }
     } finally {
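
Why assertArrayEquals rather than assertEquals: with byte[] operands, JUnit's assertEquals falls back to Object.equals, which for Java arrays is reference identity, so two distinct arrays with identical contents would not compare equal. A standalone illustration:

    byte[] a = { 1, 2, 3 };
    byte[] b = { 1, 2, 3 };
    // assertEquals(a, b);       // would fail: arrays compare by reference
    assertArrayEquals(a, b);     // passes: compares length and elements
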