HDFS-10915. Fix time measurement bug in TestDatanodeRestart. Contributed by Xiaobing Zhou
parent 9c9736463b
commit 6437ba18c5
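The patch replaces System.currentTimeMillis(), a wall-clock source that can step forwards or backwards when NTP or an operator adjusts the system time, with org.apache.hadoop.util.Time.monotonicNow() for measuring how long the expected failures take. Below is a minimal, self-contained sketch of the difference using only JDK calls; the class and method names are illustrative and not part of the patch, and it assumes Time.monotonicNow() behaves like a System.nanoTime()-backed millisecond counter.

import java.util.concurrent.TimeUnit;

public class ElapsedTimeDemo {

  // Wall-clock delta: the result is wrong if the system clock is adjusted
  // between the two reads (this is what the test used before the patch).
  static long elapsedWallClockMs(Runnable task) {
    long start = System.currentTimeMillis();
    task.run();
    return System.currentTimeMillis() - start;
  }

  // Monotonic delta: System.nanoTime() is unaffected by clock adjustments;
  // Time.monotonicNow() provides the same kind of reading in milliseconds.
  static long elapsedMonotonicMs(Runnable task) {
    long start = System.nanoTime();
    task.run();
    return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
  }

  public static void main(String[] args) {
    Runnable work = () -> {
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    };
    System.out.println("wall clock: " + elapsedWallClockMs(work) + " ms");
    System.out.println("monotonic:  " + elapsedMonotonicMs(work) + " ms");
  }
}

Measuring an interval as the difference of two wall-clock reads only holds if the clock never steps during the interval; a monotonic source removes that assumption, which matters for the 5-10 second window checks in the diff below.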
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -172,17 +173,17 @@ public class TestDatanodeRestart {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
 
-    start = System.currentTimeMillis();
+    start = Time.monotonicNow();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       DFSTestUtil.createFile(fileSys, file, 10240L, (short)1, 0L);
       // It is a bug if this does not fail.
       throw new IOException("Did not fail!");
     } catch (org.apache.hadoop.ipc.RemoteException e) {
-      long elapsed = System.currentTimeMillis() - start;
+      long elapsed = Time.monotonicNow() - start;
       // timers have at-least semantics, so it should be at least 5 seconds.
       if (elapsed < 5000 || elapsed > 10000) {
-        throw new IOException(elapsed + " seconds passed.", e);
+        throw new IOException(elapsed + " milliseconds passed.", e);
       }
     }
     DataNodeFaultInjector.set(oldDnInjector);
@@ -195,18 +196,18 @@ public class TestDatanodeRestart {
       // back to simulating unregistered node.
       DataNodeFaultInjector.set(dnFaultInjector);
       byte[] buffer = new byte[8];
-      start = System.currentTimeMillis();
+      start = Time.monotonicNow();
       try {
         fileSys.open(file).read(0L, buffer, 0, 1);
         throw new IOException("Did not fail!");
       } catch (IOException e) {
-        long elapsed = System.currentTimeMillis() - start;
+        long elapsed = Time.monotonicNow() - start;
         if (e.getMessage().contains("readBlockLength")) {
           throw new IOException("Failed, but with unexpected exception:", e);
         }
         // timers have at-least semantics, so it should be at least 5 seconds.
         if (elapsed < 5000 || elapsed > 10000) {
-          throw new IOException(elapsed + " seconds passed.", e);
+          throw new IOException(elapsed + " milliseconds passed.", e);
         }
       }
       DataNodeFaultInjector.set(oldDnInjector);
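Both hunks keep the test's existing assertion: the operation is expected to fail only after the simulated unregistered DataNode has been retried for roughly 5 seconds (timers have at-least semantics), so the measured elapsed time must fall between 5000 and 10000 milliseconds. A hedged, self-contained sketch of that pattern with monotonic timing follows; the class, method, and constant names are illustrative, not taken from TestDatanodeRestart.

import java.io.IOException;

public class RetryWindowCheckDemo {

  // Bounds mirroring the test: the failure should arrive after at least ~5s
  // of retries, but well before ~10s.
  static final long MIN_ELAPSED_MS = 5000;
  static final long MAX_ELAPSED_MS = 10000;

  // Runs an operation that is supposed to fail, and rejects failures that
  // arrive outside the expected window.
  static void assertFailsWithinWindow(Runnable operationExpectedToFail)
      throws IOException {
    long start = System.nanoTime() / 1_000_000L; // monotonic milliseconds
    try {
      operationExpectedToFail.run();
      throw new IOException("Did not fail!");
    } catch (RuntimeException expected) {
      long elapsed = System.nanoTime() / 1_000_000L - start;
      if (elapsed < MIN_ELAPSED_MS || elapsed > MAX_ELAPSED_MS) {
        throw new IOException(elapsed + " milliseconds passed.", expected);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    assertFailsWithinWindow(() -> {
      try {
        Thread.sleep(6000); // stand-in for the DataNode registration retry window
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      throw new RuntimeException("simulated failure after retries");
    });
    System.out.println("failure arrived inside the expected window");
  }
}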