HDFS-6453. Use Time#monotonicNow to avoid system clock reset. Contributed by Liang Xie.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1598145 13f79535-47bb-0310-9956-ffa450edef68
parent d25d41d49b
commit e82172c455
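Background (a minimal sketch of the rationale, not part of the commit): System.currentTimeMillis() tracks the wall clock, so an NTP step or a manual clock reset between two reads can make a measured interval negative or wildly inflated. org.apache.hadoop.util.Time#monotonicNow() is backed by System.nanoTime() and only moves forward, which is why the timing code below switches to it. The class name ElapsedTimeExample and the sleep durations are made up for this illustration.

import org.apache.hadoop.util.Time;

public class ElapsedTimeExample {
  public static void main(String[] args) throws InterruptedException {
    // Monotonic clock: unaffected by NTP steps or manual clock resets,
    // so the computed duration is always non-negative and accurate.
    long start = Time.monotonicNow();
    Thread.sleep(100);
    System.out.println("monotonic elapsed = " + (Time.monotonicNow() - start) + " ms");

    // Wall clock: if the system time is set back between these two calls,
    // the "elapsed" value can come out negative or far too large.
    long wallStart = System.currentTimeMillis();
    Thread.sleep(100);
    System.out.println("wall-clock elapsed = " + (System.currentTimeMillis() - wallStart) + " ms");
  }
}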
@@ -289,6 +289,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6448. BlockReaderLocalLegacy should set socket timeout based on
     conf.socketTimeout (liangxie via cmccabe)
 
+    HDFS-6453. Use Time#monotonicNow to avoid system clock reset.
+    (Liang Xie via wang)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

@@ -2588,7 +2588,7 @@ public class DataNode extends Configured
                     return;
                   }
                   synchronized(checkDiskErrorMutex) {
-                    lastDiskErrorCheck = System.currentTimeMillis();
+                    lastDiskErrorCheck = Time.monotonicNow();
                   }
                 }
                 try {

@@ -23,6 +23,7 @@ import java.util.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
 class FsVolumeList {

@@ -97,7 +98,7 @@ class FsVolumeList {
   }
 
   void getAllVolumesMap(final String bpid, final ReplicaMap volumeMap) throws IOException {
-    long totalStartTime = System.currentTimeMillis();
+    long totalStartTime = Time.monotonicNow();
     final List<IOException> exceptions = Collections.synchronizedList(
         new ArrayList<IOException>());
     List<Thread> replicaAddingThreads = new ArrayList<Thread>();

@@ -107,9 +108,9 @@ class FsVolumeList {
         try {
           FsDatasetImpl.LOG.info("Adding replicas to map for block pool " +
               bpid + " on volume " + v + "...");
-          long startTime = System.currentTimeMillis();
+          long startTime = Time.monotonicNow();
           v.getVolumeMap(bpid, volumeMap);
-          long timeTaken = System.currentTimeMillis() - startTime;
+          long timeTaken = Time.monotonicNow() - startTime;
           FsDatasetImpl.LOG.info("Time to add replicas to map for block pool"
               + " " + bpid + " on volume " + v + ": " + timeTaken + "ms");
         } catch (IOException ioe) {

@@ -132,7 +133,7 @@ class FsVolumeList {
     if (!exceptions.isEmpty()) {
       throw exceptions.get(0);
     }
-    long totalTimeTaken = System.currentTimeMillis() - totalStartTime;
+    long totalTimeTaken = Time.monotonicNow() - totalStartTime;
     FsDatasetImpl.LOG.info("Total time to add all replicas to map: "
         + totalTimeTaken + "ms");
   }

@@ -141,9 +142,9 @@ class FsVolumeList {
       throws IOException {
     FsDatasetImpl.LOG.info("Adding replicas to map for block pool " + bpid +
         " on volume " + volume + "...");
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.monotonicNow();
     volume.getVolumeMap(bpid, volumeMap);
-    long timeTaken = System.currentTimeMillis() - startTime;
+    long timeTaken = Time.monotonicNow() - startTime;
     FsDatasetImpl.LOG.info("Time to add replicas to map for block pool " + bpid +
         " on volume " + volume + ": " + timeTaken + "ms");
   }

@@ -195,7 +196,7 @@ class FsVolumeList {
 
 
   void addBlockPool(final String bpid, final Configuration conf) throws IOException {
-    long totalStartTime = System.currentTimeMillis();
+    long totalStartTime = Time.monotonicNow();
 
     final List<IOException> exceptions = Collections.synchronizedList(
         new ArrayList<IOException>());

@@ -206,9 +207,9 @@ class FsVolumeList {
         try {
           FsDatasetImpl.LOG.info("Scanning block pool " + bpid +
               " on volume " + v + "...");
-          long startTime = System.currentTimeMillis();
+          long startTime = Time.monotonicNow();
           v.addBlockPool(bpid, conf);
-          long timeTaken = System.currentTimeMillis() - startTime;
+          long timeTaken = Time.monotonicNow() - startTime;
           FsDatasetImpl.LOG.info("Time taken to scan block pool " + bpid +
               " on " + v + ": " + timeTaken + "ms");
         } catch (IOException ioe) {

@@ -232,7 +233,7 @@ class FsVolumeList {
       throw exceptions.get(0);
     }
 
-    long totalTimeTaken = System.currentTimeMillis() - totalStartTime;
+    long totalTimeTaken = Time.monotonicNow() - totalStartTime;
     FsDatasetImpl.LOG.info("Total time to scan all replicas for block pool " +
         bpid + ": " + totalTimeTaken + "ms");
   }

@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressorStream;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

@@ -160,13 +161,13 @@ public final class FSImageFormatProtobuf {
     }
 
     void load(File file) throws IOException {
-      long start = System.currentTimeMillis();
+      long start = Time.monotonicNow();
       imgDigest = MD5FileUtils.computeMd5ForFile(file);
       RandomAccessFile raFile = new RandomAccessFile(file, "r");
       FileInputStream fin = new FileInputStream(file);
       try {
         loadInternal(raFile, fin);
-        long end = System.currentTimeMillis();
+        long end = Time.monotonicNow();
         LOG.info("Loaded FSImage in " + (end - start) / 1000 + " seconds.");
       } finally {
         fin.close();

@@ -159,7 +159,7 @@ public class TransferFsImage {
       }
     }
 
-    final long milliTime = System.currentTimeMillis();
+    final long milliTime = Time.monotonicNow();
     String tmpFileName = NNStorage.getTemporaryEditsFileName(
         log.getStartTxId(), log.getEndTxId(), milliTime);
     List<File> tmpFiles = dstStorage.getFiles(NameNodeDirType.EDITS,

@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

@@ -215,6 +216,6 @@ public class TestDiskError {
     dataNode.checkDiskError();
     Thread.sleep(dataNode.checkDiskErrorInterval);
     long lastDiskErrorCheck = dataNode.getLastDiskErrorCheck();
-    assertTrue("Disk Error check is not performed within " + dataNode.checkDiskErrorInterval + " ms", ((System.currentTimeMillis()-lastDiskErrorCheck) < (dataNode.checkDiskErrorInterval + slackTime)));
+    assertTrue("Disk Error check is not performed within " + dataNode.checkDiskErrorInterval + " ms", ((Time.monotonicNow()-lastDiskErrorCheck) < (dataNode.checkDiskErrorInterval + slackTime)));
   }
 }