diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java index 5c7bef2104c..8ff922336e7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java @@ -142,7 +142,7 @@ public class Threads { * @param msToWait the amount of time to sleep in milliseconds */ public static void sleepWithoutInterrupt(final long msToWait) { - long timeMillis = System.currentTimeMillis(); + long timeMillis = EnvironmentEdgeManager.currentTimeMillis(); long endTime = timeMillis + msToWait; boolean interrupted = false; while (timeMillis < endTime) { @@ -151,7 +151,7 @@ public class Threads { } catch (InterruptedException ex) { interrupted = true; } - timeMillis = System.currentTimeMillis(); + timeMillis = EnvironmentEdgeManager.currentTimeMillis(); } if (interrupted) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java index 692e3fc8f28..ca5bb7062d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Chore.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.Sleeper; @@ -60,7 +61,7 @@ public abstract class Chore extends HasThread { try { boolean initialChoreComplete = false; while (!this.stopper.isStopped()) { - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); try { if (!initialChoreComplete) { initialChoreComplete = initialChore(); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index fe718f69311..25856c1e26c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.JenkinsHash; import org.apache.hadoop.hbase.util.MD5Hash; @@ -261,7 +262,7 @@ public class HRegionInfo implements Comparable { public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey, final boolean split) throws IllegalArgumentException { - this(tableName, startKey, endKey, split, System.currentTimeMillis()); + this(tableName, startKey, endKey, split, EnvironmentEdgeManager.currentTimeMillis()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java index cb2d9c3a2e3..f48951ad4b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HServerInfo.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.net.InetSocketAddress; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.VersionedWritable; import org.apache.hadoop.io.WritableComparable; @@ -56,7 +57,7 @@ implements WritableComparable { * @param webuiport Port the webui runs on. 
*/ public HServerInfo(final HServerAddress serverAddress, final int webuiport) { - this(serverAddress, System.currentTimeMillis(), webuiport); + this(serverAddress, EnvironmentEdgeManager.currentTimeMillis(), webuiport); } public HServerInfo(HServerAddress serverAddress, long startCode, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java index ccc22fd47e6..862fecc6d02 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionTransition.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.executor.EventHandler.EventType; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; @@ -108,7 +109,7 @@ public class RegionTransition { ZooKeeperProtos.RegionTransition.Builder builder = ZooKeeperProtos.RegionTransition.newBuilder(). setEventTypeCode(type.getCode()).setRegionName(ByteString.copyFrom(regionName)). 
setOriginServerName(pbsn); - builder.setCreateTime(System.currentTimeMillis()); + builder.setCreateTime(EnvironmentEdgeManager.currentTimeMillis()); if (payload != null) builder.setPayload(ByteString.copyFrom(payload)); return new RegionTransition(builder.build()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java index 8a383e4b91f..d4bde3a615c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.MetaNodeTracker; import org.apache.hadoop.hbase.zookeeper.RootRegionTracker; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -470,10 +471,10 @@ public class CatalogTracker { */ public ServerName waitForMeta(long timeout) throws InterruptedException, IOException, NotAllMetaRegionsOnlineException { - long stop = System.currentTimeMillis() + timeout; + long stop = EnvironmentEdgeManager.currentTimeMillis() + timeout; long waitTime = Math.min(50, timeout); synchronized (metaAvailable) { - while(!stopped && (timeout == 0 || System.currentTimeMillis() < stop)) { + while(!stopped && (timeout == 0 || EnvironmentEdgeManager.currentTimeMillis() < stop)) { if (getMetaServerConnection() != null) { return metaLocation; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index 088753c52fb..198f2bd03fa 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.DataOutputBuffer; /** @@ -97,7 +98,7 @@ public class ClientScanner extends AbstractClientScanner { } this.scan = scan; this.tableName = tableName; - this.lastNext = System.currentTimeMillis(); + this.lastNext = EnvironmentEdgeManager.currentTimeMillis(); this.connection = connection; if (scan.getMaxResultSize() > 0) { this.maxScannerResultSize = scan.getMaxResultSize(); @@ -285,8 +286,8 @@ public class ClientScanner extends AbstractClientScanner { // If we are over the timeout, throw this exception to the client // Else, it's because the region moved and we used the old id // against the new region server; reset the scanner. 
- if (timeout < System.currentTimeMillis()) { - long elapsed = System.currentTimeMillis() - lastNext; + if (timeout < EnvironmentEdgeManager.currentTimeMillis()) { + long elapsed = EnvironmentEdgeManager.currentTimeMillis() - lastNext; ScannerTimeoutException ex = new ScannerTimeoutException( elapsed + "ms passed since the last invocation, " + "timeout is currently set to " + scannerTimeout); @@ -313,7 +314,7 @@ public class ClientScanner extends AbstractClientScanner { callable = null; continue; } - long currentTime = System.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); if (this.scanMetrics != null ) { this.scanMetrics.sumOfMillisSecBetweenNexts.inc(currentTime-lastNext); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java index 86ee32b6239..c4cc37a9674 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -54,7 +54,7 @@ import java.util.Map; * Specifying timestamps, deleteFamily and deleteColumns will delete all * versions with a timestamp less than or equal to that passed. If no * timestamp is specified, an entry is added with a timestamp of 'now' - * where 'now' is the servers's System.currentTimeMillis(). + * where 'now' is the server's EnvironmentEdgeManager.currentTimeMillis(). * Specifying a timestamp to the deleteColumn method will * delete versions only with a timestamp equal to that specified.
* If no timestamp is passed to deleteColumn, internally, it figures the diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index ab2ab19416c..a6feb508bc3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDe import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.SoftValueSortedMap; import org.apache.hadoop.hbase.util.Triple; @@ -1446,7 +1447,7 @@ public class HConnectionManager { --keepAliveZookeeperUserCount; if (keepAliveZookeeperUserCount <=0 ){ keepZooKeeperWatcherAliveUntil = - System.currentTimeMillis() + keepAlive; + EnvironmentEdgeManager.currentTimeMillis() + keepAlive; } } } @@ -1484,7 +1485,7 @@ public class HConnectionManager { } protected void closeMasterProtocol(MasterProtocolState protocolState) { - if (System.currentTimeMillis() > protocolState.keepAliveUntil) { + if (EnvironmentEdgeManager.currentTimeMillis() > protocolState.keepAliveUntil) { hci.closeMasterProtocol(protocolState); protocolState.keepAliveUntil = Long.MAX_VALUE; } @@ -1494,7 +1495,7 @@ public class HConnectionManager { protected void chore() { synchronized (hci.masterAndZKLock) { if (hci.canCloseZKW) { - if (System.currentTimeMillis() > + if (EnvironmentEdgeManager.currentTimeMillis() > hci.keepZooKeeperWatcherAliveUntil) { hci.closeZooKeeperWatcher(); @@ -1659,7 +1660,7 @@ public class HConnectionManager { --protocolState.userCount; if (protocolState.userCount <= 0) { protocolState.keepAliveUntil = - 
System.currentTimeMillis() + keepAlive; + EnvironmentEdgeManager.currentTimeMillis() + keepAlive; } } } @@ -2096,12 +2097,12 @@ public class HConnectionManager { final Callable delegate = hci.createCallable(loc, multi, tableName); return new Callable() { - private final long creationTime = System.currentTimeMillis(); + private final long creationTime = EnvironmentEdgeManager.currentTimeMillis(); @Override public MultiResponse call() throws Exception { try { - final long waitingTime = delay + creationTime - System.currentTimeMillis(); + final long waitingTime = delay + creationTime - EnvironmentEdgeManager.currentTimeMillis(); if (waitingTime > 0) { Thread.sleep(waitingTime); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index eba32a738fe..baab481a895 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.PairOfSameType; /** @@ -409,7 +410,7 @@ public class MetaScanner { HRegionInfo splitB = daughters.getSecond(); HTable metaTable = getMetaTable(); - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTimeMillis(); Result resultA = getRegionResultBlocking(metaTable, blockingTimeout, splitA.getRegionName()); if (resultA != null) { @@ -419,7 +420,7 @@ public class MetaScanner { throw new RegionOfflineException("Split daughter region " + splitA.getRegionNameAsString() + " cannot be found in META."); } - long rem = blockingTimeout - (System.currentTimeMillis() - start); + long rem = 
blockingTimeout - (EnvironmentEdgeManager.currentTimeMillis() - start); Result resultB = getRegionResultBlocking(metaTable, rem, splitB.getRegionName()); @@ -440,8 +441,8 @@ public class MetaScanner { if (LOG.isDebugEnabled()) { LOG.debug("blocking until region is in META: " + Bytes.toStringBinary(regionName)); } - long start = System.currentTimeMillis(); - while (System.currentTimeMillis() - start < timeout) { + long start = EnvironmentEdgeManager.currentTimeMillis(); + while (EnvironmentEdgeManager.currentTimeMillis() - start < timeout) { Get get = new Get(regionName); Result result = metaTable.get(get); HRegionInfo info = getHRegionInfo(result); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 7f1efc6d91a..d8ade8bc770 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.DNS; @@ -141,10 +142,10 @@ public class ScannerCallable extends ServerCallable { RequestConverter.buildScanRequest(scannerId, caching, false); try { ScanResponse response = server.scan(null, request); - long timestamp = System.currentTimeMillis(); + long timestamp = EnvironmentEdgeManager.currentTimeMillis(); rrs = ResponseConverter.getResults(response); if (logScannerActivity) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); if (now - timestamp > logCutOffLatency) 
{ int rows = rrs == null ? 0 : rrs.length; LOG.info("Took " + (now-timestamp) + "ms to fetch " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java index 47f699a0280..660716a6731 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.client.ClientProtocol; import org.apache.hadoop.hbase.ipc.HBaseRPC; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.ipc.RemoteException; import com.google.protobuf.ServiceException; @@ -114,12 +115,12 @@ public abstract class ServerCallable implements Callable { public void beforeCall() { HBaseRPC.setRpcTimeout(this.callTimeout); - this.startTime = System.currentTimeMillis(); + this.startTime = EnvironmentEdgeManager.currentTimeMillis(); } public void afterCall() { HBaseRPC.resetRpcTimeout(); - this.endTime = System.currentTimeMillis(); + this.endTime = EnvironmentEdgeManager.currentTimeMillis(); } public void shouldRetry(Throwable throwable) throws IOException { @@ -182,7 +183,7 @@ public abstract class ServerCallable implements Callable { } RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(t, - System.currentTimeMillis(), toString()); + EnvironmentEdgeManager.currentTimeMillis(), toString()); exceptions.add(qt); if (tries == numRetries - 1) { throw new RetriesExhaustedException(tries, exceptions); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 49d4c0fd34c..63b5cf70709 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.CoprocessorProtocol; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet; import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.hbase.Server; @@ -187,7 +188,7 @@ public abstract class CoprocessorHost { FileSystem fs = path.getFileSystem(HBaseConfiguration.create()); Path dst = new Path(System.getProperty("java.io.tmpdir") + java.io.File.separator +"." + pathPrefix + - "." + className + "." + System.currentTimeMillis() + ".jar"); + "." + className + "." + EnvironmentEdgeManager.currentTimeMillis() + ".jar"); fs.copyToLocalFile(path, dst); File tmpLocal = new File(dst.toString()); tmpLocal.deleteOnExit(); @@ -213,7 +214,7 @@ public abstract class CoprocessorHost { if (entry.getName().matches("/lib/[^/]+\\.jar")) { File file = new File(System.getProperty("java.io.tmpdir") + java.io.File.separator +"." + pathPrefix + - "." + className + "." + System.currentTimeMillis() + "." + entry.getName().substring(5)); + "." + className + "." + EnvironmentEdgeManager.currentTimeMillis() + "." 
+ entry.getName().substring(5)); IOUtils.copyBytes(jarFile.getInputStream(entry), new FileOutputStream(file), conf, true); file.deleteOnExit(); paths.add(file.toURL()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java index 379982ebc9e..5e6c03301f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseClient.java @@ -266,7 +266,7 @@ public class HBaseClient { protected Call(RpcRequestBody param) { this.param = param; - this.startTime = System.currentTimeMillis(); + this.startTime = EnvironmentEdgeManager.currentTimeMillis(); synchronized (HBaseClient.this) { this.id = counter++; } @@ -432,7 +432,7 @@ public class HBaseClient { /** Update lastActivity with the current time. */ protected void touch() { - lastActivity.set(System.currentTimeMillis()); + lastActivity.set(EnvironmentEdgeManager.currentTimeMillis()); } /** @@ -604,7 +604,7 @@ public class HBaseClient { protected synchronized boolean waitForWork() { if (calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { long timeout = maxIdleTime- - (System.currentTimeMillis()-lastActivity.get()); + (EnvironmentEdgeManager.currentTimeMillis()-lastActivity.get()); if (timeout>0) { try { wait(timeout); @@ -634,7 +634,7 @@ public class HBaseClient { * since last I/O activity is equal to or greater than the ping interval */ protected synchronized void sendPing() throws IOException { - long curTime = System.currentTimeMillis(); + long curTime = EnvironmentEdgeManager.currentTimeMillis(); if ( curTime - lastActivity.get() >= pingInterval) { lastActivity.set(curTime); //noinspection SynchronizeOnNonFinalField @@ -1056,7 +1056,7 @@ public class HBaseClient { Iterator> itor = calls.entrySet().iterator(); while (itor.hasNext()) { Call c = itor.next().getValue(); - long waitTime = System.currentTimeMillis() - 
c.getStartTime(); + long waitTime = EnvironmentEdgeManager.currentTimeMillis() - c.getStartTime(); if (waitTime >= rpcTimeout) { if (this.closeException == null) { // There may be no exception in the case that there are many calls @@ -1080,7 +1080,7 @@ public class HBaseClient { try { if (!calls.isEmpty()) { Call firstCall = calls.get(calls.firstKey()); - long maxWaitTime = System.currentTimeMillis() - firstCall.getStartTime(); + long maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() - firstCall.getStartTime(); if (maxWaitTime < rpcTimeout) { rpcTimeout -= maxWaitTime; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java index 63efc483a9f..98bf9736458 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseRPC.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.ReflectionUtils; @@ -224,7 +225,7 @@ public class HBaseRPC { long timeout ) throws IOException { // HBase does limited number of reconnects which is different from hadoop. 
- long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); IOException ioe; int reconnectAttempts = 0; while (true) { @@ -257,7 +258,7 @@ public class HBaseRPC { } } // check if timed out - if (System.currentTimeMillis() - timeout >= startTime) { + if (EnvironmentEdgeManager.currentTimeMillis() - timeout >= startTime) { throw ioe; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java index 8f0cef1d4c7..d8d9725c065 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java @@ -84,6 +84,7 @@ import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHan import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler; import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslStatus; import org.apache.hadoop.hbase.util.ByteBufferOutputStream; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Writable; @@ -333,7 +334,7 @@ public abstract class HBaseServer implements RpcServer { this.id = id; this.rpcRequestBody = rpcRequestBody; this.connection = connection; - this.timestamp = System.currentTimeMillis(); + this.timestamp = EnvironmentEdgeManager.currentTimeMillis(); this.response = null; this.delayResponse = false; this.responder = responder; @@ -464,7 +465,7 @@ public abstract class HBaseServer implements RpcServer { @Override public void throwExceptionIfCallerDisconnected() throws CallerDisconnectedException { if (!connection.channel.isOpen()) { - long afterTime = System.currentTimeMillis() - timestamp; + long afterTime = EnvironmentEdgeManager.currentTimeMillis() - timestamp; throw new CallerDisconnectedException( "Aborting call " + this + " after " 
+ afterTime + " ms, since " + "caller disconnected"); @@ -616,7 +617,7 @@ public abstract class HBaseServer implements RpcServer { */ private void cleanupConnections(boolean force) { if (force || numConnections > thresholdIdleConnections) { - long currentTime = System.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) { return; } @@ -653,7 +654,7 @@ public abstract class HBaseServer implements RpcServer { } else i++; } - lastCleanupRunTime = System.currentTimeMillis(); + lastCleanupRunTime = EnvironmentEdgeManager.currentTimeMillis(); } } @@ -751,7 +752,7 @@ public abstract class HBaseServer implements RpcServer { try { reader.startAdd(); SelectionKey readKey = reader.registerChannel(channel); - c = getConnection(channel, System.currentTimeMillis()); + c = getConnection(channel, EnvironmentEdgeManager.currentTimeMillis()); readKey.attach(c); synchronized (connectionList) { connectionList.add(numConnections, c); @@ -774,7 +775,7 @@ public abstract class HBaseServer implements RpcServer { if (c == null) { return; } - c.setLastContact(System.currentTimeMillis()); + c.setLastContact(EnvironmentEdgeManager.currentTimeMillis()); try { count = c.readAndProcess(); @@ -793,7 +794,7 @@ public abstract class HBaseServer implements RpcServer { // c = null; } else { - c.setLastContact(System.currentTimeMillis()); + c.setLastContact(EnvironmentEdgeManager.currentTimeMillis()); } } @@ -867,7 +868,7 @@ public abstract class HBaseServer implements RpcServer { LOG.info(getName() + ": doAsyncWrite threw exception " + e); } } - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); if (now < lastPurgeTime + purgeTimeout) { continue; } @@ -1022,7 +1023,7 @@ public abstract class HBaseServer implements RpcServer { if (inHandler) { // set the serve time when the response has to be sent later - call.timestamp = System.currentTimeMillis(); + 
call.timestamp = EnvironmentEdgeManager.currentTimeMillis(); if (enqueueInSelector(call)) done = true; } @@ -1070,7 +1071,7 @@ public abstract class HBaseServer implements RpcServer { // void doRespond(Call call) throws IOException { // set the serve time when the response has to be sent later - call.timestamp = System.currentTimeMillis(); + call.timestamp = EnvironmentEdgeManager.currentTimeMillis(); responseQueueLen++; boolean doRegister = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java index 9b42b2daca3..e18da0c5b12 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ProtobufRpcEngine.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Objects; import org.codehaus.jackson.map.ObjectMapper; @@ -169,7 +170,7 @@ class ProtobufRpcEngine implements RpcEngine { throws ServiceException { long startTime = 0; if (LOG.isDebugEnabled()) { - startTime = System.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTimeMillis(); } RpcRequestBody rpcRequest = constructRpcRequest(method, args); @@ -178,7 +179,7 @@ class ProtobufRpcEngine implements RpcEngine { val = client.call(rpcRequest, address, protocol, ticket, rpcTimeout); if (LOG.isDebugEnabled()) { - long callTime = System.currentTimeMillis() - startTime; + long callTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; if (LOG.isTraceEnabled()) LOG.trace("Call: " + method.getName() + " " + callTime); } return val; @@ -350,7 +351,7 @@ class ProtobufRpcEngine implements 
RpcEngine { throw new HBaseRPC.UnknownProtocolException(protocol); } - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); if (method.getParameterTypes().length == 2) { // RpcController + Message in the method args // (generated code from RPC bits in .proto files have RpcController) @@ -363,7 +364,7 @@ class ProtobufRpcEngine implements RpcEngine { + method.getName() + "]" + ", allowed (at most): 2, Actual: " + method.getParameterTypes().length); } - int processingTime = (int) (System.currentTimeMillis() - startTime); + int processingTime = (int) (EnvironmentEdgeManager.currentTimeMillis() - startTime); int qTime = (int) (startTime-receiveTime); if (TRACELOG.isDebugEnabled()) { TRACELOG.debug("Call #" + CurCall.get().id + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java index 670857783bc..347a67406bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableInputFormat; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.util.StringUtils; @@ -92,7 +93,7 @@ public class TableRecordReaderImpl { } if (logScannerActivity) { LOG.info("Current scan=" + currentScan.toString()); - timestamp = System.currentTimeMillis(); + timestamp = EnvironmentEdgeManager.currentTimeMillis(); rowcount = 0; } } @@ -197,7 +198,7 @@ public class TableRecordReaderImpl { if (logScannerActivity) { rowcount ++; if (rowcount >= logPerRowCount) { - long now = System.currentTimeMillis(); + long 
now = EnvironmentEdgeManager.currentTimeMillis(); LOG.info("Mapper took " + (now-timestamp) + "ms to process " + rowcount + " rows"); timestamp = now; @@ -232,7 +233,7 @@ public class TableRecordReaderImpl { return false; } catch (IOException ioe) { if (logScannerActivity) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); LOG.info("Mapper took " + (now-timestamp) + "ms to process " + rowcount + " rows"); LOG.info(ioe); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java index 3cbf328b876..bffef298114 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.WritableUtils; @@ -126,7 +127,7 @@ public class HFileOutputFormat extends FileOutputFormat writers = new TreeMap(Bytes.BYTES_COMPARATOR); private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY; - private final byte [] now = Bytes.toBytes(System.currentTimeMillis()); + private final byte [] now = Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis()); private boolean rollRequested = false; public void write(ImmutableBytesWritable row, KeyValue kv) @@ -213,7 +214,7 @@ public class HFileOutputFormat extends FileOutputFormat maxWaitTime) throw e; LOG.debug("Server is not yet up; waiting up to " + (maxWaitTime - now) + "ms", e); @@ -1338,7 +1339,7 @@ public class AssignmentManager extends ZooKeeperListener { // 
call to open risks our writing PENDING_OPEN after state has been moved // to OPENING by the regionserver. regionStates.updateRegionState(state.getRegion(), - RegionState.State.PENDING_OPEN, System.currentTimeMillis(), + RegionState.State.PENDING_OPEN, EnvironmentEdgeManager.currentTimeMillis(), destination); this.counter.addAndGet(1); } @@ -1441,7 +1442,7 @@ public class AssignmentManager extends ZooKeeperListener { " to " + plan.getDestination().toString()); // Transition RegionState to PENDING_OPEN regionStates.updateRegionState(state.getRegion(), - RegionState.State.PENDING_OPEN, System.currentTimeMillis(), + RegionState.State.PENDING_OPEN, EnvironmentEdgeManager.currentTimeMillis(), plan.getDestination()); // Send OPEN RPC. This can fail if the server on other end is is not up. // Pass the version that was obtained while setting the node to OFFLINE. @@ -2099,10 +2100,10 @@ public class AssignmentManager extends ZooKeeperListener { // that if it returns without an exception that there was a period of time // with no regions in transition from the point-of-view of the in-memory // state of the Master. - final long endTime = System.currentTimeMillis() + timeout; + final long endTime = EnvironmentEdgeManager.currentTimeMillis() + timeout; while (!this.server.isStopped() && regionStates.isRegionsInTransition() - && endTime > System.currentTimeMillis()) { + && endTime > EnvironmentEdgeManager.currentTimeMillis()) { regionStates.waitForUpdate(100); } @@ -2299,7 +2300,7 @@ public class AssignmentManager extends ZooKeeperListener { * on a frequent interval. 
*/ public void updateRegionsInTransitionMetrics() { - long currentTime = System.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTimeMillis(); int totalRITs = 0; int totalRITsOverThreshold = 0; long oldestRITTime = 0; @@ -2430,7 +2431,7 @@ public class AssignmentManager extends ZooKeeperListener { boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty(); // Iterate all regions in transition checking for time outs - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); // no lock concurrent access ok: we will be working on a copy, and it's java-valid to do // a copy while another thread is adding/removing items for (RegionState regionState : regionStates.getRegionsInTransition().values()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java index 242616c313d..38f3c5ff7e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java @@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * Run bulk assign. 
Does one RCP per regionserver passing a @@ -103,10 +104,10 @@ public class GeneralBulkAssigner extends BulkAssigner { pool.shutdown(); // no more task allowed int serverCount = bulkPlan.size(); int regionCount = regionSet.size(); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); long rpcWaitTime = startTime + timeout; while (!server.isStopped() && !pool.isTerminated() - && rpcWaitTime > System.currentTimeMillis()) { + && rpcWaitTime > EnvironmentEdgeManager.currentTimeMillis()) { if (failedPlans.isEmpty()) { pool.awaitTermination(100, TimeUnit.MILLISECONDS); } else { @@ -115,7 +116,7 @@ public class GeneralBulkAssigner extends BulkAssigner { } if (!pool.isTerminated()) { LOG.warn("bulk assigner is still running after " - + (System.currentTimeMillis() - startTime) + "ms, shut it down now"); + + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms, shut it down now"); // some assigner hangs, can't wait any more, shutdown the pool now List notStarted = pool.shutdownNow(); if (notStarted != null && !notStarted.isEmpty()) { @@ -133,11 +134,11 @@ public class GeneralBulkAssigner extends BulkAssigner { Configuration conf = server.getConfiguration(); long perRegionOpenTimeGuesstimate = conf.getLong("hbase.bulk.assignment.perregion.open.time", 1000); - long endTime = Math.max(System.currentTimeMillis(), rpcWaitTime) + long endTime = Math.max(EnvironmentEdgeManager.currentTimeMillis(), rpcWaitTime) + perRegionOpenTimeGuesstimate * (reassigningRegions + 1); RegionStates regionStates = assignmentManager.getRegionStates(); // We're not synchronizing on regionsInTransition now because we don't use any iterator. 
- while (!regionSet.isEmpty() && !server.isStopped() && endTime > System.currentTimeMillis()) { + while (!regionSet.isEmpty() && !server.isStopped() && endTime > EnvironmentEdgeManager.currentTimeMillis()) { Iterator regionInfoIterator = regionSet.iterator(); while (regionInfoIterator.hasNext()) { HRegionInfo hri = regionInfoIterator.next(); @@ -153,7 +154,7 @@ public class GeneralBulkAssigner extends BulkAssigner { } if (LOG.isDebugEnabled()) { - long elapsedTime = System.currentTimeMillis() - startTime; + long elapsedTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; String status = "successfully"; if (!regionSet.isEmpty()) { status = "with " + regionSet.size() + " regions still not assigned yet"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 2d271117d2d..5614b9b1f7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -164,6 +164,7 @@ import org.apache.hadoop.hbase.replication.regionserver.Replication; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CompressionTest; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.HasThread; @@ -355,7 +356,7 @@ Server { // Set our address. 
this.isa = this.rpcServer.getListenerAddress(); this.serverName = new ServerName(this.isa.getHostName(), - this.isa.getPort(), System.currentTimeMillis()); + this.isa.getPort(), EnvironmentEdgeManager.currentTimeMillis()); this.rsFatals = new MemoryBoundedLogMessageBuffer( conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024)); @@ -431,7 +432,7 @@ Server { MonitoredTask startupStatus = TaskMonitor.get().createStatus("Master startup"); startupStatus.setDescription("Master startup"); - masterStartTime = System.currentTimeMillis(); + masterStartTime = EnvironmentEdgeManager.currentTimeMillis(); try { /* * Block on becoming the active master. @@ -571,10 +572,10 @@ Server { long lastMsgTs = 0l; long now = 0l; while (!this.stopped) { - now = System.currentTimeMillis(); + now = EnvironmentEdgeManager.currentTimeMillis(); if ((now - lastMsgTs) >= this.msgInterval) { doMetrics(); - lastMsgTs = System.currentTimeMillis(); + lastMsgTs = EnvironmentEdgeManager.currentTimeMillis(); } stopSleeper.sleep(); } @@ -625,7 +626,7 @@ Server { */ status.setStatus("Initializing Master file system"); - this.masterActiveTime = System.currentTimeMillis(); + this.masterActiveTime = EnvironmentEdgeManager.currentTimeMillis(); // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring. this.fileSystemManager = new MasterFileSystem(this, this, metrics, masterRecovery); @@ -1265,7 +1266,7 @@ Server { if (!this.loadBalancerTracker.isBalancerOn()) return false; // Do this call outside of synchronized block. int maximumBalanceTime = getBalancerCutoffTime(); - long cutoffTime = System.currentTimeMillis() + maximumBalanceTime; + long cutoffTime = EnvironmentEdgeManager.currentTimeMillis() + maximumBalanceTime; boolean balancerRan; synchronized (this.balancer) { // Only allow one balance run at at time. 
@@ -1311,13 +1312,13 @@ Server { if (plans != null && !plans.isEmpty()) { for (RegionPlan plan: plans) { LOG.info("balance " + plan); - long balStartTime = System.currentTimeMillis(); + long balStartTime = EnvironmentEdgeManager.currentTimeMillis(); this.assignmentManager.balance(plan); - totalRegPlanExecTime += System.currentTimeMillis()-balStartTime; + totalRegPlanExecTime += EnvironmentEdgeManager.currentTimeMillis()-balStartTime; rpCount++; if (rpCount < plans.size() && // if performing next balance exceeds cutoff time, exit the loop - (System.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) { + (EnvironmentEdgeManager.currentTimeMillis() + (totalRegPlanExecTime / rpCount)) > cutoffTime) { LOG.debug("No more balancing till next balance run; maximumBalanceTime=" + maximumBalanceTime); break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java index fd1a39dff53..5f032604119 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * State of a Region while undergoing transitions. 
@@ -55,11 +56,11 @@ public class RegionState implements org.apache.hadoop.io.Writable { private volatile State state; public RegionState() { - this.stamp = new AtomicLong(System.currentTimeMillis()); + this.stamp = new AtomicLong(EnvironmentEdgeManager.currentTimeMillis()); } public RegionState(HRegionInfo region, State state) { - this(region, state, System.currentTimeMillis(), null); + this(region, state, EnvironmentEdgeManager.currentTimeMillis(), null); } public RegionState(HRegionInfo region, @@ -71,7 +72,7 @@ public class RegionState implements org.apache.hadoop.io.Writable { } public void updateTimestampToNow() { - this.stamp.set(System.currentTimeMillis()); + this.stamp.set(EnvironmentEdgeManager.currentTimeMillis()); } public State getState() { @@ -139,7 +140,7 @@ public class RegionState implements org.apache.hadoop.io.Writable { */ public String toDescriptiveString() { long lstamp = stamp.get(); - long relTime = System.currentTimeMillis() - lstamp; + long relTime = EnvironmentEdgeManager.currentTimeMillis() - lstamp; return region.getRegionNameAsString() + " state=" + state diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index 7dbaa4eecc9..9d1463b9012 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; /** @@ -211,7 +212,7 @@ public class RegionStates { */ public synchronized RegionState updateRegionState( final HRegionInfo hri, final State state, final ServerName serverName) { - return updateRegionState(hri, 
state, System.currentTimeMillis(), serverName); + return updateRegionState(hri, state, EnvironmentEdgeManager.currentTimeMillis(), serverName); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 87fe4743d61..b6cff9ee7ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.protobuf.ServiceException; @@ -287,7 +288,7 @@ public class ServerManager { */ private void checkClockSkew(final ServerName serverName, final long serverCurrentTime) throws ClockOutOfSyncException { - long skew = System.currentTimeMillis() - serverCurrentTime; + long skew = EnvironmentEdgeManager.currentTimeMillis() - serverCurrentTime; if (skew > maxSkew) { String message = "Server " + serverName + " has been " + "rejected; Reported time is too far out of sync with master. 
" + @@ -408,7 +409,7 @@ public class ServerManager { long previousLogTime = 0; while (!onlineServers.isEmpty()) { - if (System.currentTimeMillis() > (previousLogTime + 1000)) { + if (EnvironmentEdgeManager.currentTimeMillis() > (previousLogTime + 1000)) { StringBuilder sb = new StringBuilder(); for (ServerName key : this.onlineServers.keySet()) { if (sb.length() > 0) { @@ -417,7 +418,7 @@ public class ServerManager { sb.append(key); } LOG.info("Waiting on regionserver(s) to go down " + sb.toString()); - previousLogTime = System.currentTimeMillis(); + previousLogTime = EnvironmentEdgeManager.currentTimeMillis(); } synchronized (onlineServers) { @@ -694,7 +695,7 @@ public class ServerManager { final int maxToStart = this.master.getConfiguration(). getInt("hbase.master.wait.on.regionservers.maxtostart", Integer.MAX_VALUE); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); final long startTime = now; long slept = 0; long lastLogTime = 0; @@ -723,7 +724,7 @@ public class ServerManager { // We sleep for some time final long sleepTime = 50; Thread.sleep(sleepTime); - now = System.currentTimeMillis(); + now = EnvironmentEdgeManager.currentTimeMillis(); slept = now - startTime; oldCount = count; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index e40322fb90f..7cb2d538d60 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.common.base.Joiner; 
import com.google.common.collect.ArrayListMultimap; @@ -49,7 +50,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { // slop for regions private float slop; private Configuration config; - private static final Random RANDOM = new Random(System.currentTimeMillis()); + private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTimeMillis()); private static final Log LOG = LogFactory.getLog(BaseLoadBalancer.class); protected MasterServices services; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java index b68901e8023..337a38a2357 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/DefaultLoadBalancer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.common.collect.MinMaxPriorityQueue; @@ -56,7 +57,7 @@ import com.google.common.collect.MinMaxPriorityQueue; @InterfaceAudience.Private public class DefaultLoadBalancer extends BaseLoadBalancer { private static final Log LOG = LogFactory.getLog(DefaultLoadBalancer.class); - private static final Random RANDOM = new Random(System.currentTimeMillis()); + private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTimeMillis()); private RegionInfoComparator riComparator = new RegionInfoComparator(); private RegionPlan.RegionPlanComparator rpComparator = new RegionPlan.RegionPlanComparator(); @@ -181,7 +182,7 @@ public class DefaultLoadBalancer extends BaseLoadBalancer { public List balanceCluster( Map> clusterMap) { boolean emptyRegionServerPresent = false; - long 
startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); ClusterLoadState cs = new ClusterLoadState(clusterMap); @@ -318,7 +319,7 @@ public class DefaultLoadBalancer extends BaseLoadBalancer { // If none needed to fill all to min and none left to drain all to max, // we are done if (neededRegions == 0 && regionsToMove.isEmpty()) { - long endTime = System.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTimeMillis(); LOG.info("Calculated a load balance in " + (endTime-startTime) + "ms. " + "Moving " + totalNumMoved + " regions off of " + serversOverloaded + " overloaded servers onto " + @@ -396,7 +397,7 @@ public class DefaultLoadBalancer extends BaseLoadBalancer { } } - long endTime = System.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTimeMillis(); if (!regionsToMove.isEmpty() || neededRegions != 0) { // Emit data so can diagnose how balancer went astray. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index b4b139bd318..bafd3e4bfa6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import java.util.ArrayList; import java.util.HashMap; @@ -106,7 +107,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private static final String MAX_MOVES_KEY = "hbase.master.balancer.stochastic.maxMoveRegions"; private static final String KEEP_REGION_LOADS = 
"hbase.master.balancer.stochastic.numRegionLoadsToRemember"; - private static final Random RANDOM = new Random(System.currentTimeMillis()); + private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTimeMillis()); private static final Log LOG = LogFactory.getLog(StochasticLoadBalancer.class); private final RegionLocationFinder regionFinder = new RegionLocationFinder(); private ClusterStatus clusterStatus = null; @@ -183,7 +184,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { return null; } - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); // Keep track of servers to iterate through them. List servers = new ArrayList(clusterState.keySet()); @@ -248,7 +249,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { } - long endTime = System.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTimeMillis(); if (initCost > currentCost) { List plans = createRegionPlans(initialRegionMapping, clusterState); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java index 0f471e51be8..52014847dd5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java @@ -21,6 +21,7 @@ import java.io.IOException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.commons.logging.Log; @@ -40,7 +41,7 @@ public class TimeToLiveLogCleaner extends BaseLogCleanerDelegate { @Override public boolean isLogDeletable(Path filePath) { long time = 0; - long currentTime = System.currentTimeMillis(); 
+ long currentTime = EnvironmentEdgeManager.currentTimeMillis(); try { FileStatus fStat = filePath.getFileSystem(this.getConf()).getFileStatus(filePath); time = fStat.getModificationTime(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java index 868cd5eb4af..7367608dde7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.zookeeper.KeeperException; @@ -59,8 +60,8 @@ public class DeleteTableHandler extends TableEventHandler { long waitTime = server.getConfiguration(). 
getLong("hbase.master.wait.on.region", 5 * 60 * 1000); for (HRegionInfo region : regions) { - long done = System.currentTimeMillis() + waitTime; - while (System.currentTimeMillis() < done) { + long done = EnvironmentEdgeManager.currentTimeMillis() + waitTime; + while (EnvironmentEdgeManager.currentTimeMillis() < done) { if (!am.getRegionStates().isRegionInTransition(region)) break; Threads.sleep(waitingTimeForEvents); LOG.debug("Waiting on region to clear regions in transition; " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java index 3a6b6299e53..29d68d24c38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.zookeeper.KeeperException; import org.cloudera.htrace.Trace; @@ -178,14 +179,14 @@ public class DisableTableHandler extends EventHandler { @Override protected boolean waitUntilDone(long timeout) throws InterruptedException { - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); long remaining = timeout; List regions = null; while (!server.isStopped() && remaining > 0) { Thread.sleep(waitingTimeForEvents); regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName); if (regions.isEmpty()) break; - remaining = timeout - (System.currentTimeMillis() - startTime); + remaining = timeout - (EnvironmentEdgeManager.currentTimeMillis() - startTime); } return regions != null && regions.isEmpty(); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index 043f0a32ee5..30f01a999f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.master.BulkAssigner; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.zookeeper.KeeperException; import org.cloudera.htrace.Trace; @@ -219,7 +220,7 @@ public class EnableTableHandler extends EventHandler { @Override protected boolean waitUntilDone(long timeout) throws InterruptedException { - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); long remaining = timeout; List regions = null; int lastNumberOfRegions = 0; @@ -234,7 +235,7 @@ public class EnableTableHandler extends EventHandler { lastNumberOfRegions = regions.size(); timeout += waitingTimeForEvents; } - remaining = timeout - (System.currentTimeMillis() - startTime); + remaining = timeout - (EnvironmentEdgeManager.currentTimeMillis() - startTime); } return isDone(regions); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java index 66f1f8302b5..f1d37a98700 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/MetricsRate.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.metrics; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.metrics.MetricsRecord; import org.apache.hadoop.metrics.util.MetricsBase; import org.apache.hadoop.metrics.util.MetricsRegistry; @@ -43,7 +44,7 @@ public class MetricsRate extends MetricsBase { super(name, description); this.value = 0; this.prevRate = 0; - this.ts = System.currentTimeMillis(); + this.ts = EnvironmentEdgeManager.currentTimeMillis(); registry.add(name, this); } @@ -60,7 +61,7 @@ public class MetricsRate extends MetricsBase { } public synchronized void intervalHeartBeat() { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); long diff = (now-ts) / 1000; if (diff < 1){ // To make sure our averages aren't skewed by fast repeated calls, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java index 5b5f623fd93..1c61ec41309 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MemoryBoundedLogMessageBuffer.java @@ -25,6 +25,7 @@ import java.util.LinkedList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.common.base.Charsets; import com.google.common.base.Preconditions; @@ -54,7 +55,7 @@ public class MemoryBoundedLogMessageBuffer { * older messages until the desired memory limit is achieved. 
*/ public synchronized void add(String messageText) { - LogMessage message = new LogMessage(messageText, System.currentTimeMillis()); + LogMessage message = new LogMessage(messageText, EnvironmentEdgeManager.currentTimeMillis()); usage += message.estimateHeapUsage(); messages.add(message); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java index bb5b92823c0..3f16f4604c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.client.Operation; import org.apache.hadoop.hbase.io.WritableWithSize; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RpcRequestBody; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.io.Writable; import org.codehaus.jackson.map.ObjectMapper; @@ -191,7 +192,7 @@ public class MonitoredRPCHandlerImpl extends MonitoredTaskImpl long queueTime) { this.methodName = methodName; this.params = params; - this.rpcStartTime = System.currentTimeMillis(); + this.rpcStartTime = EnvironmentEdgeManager.currentTimeMillis(); this.rpcQueueTime = queueTime; this.state = State.RUNNING; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java index 9a19cb25e67..e95f1ed7ac3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.monitoring; import org.apache.hadoop.classification.InterfaceAudience; +import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.codehaus.jackson.map.ObjectMapper; import java.io.IOException; @@ -37,7 +38,7 @@ class MonitoredTaskImpl implements MonitoredTask { protected volatile State state = State.RUNNING; public MonitoredTaskImpl() { - startTime = System.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTimeMillis(); statusTime = startTime; stateTime = startTime; } @@ -116,12 +117,12 @@ class MonitoredTaskImpl implements MonitoredTask { @Override public void setStatus(String status) { this.status = status; - statusTime = System.currentTimeMillis(); + statusTime = EnvironmentEdgeManager.currentTimeMillis(); } protected void setState(State state) { this.state = state; - stateTime = System.currentTimeMillis(); + stateTime = EnvironmentEdgeManager.currentTimeMillis(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java index 728d89d8e53..0f45671386b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java @@ -30,6 +30,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; @@ -140,12 +141,12 @@ public class TaskMonitor { private boolean canPurge(MonitoredTask stat) { long cts = stat.getCompletionTimestamp(); - return (cts > 0 && System.currentTimeMillis() - cts > EXPIRATION_TIME); + return (cts > 0 && EnvironmentEdgeManager.currentTimeMillis() - cts > EXPIRATION_TIME); } public void dumpAsText(PrintWriter out) { - long now = System.currentTimeMillis(); + long now = 
EnvironmentEdgeManager.currentTimeMillis(); List tasks = getTasks(); for (MonitoredTask task : tasks) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 671362ca417..183b77419a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.common.base.Preconditions; @@ -83,7 +84,7 @@ public class CompactSplitThread implements CompactionRequestor { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); - t.setName(n + "-largeCompactions-" + System.currentTimeMillis()); + t.setName(n + "-largeCompactions-" + EnvironmentEdgeManager.currentTimeMillis()); return t; } }); @@ -95,7 +96,7 @@ public class CompactSplitThread implements CompactionRequestor { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); - t.setName(n + "-smallCompactions-" + System.currentTimeMillis()); + t.setName(n + "-smallCompactions-" + EnvironmentEdgeManager.currentTimeMillis()); return t; } }); @@ -107,7 +108,7 @@ public class CompactSplitThread implements CompactionRequestor { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); - t.setName(n + "-splits-" + System.currentTimeMillis()); + t.setName(n + "-splits-" + EnvironmentEdgeManager.currentTimeMillis()); return t; } }); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java index 09ca6afdf0c..a4021e032c3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * State and utility processing {@link HRegion#getClosestRowBefore(byte[], byte[])}. @@ -72,7 +73,7 @@ class GetClosestRowBeforeTracker { HConstants.DELIMITER) - this.rowoffset; } this.tablenamePlusDelimiterLength = metaregion? l + 1: -1; - this.oldestts = System.currentTimeMillis() - ttl; + this.oldestts = EnvironmentEdgeManager.currentTimeMillis() - ttl; this.kvcomparator = c; KeyValue.RowComparator rc = new KeyValue.RowComparator(this.kvcomparator); this.deletes = new TreeMap>(rc); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 31c68ef1165..a87bd066d67 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -522,7 +522,7 @@ public class HRegionServer implements ClientProtocol, this.rpcServer.setErrorHandler(this); this.rpcServer.setQosFunction((qosFunction = new QosFunction())); - this.startcode = System.currentTimeMillis(); + this.startcode = EnvironmentEdgeManager.currentTimeMillis(); // login the server principal (if using secure Hadoop) User.login(this.conf, "hbase.regionserver.keytab.file", @@ -885,11 +885,11 @@ public class HRegionServer implements ClientProtocol, LOG.debug("Waiting on " + 
getOnlineRegionsAsPrintableString()); } } - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); if ((now - lastMsg) >= msgInterval) { doMetrics(); tryRegionServerReport(lastMsg, now); - lastMsg = System.currentTimeMillis(); + lastMsg = EnvironmentEdgeManager.currentTimeMillis(); } if (!this.stopped) this.sleeper.sleep(); } // for @@ -1083,8 +1083,8 @@ public class HRegionServer implements ClientProtocol, // Only print a message if the count of regions has changed. if (count != lastCount) { // Log every second at most - if (System.currentTimeMillis() > (previousLogTime + 1000)) { - previousLogTime = System.currentTimeMillis(); + if (EnvironmentEdgeManager.currentTimeMillis() > (previousLogTime + 1000)) { + previousLogTime = EnvironmentEdgeManager.currentTimeMillis(); lastCount = count; LOG.info("Waiting on " + count + " regions to close"); // Only print out regions still closing if a small number else will @@ -1928,7 +1928,7 @@ public class HRegionServer implements ClientProtocol, return null; } LOG.debug("No master found; retry"); - previousLogTime = System.currentTimeMillis(); + previousLogTime = EnvironmentEdgeManager.currentTimeMillis(); refresh = true; // let's try pull it from ZK directly sleeper.sleep(); @@ -1952,14 +1952,14 @@ public class HRegionServer implements ClientProtocol, e = e instanceof RemoteException ? ((RemoteException)e).unwrapRemoteException() : e; if (e instanceof ServerNotRunningYetException) { - if (System.currentTimeMillis() > (previousLogTime+1000)){ + if (EnvironmentEdgeManager.currentTimeMillis() > (previousLogTime+1000)){ LOG.info("Master isn't available yet, retrying"); - previousLogTime = System.currentTimeMillis(); + previousLogTime = EnvironmentEdgeManager.currentTimeMillis(); } } else { - if (System.currentTimeMillis() > (previousLogTime + 1000)) { + if (EnvironmentEdgeManager.currentTimeMillis() > (previousLogTime + 1000)) { LOG.warn("Unable to connect to master. Retrying. 
Error was:", e); - previousLogTime = System.currentTimeMillis(); + previousLogTime = EnvironmentEdgeManager.currentTimeMillis(); } } try { @@ -4043,7 +4043,7 @@ public class HRegionServer implements ClientProtocol, } protected void addToMovedRegions(String encodedName, ServerName destination){ - final Long time = System.currentTimeMillis(); + final Long time = EnvironmentEdgeManager.currentTimeMillis(); movedRegions.put( encodedName, @@ -4054,7 +4054,7 @@ public class HRegionServer implements ClientProtocol, Pair dest = movedRegions.get(encodedRegionName); if (dest != null) { - if (dest.getFirst() > (System.currentTimeMillis() - TIMEOUT_REGION_MOVED)) { + if (dest.getFirst() > (EnvironmentEdgeManager.currentTimeMillis() - TIMEOUT_REGION_MOVED)) { return dest.getSecond(); } else { movedRegions.remove(encodedRegionName); @@ -4068,7 +4068,7 @@ public class HRegionServer implements ClientProtocol, * Remove the expired entries from the moved regions list. */ protected void cleanMovedRegions(){ - final long cutOff = System.currentTimeMillis() - TIMEOUT_REGION_MOVED; + final long cutOff = EnvironmentEdgeManager.currentTimeMillis() - TIMEOUT_REGION_MOVED; Iterator>> it = movedRegions.entrySet().iterator(); while (it.hasNext()){ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 7b7b2ff2916..11b18a0e254 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1178,7 +1178,7 @@ public class HStore extends SchemaConfigured implements Store { } // TODO: Use better method for determining stamp of last major (HBASE-2990) long lowTimestamp = getLowestTimestamp(filesToCompact); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); if (lowTimestamp > 0l && lowTimestamp < (now - mcTime)) { // Major 
compaction time has elapsed. if (filesToCompact.size() == 1) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java index c1e201043a8..7c779c16369 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HasThread; import java.util.ConcurrentModificationException; @@ -278,7 +279,7 @@ public class Leases extends HasThread { } public long getDelay(TimeUnit unit) { - return unit.convert(this.expirationTime - System.currentTimeMillis(), + return unit.convert(this.expirationTime - EnvironmentEdgeManager.currentTimeMillis(), TimeUnit.MILLISECONDS); } @@ -293,7 +294,7 @@ public class Leases extends HasThread { * Resets the expiration time of the lease. 
*/ public void resetExpirationTime() { - this.expirationTime = System.currentTimeMillis() + this.leaseTimeoutPeriod; + this.expirationTime = EnvironmentEdgeManager.currentTimeMillis() + this.leaseTimeoutPeriod; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java index 20df71413e1..1764a6256f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HasThread; import java.io.IOException; @@ -48,7 +49,7 @@ class LogRoller extends HasThread implements WALActionsListener { private final AtomicBoolean rollLog = new AtomicBoolean(false); private final Server server; private final RegionServerServices services; - private volatile long lastrolltime = System.currentTimeMillis(); + private volatile long lastrolltime = EnvironmentEdgeManager.currentTimeMillis(); // Period to roll log. 
private final long rollperiod; private final int threadWakeFrequency; @@ -67,7 +68,7 @@ class LogRoller extends HasThread implements WALActionsListener { @Override public void run() { while (!server.isStopped()) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); boolean periodic = false; if (!rollLog.get()) { periodic = (now - this.lastrolltime) > this.rollperiod; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 3a6251b6491..cfb3c45f6b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.util.StringUtils; @@ -346,7 +347,7 @@ class MemStoreFlusher extends HasThread implements FlushRequester { if (!fqe.region.getRegionInfo().isMetaRegion() && isTooManyStoreFiles(region)) { if (fqe.isMaximumWait(this.blockingWaitTime)) { - LOG.info("Waited " + (System.currentTimeMillis() - fqe.createTime) + + LOG.info("Waited " + (EnvironmentEdgeManager.currentTimeMillis() - fqe.createTime) + "ms on a compaction to clean up 'too many store files'; waited " + "long enough... 
proceeding with flush of " + region.getRegionNameAsString()); @@ -525,7 +526,7 @@ class MemStoreFlusher extends HasThread implements FlushRequester { FlushRegionEntry(final HRegion r) { this.region = r; - this.createTime = System.currentTimeMillis(); + this.createTime = EnvironmentEdgeManager.currentTimeMillis(); this.whenToExpire = this.createTime; } @@ -534,7 +535,7 @@ class MemStoreFlusher extends HasThread implements FlushRequester { * @return True if we have been delayed > maximumWait milliseconds. */ public boolean isMaximumWait(final long maximumWait) { - return (System.currentTimeMillis() - this.createTime) > maximumWait; + return (EnvironmentEdgeManager.currentTimeMillis() - this.createTime) > maximumWait; } /** @@ -547,19 +548,19 @@ class MemStoreFlusher extends HasThread implements FlushRequester { /** * @param when When to expire, when to come up out of the queue. - * Specify in milliseconds. This method adds System.currentTimeMillis() + * Specify in milliseconds. This method adds EnvironmentEdgeManager.currentTimeMillis() * to whatever you pass. * @return This. 
*/ public FlushRegionEntry requeue(final long when) { - this.whenToExpire = System.currentTimeMillis() + when; + this.whenToExpire = EnvironmentEdgeManager.currentTimeMillis() + when; this.requeueCount++; return this; } @Override public long getDelay(TimeUnit unit) { - return unit.convert(this.whenToExpire - System.currentTimeMillis(), + return unit.convert(this.whenToExpire - EnvironmentEdgeManager.currentTimeMillis(), TimeUnit.MILLISECONDS); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index a480f6a6695..06fbf81d466 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.master.SplitLogManager; import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter; import org.apache.hadoop.hbase.util.CancelableProgressable; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -269,7 +270,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable { SplitLogCounters.tot_wkr_task_acquired.incrementAndGet(); getDataSetWatchAsync(); - t = System.currentTimeMillis(); + t = EnvironmentEdgeManager.currentTimeMillis(); TaskExecutor.Status status; status = splitTaskExecutor.exec(ZKSplitLog.getFileName(currentTask), @@ -314,7 +315,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable { } finally { if (t > 0) { LOG.info("worker " + serverName + " done with task " + path + - " in " + (System.currentTimeMillis() - t) + "ms"); + " in " + (EnvironmentEdgeManager.currentTimeMillis() - t) + "ms"); } synchronized (grabTaskLock) { 
workerInGrabTask = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index f945ffd277f..24ce0b897d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; import com.google.common.base.Preconditions; @@ -59,14 +60,14 @@ class SplitRequest implements Runnable { return; } try { - final long startTime = System.currentTimeMillis(); + final long startTime = EnvironmentEdgeManager.currentTimeMillis(); SplitTransaction st = new SplitTransaction(parent, midKey); // If prepare does not return true, for some reason -- logged inside in // the prepare call -- we are not ready to split just now. Just return. if (!st.prepare()) return; try { st.execute(this.server, this.server); - this.server.getMetrics().incrementSplitSuccessCount(System.currentTimeMillis() - startTime); + this.server.getMetrics().incrementSplitSuccessCount(EnvironmentEdgeManager.currentTimeMillis() - startTime); } catch (Exception e) { if (this.server.isStopping() || this.server.isStopped()) { LOG.info( @@ -98,7 +99,7 @@ class SplitRequest implements Runnable { + parent.getRegionInfo().getRegionNameAsString() + ", new regions: " + st.getFirstDaughter().getRegionNameAsString() + ", " + st.getSecondDaughter().getRegionNameAsString() + ". 
Split took " - + StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime)); + + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTimeMillis(), startTime)); } catch (IOException ex) { LOG.error("Split failed " + this, RemoteExceptionHandler .checkIOException(ex)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index 87c3270124a..fef4400811d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -523,7 +523,7 @@ public class SplitTransaction { @Override public boolean progress() { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); if (now - lastLog > this.interval) { LOG.info("Opening " + this.hri.getRegionNameAsString()); this.lastLog = now; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java index f94e52b01a7..78aed191503 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.RegionServerAccounting; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.CancelableProgressable; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.zookeeper.KeeperException; @@ -168,7 +169,7 @@ public class OpenRegionHandler extends EventHandler { // Total timeout for meta edit. 
If we fail adding the edit then close out // the region and let it be assigned elsewhere. long timeout = assignmentTimeout * 10; - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); long endTime = now + timeout; // Let our period at which we update OPENING state to be be 1/3rd of the // regions-in-transition timeout period. @@ -190,7 +191,7 @@ public class OpenRegionHandler extends EventHandler { // Go to the loop check. } } - now = System.currentTimeMillis(); + now = EnvironmentEdgeManager.currentTimeMillis(); } // Is thread still alive? We may have left above loop because server is // stopping or we timed out the edit. Is so, interrupt it. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java index 03ed7c4de2b..f546345e65c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram; import org.apache.hadoop.hbase.metrics.PersistentMetricsTimeVaryingRate; import com.yammer.metrics.stats.Snapshot; import org.apache.hadoop.hbase.regionserver.wal.HLog; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Strings; import org.apache.hadoop.metrics.ContextFactory; @@ -61,8 +62,8 @@ public class RegionServerMetrics implements Updater { @SuppressWarnings({"FieldCanBeLocal"}) private final Log LOG = LogFactory.getLog(this.getClass()); private final MetricsRecord metricsRecord; - private long lastUpdate = System.currentTimeMillis(); - private long lastExtUpdate = System.currentTimeMillis(); + private long lastUpdate = EnvironmentEdgeManager.currentTimeMillis(); 
+ private long lastExtUpdate = EnvironmentEdgeManager.currentTimeMillis(); private long extendedPeriod = 0; private static final int MB = 1024*1024; private MetricsRegistry registry = new MetricsRegistry(); @@ -349,7 +350,7 @@ public class RegionServerMetrics implements Updater { */ public void doUpdates(MetricsContext caller) { synchronized (this) { - this.lastUpdate = System.currentTimeMillis(); + this.lastUpdate = EnvironmentEdgeManager.currentTimeMillis(); // has the extended period for long-living stats elapsed? if (this.extendedPeriod > 0 && diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index 3eb99219581..6ecd6065a6c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.Threads; @@ -620,7 +621,7 @@ public class HLog implements Syncable { if (currentFilenum > 0) { oldPath = computeFilename(currentFilenum); } - this.filenum = System.currentTimeMillis(); + this.filenum = EnvironmentEdgeManager.currentTimeMillis(); Path newPath = computeFilename(); // Tell our listeners that a new log is about to be created @@ -1298,7 +1299,7 @@ public class HLog implements Syncable { } try { long doneUpto; - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); // First flush all the pending writes to HDFS. Then // issue the sync to HDFS. 
If sync is successful, then update // syncedTillHere to indicate that transactions till this @@ -1334,7 +1335,7 @@ public class HLog implements Syncable { } this.syncedTillHere = Math.max(this.syncedTillHere, doneUpto); - syncTime.inc(System.currentTimeMillis() - now); + syncTime.inc(EnvironmentEdgeManager.currentTimeMillis() - now); if (!this.logRollRunning) { checkLowReplication(); try { @@ -1461,13 +1462,13 @@ public class HLog implements Syncable { } } try { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); // coprocessor hook: if (!coprocessorHost.preWALWrite(info, logKey, logEdit)) { // write to our buffer for the Hlog file. logSyncerThread.append(new HLog.Entry(logKey, logEdit)); } - long took = System.currentTimeMillis() - now; + long took = EnvironmentEdgeManager.currentTimeMillis() - now; coprocessorHost.postWALWrite(info, logKey, logEdit); writeTime.inc(took); long len = 0; @@ -1590,13 +1591,13 @@ public class HLog implements Syncable { } long txid = 0; synchronized (updateLock) { - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); WALEdit edit = completeCacheFlushLogEdit(); HLogKey key = makeKey(encodedRegionName, tableName, logSeqId, - System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID); + EnvironmentEdgeManager.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID); logSyncerThread.append(new Entry(key, edit)); txid = this.unflushedEntries.incrementAndGet(); - writeTime.inc(System.currentTimeMillis() - now); + writeTime.inc(EnvironmentEdgeManager.currentTimeMillis() - now); long len = 0; for (KeyValue kv : edit.getKeyValues()) { len += kv.getLength(); @@ -1618,7 +1619,7 @@ public class HLog implements Syncable { private WALEdit completeCacheFlushLogEdit() { KeyValue kv = new KeyValue(METAROW, METAFAMILY, null, - System.currentTimeMillis(), COMPLETE_CACHE_FLUSH); + EnvironmentEdgeManager.currentTimeMillis(), COMPLETE_CACHE_FLUSH); WALEdit e = new 
WALEdit(); e.add(kv); return e; @@ -1901,7 +1902,7 @@ public class HLog implements Syncable { final Path edits) throws IOException { Path moveAsideName = new Path(edits.getParent(), edits.getName() + "." + - System.currentTimeMillis()); + EnvironmentEdgeManager.currentTimeMillis()); if (!fs.rename(edits, moveAsideName)) { LOG.warn("Rename failed from " + edits + " to " + moveAsideName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java index 1b57aee0062..ddbf3c73d91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSinkMetrics.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication.regionserver.metrics; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * This class is for maintaining the various replication statistics for a sink and publishing them @@ -44,7 +45,7 @@ public class ReplicationSinkMetrics { * @param timestamp The timestamp of the last operation applied. 
*/ public void setAgeOfLastAppliedOp(long timestamp) { - long age = System.currentTimeMillis() - timestamp; + long age = EnvironmentEdgeManager.currentTimeMillis() - timestamp; rms.setGauge(SINK_AGE_OF_LAST_APPLIED_OP, age); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java index fe24d396ba3..6ecc47a0f91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/metrics/ReplicationSourceMetrics.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * This class is for maintaining the various replication statistics for a source and publishing them @@ -75,7 +76,7 @@ public class ReplicationSourceMetrics { * @param timestamp write time of the edit */ public void setAgeOfLastShippedOp(long timestamp) { - long age = System.currentTimeMillis() - timestamp; + long age = EnvironmentEdgeManager.currentTimeMillis() - timestamp; rms.setGauge(ageOfLastShippedOpKey, age); rms.setGauge(SOURCE_AGE_OF_LAST_SHIPPED_OP, age); this.lastTimestamp = timestamp; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java index 3426f6dfcff..a0c5a4810d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java @@ -34,6 +34,7 @@ import 
org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.rest.model.ScannerModel; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; @InterfaceAudience.Private @@ -88,7 +89,7 @@ public class ScannerResultGenerator extends ResultGenerator { scan.setCacheBlocks(false); scanner = table.getScanner(scan); cached = null; - id = Long.toString(System.currentTimeMillis()) + + id = Long.toString(EnvironmentEdgeManager.currentTimeMillis()) + Integer.toHexString(scanner.hashCode()); } finally { table.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 3cb6fcd2f4b..301ab59b22f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -42,6 +42,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * A wrapper around HttpClient which provides some useful function and @@ -186,9 +187,9 @@ public class Client { method.addRequestHeader(header); } } - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); int code = httpClient.executeMethod(method); - long endTime = System.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTimeMillis(); if (LOG.isDebugEnabled()) { LOG.debug(method.getName() + " " + uri + " " + code + " " + method.getStatusText() + " in " + (endTime - startTime) + " ms"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java index 5a85e52a96c..651dd9b7f53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.thrift.CallQueue.Call; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.thrift.TException; import org.apache.thrift.TProcessor; @@ -219,7 +220,7 @@ public class TBoundedThreadPoolServer extends TServer { long msLeftToWait = serverOptions.stopTimeoutUnit.toMillis(serverOptions.stopTimeoutVal); - long timeMillis = System.currentTimeMillis(); + long timeMillis = EnvironmentEdgeManager.currentTimeMillis(); LOG.info("Waiting for up to " + msLeftToWait + " ms to finish processing" + " pending requests"); @@ -229,7 +230,7 @@ public class TBoundedThreadPoolServer extends TServer { executorService.awaitTermination(msLeftToWait, TimeUnit.MILLISECONDS); break; } catch (InterruptedException ix) { - long timePassed = System.currentTimeMillis() - timeMillis; + long timePassed = EnvironmentEdgeManager.currentTimeMillis() - timeMillis; msLeftToWait -= timePassed; timeMillis += timePassed; interrupted = true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 0cf5569f729..f88d23fb6b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HTable; 
import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** * HBase Canary Tool, that that can be used to do @@ -234,9 +235,9 @@ public final class Canary implements Tool { get.addFamily(column.getName()); try { - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); table.get(get); - long time = System.currentTimeMillis() - startTime; + long time = EnvironmentEdgeManager.currentTimeMillis() - startTime; sink.publishReadTiming(region, column, time); } catch (Exception e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java index 6ec8881fb06..ccdaf2b8110 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java @@ -68,7 +68,7 @@ public class FSHDFSUtils extends FSUtils{ return; } LOG.info("Recovering file " + p); - long startWaiting = System.currentTimeMillis(); + long startWaiting = EnvironmentEdgeManager.currentTimeMillis(); // Trying recovery boolean recovered = false; @@ -95,7 +95,7 @@ public class FSHDFSUtils extends FSUtils{ // within its soft limit, but if we get it past that, it means // that the RS is holding onto the file even though it lost its // znode. We could potentially abort after some time here. 
- long waitedFor = System.currentTimeMillis() - startWaiting; + long waitedFor = EnvironmentEdgeManager.currentTimeMillis() - startWaiting; if (waitedFor > LEASE_SOFTLIMIT_PERIOD) { LOG.warn("Waited " + waitedFor + "ms for lease recovery on " + p + ":" + e.getMessage()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index b22eaac9725..84e84caed31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -538,7 +538,7 @@ public abstract class FSUtils { throws IOException { // Rewrite the file as pb. Move aside the old one first, write new // then delete the moved-aside file. - Path movedAsideName = new Path(p + "." + System.currentTimeMillis()); + Path movedAsideName = new Path(p + "." + EnvironmentEdgeManager.currentTimeMillis()); if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p); setClusterId(fs, rootdir, cid, 100); if (!fs.delete(movedAsideName, false)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 26873c7cce8..5f2a2a58d69 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -170,7 +170,7 @@ public class HBaseFsck { private HBaseAdmin admin; private HTable meta; protected ExecutorService executor; // threads to retrieve data from regionservers - private long startMillis = System.currentTimeMillis(); + private long startMillis = EnvironmentEdgeManager.currentTimeMillis(); private HFileCorruptionChecker hfcc; private int retcode = 0; @@ -1255,7 +1255,7 @@ public class HBaseFsck { throw new IOException(e); } MetaEntry m = - new MetaEntry(rootLocation.getRegionInfo(), sn, System.currentTimeMillis()); + 
new MetaEntry(rootLocation.getRegionInfo(), sn, EnvironmentEdgeManager.currentTimeMillis()); HbckInfo hbInfo = new HbckInfo(m); regionInfoMap.put(rootLocation.getRegionInfo().getEncodedName(), hbInfo); return true; @@ -1508,7 +1508,7 @@ public class HBaseFsck { (hbi.metaEntry == null)? false: hbi.metaEntry.isSplit() && hbi.metaEntry.isOffline(); boolean shouldBeDeployed = inMeta && !isTableDisabled(hbi.metaEntry); boolean recentlyModified = hbi.getHdfsRegionDir() != null && - hbi.getModTime() + timelag > System.currentTimeMillis(); + hbi.getModTime() + timelag > EnvironmentEdgeManager.currentTimeMillis(); // ========== First the healthy cases ============= if (hbi.containsOnlyHdfsEdits()) { @@ -2321,7 +2321,7 @@ public class HBaseFsck { */ HTableDescriptor[] getTables(AtomicInteger numSkipped) { List tableNames = new ArrayList(); - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); for (HbckInfo hbi : regionInfoMap.values()) { MetaEntry info = hbi.metaEntry; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index f47a0a100bd..b01084aca83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -119,8 +119,8 @@ public class HBaseFsckRepair { public static void waitUntilAssigned(HBaseAdmin admin, HRegionInfo region) throws IOException, InterruptedException { long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000); - long expiration = timeout + System.currentTimeMillis(); - while (System.currentTimeMillis() < expiration) { + long expiration = timeout + EnvironmentEdgeManager.currentTimeMillis(); + while (EnvironmentEdgeManager.currentTimeMillis() < expiration) { try { Map rits= admin.getClusterStatus().getRegionsInTransition(); @@ -153,8 +153,8 @@ public class 
HBaseFsckRepair { ProtobufUtil.closeRegion(rs, region.getRegionName(), false); long timeout = admin.getConfiguration() .getLong("hbase.hbck.close.timeout", 120000); - long expiration = timeout + System.currentTimeMillis(); - while (System.currentTimeMillis() < expiration) { + long expiration = timeout + EnvironmentEdgeManager.currentTimeMillis(); + while (EnvironmentEdgeManager.currentTimeMillis() < expiration) { try { HRegionInfo rsRegion = ProtobufUtil.getRegionInfo(rs, region.getRegionName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java index 6ec78d0dff5..e6b2f8d3065 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java @@ -155,7 +155,7 @@ class HMerge { Bytes.toString(tableName) ); this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir); - Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() + + Path logdir = new Path(tabledir, "merge_" + EnvironmentEdgeManager.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME); Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME); this.hlog = new HLog(fs, logdir, oldLogDir, conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index fc08dc26567..c74c0738b8e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -175,13 +175,13 @@ public class JVMClusterUtil { // Wait for an active master // having an active master before starting the region threads allows // then to succeed on their connection to master - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); while 
(findActiveMaster(masters) == null) { try { Thread.sleep(100); } catch (InterruptedException ignored) { } - if (System.currentTimeMillis() > startTime + 30000) { + if (EnvironmentEdgeManager.currentTimeMillis() > startTime + 30000) { throw new RuntimeException("Master not active after 30 seconds"); } } @@ -197,13 +197,13 @@ public class JVMClusterUtil { // Wait for an active master to be initialized (implies being master) // with this, when we return the cluster is complete - startTime = System.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTimeMillis(); while (true) { JVMClusterUtil.MasterThread t = findActiveMaster(masters); if (t != null && t.master.isInitialized()) { return t.master.getServerName().toString(); } - if (System.currentTimeMillis() > startTime + 200000) { + if (EnvironmentEdgeManager.currentTimeMillis() > startTime + 200000) { throw new RuntimeException("Master not initialized after 200 seconds"); } try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java index 85d365f05f4..f78890538cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -335,7 +335,7 @@ public class Merge extends Configured implements Tool { } Delete delete = new Delete(regioninfo.getRegionName(), - System.currentTimeMillis(), null); + EnvironmentEdgeManager.currentTimeMillis(), null); meta.delete(delete, null, true); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java index afea7e9b4a0..6c1ae2d9682 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java @@ -98,7 +98,7 @@ public class MetaUtils { public synchronized HLog getLog() throws IOException { if 
(this.log == null) { Path logdir = new Path(this.fs.getHomeDirectory(), - HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis()); + HConstants.HREGION_LOGDIR_NAME + "_" + EnvironmentEdgeManager.currentTimeMillis()); Path oldLogDir = new Path(this.fs.getHomeDirectory(), HConstants.HREGION_OLDLOGDIR_NAME); this.log = new HLog(this.fs, logdir, oldLogDir, this.conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 8480ed55cb1..fff71599792 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -426,7 +426,7 @@ public class RegionSplitter { daughterRegions.get(rsLocation).add(dr); } LOG.debug("Done with bucketing. Split time!"); - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); // open the split file and modify it as splits finish FSDataInputStream tmpIn = fs.open(splitFile); @@ -544,7 +544,7 @@ public class RegionSplitter { + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); splitCount++; if (splitCount % 10 == 0) { - long tDiff = (System.currentTimeMillis() - startTime) + long tDiff = (EnvironmentEdgeManager.currentTimeMillis() - startTime) / splitCount; LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount + ". 
Avg Time / Split = " @@ -570,7 +570,7 @@ public class RegionSplitter { } LOG.debug("All regions have been successfully split!"); } finally { - long tDiff = System.currentTimeMillis() - startTime; + long tDiff = EnvironmentEdgeManager.currentTimeMillis() - startTime; LOG.debug("TOTAL TIME = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); LOG.debug("Splits = " + splitCount); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java index de84646b8c1..b15210e70f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java @@ -55,7 +55,7 @@ public class Sleeper { * Sleep for period. */ public void sleep() { - sleep(System.currentTimeMillis()); + sleep(EnvironmentEdgeManager.currentTimeMillis()); } /** @@ -78,11 +78,11 @@ public class Sleeper { if (this.stopper.isStopped()) { return; } - long now = System.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTimeMillis(); long waitTime = this.period - (now - startTime); if (waitTime > this.period) { LOG.warn("Calculated wait time > " + this.period + - "; setting to this.period: " + System.currentTimeMillis() + ", " + + "; setting to this.period: " + EnvironmentEdgeManager.currentTimeMillis() + ", " + startTime); waitTime = this.period; } @@ -93,7 +93,7 @@ public class Sleeper { if (triggerWake) break; sleepLock.wait(waitTime); } - woke = System.currentTimeMillis(); + woke = EnvironmentEdgeManager.currentTimeMillis(); long slept = woke - now; if (slept - this.period > MINIMAL_DELTA_FOR_LOGGING) { LOG.warn("We slept " + slept + "ms instead of " + this.period + @@ -109,7 +109,7 @@ public class Sleeper { } } // Recalculate waitTime. - woke = (woke == -1)? System.currentTimeMillis(): woke; + woke = (woke == -1)? 
EnvironmentEdgeManager.currentTimeMillis(): woke; waitTime = this.period - (woke - startTime); } triggerWake = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java index 3ae29d7fac4..739e00e9f17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java @@ -38,6 +38,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.zookeeper.server.NIOServerCnxnFactory; import org.apache.zookeeper.server.ZooKeeperServer; import org.apache.zookeeper.server.persistence.FileTxnLog; @@ -310,7 +311,7 @@ public class MiniZooKeeperCluster { // XXX: From o.a.zk.t.ClientBase private static boolean waitForServerDown(int port, long timeout) { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTimeMillis(); while (true) { try { Socket sock = new Socket("localhost", port); @@ -325,7 +326,7 @@ public class MiniZooKeeperCluster { return true; } - if (System.currentTimeMillis() > start + timeout) { + if (EnvironmentEdgeManager.currentTimeMillis() > start + timeout) { break; } try { @@ -339,7 +340,7 @@ public class MiniZooKeeperCluster { // XXX: From o.a.zk.t.ClientBase private static boolean waitForServerUp(int port, long timeout) { - long start = System.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTimeMillis(); while (true) { try { Socket sock = new Socket("localhost", port); @@ -366,7 +367,7 @@ public class MiniZooKeeperCluster { LOG.info("server localhost:" + port + " not up " + e); } - if (System.currentTimeMillis() > start + timeout) { + if 
(EnvironmentEdgeManager.currentTimeMillis() > start + timeout) { break; } try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index 1a80b588702..ab5e41abebf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.DeserializationException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.zookeeper.AsyncCallback; import org.apache.zookeeper.CreateMode; @@ -1211,7 +1212,7 @@ public class ZKUtil { byte[] data = null; boolean finished = false; - final long endTime = System.currentTimeMillis() + timeout; + final long endTime = EnvironmentEdgeManager.currentTimeMillis() + timeout; while (!finished) { try { data = ZKUtil.getData(zkw, znode); @@ -1219,7 +1220,7 @@ public class ZKUtil { LOG.warn("Unexpected exception handling blockUntilAvailable", e); } - if (data == null && (System.currentTimeMillis() + + if (data == null && (EnvironmentEdgeManager.currentTimeMillis() + HConstants.SOCKET_RETRY_WAIT_MS < endTime)) { Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java index 723fd7713d7..24f73d22d91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.zookeeper.KeeperException; /** @@ -126,7 +127,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { throws InterruptedException { if (timeout < 0) throw new IllegalArgumentException(); boolean notimeout = timeout == 0; - long startTime = System.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTimeMillis(); long remaining = timeout; if (refresh) { try { @@ -165,7 +166,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { // We expect a notification; but we wait with a // a timeout to lower the impact of a race condition if any wait(100); - remaining = timeout - (System.currentTimeMillis() - startTime); + remaining = timeout - (EnvironmentEdgeManager.currentTimeMillis() - startTime); } return this.data; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index 128a0d9f79c..046b368d752 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.WatchedEvent; @@ -324,9 +325,9 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { case SyncConnected: // Now, this callback can be invoked before the this.zookeeper is set. // Wait a little while. 
- long finished = System.currentTimeMillis() + + long finished = EnvironmentEdgeManager.currentTimeMillis() + this.conf.getLong("hbase.zookeeper.watcher.sync.connected.wait", 2000); - while (System.currentTimeMillis() < finished) { + while (EnvironmentEdgeManager.currentTimeMillis() < finished) { Threads.sleep(1); if (this.recoverableZooKeeper != null) break; }