diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index e9ca88b5230..c63e4c62d70 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -1411,7 +1411,7 @@ public class MetaTableAccessor { public static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){ // using regionserver's local time as the timestamp of Put. // See: HBASE-11536 - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now, Bytes.toBytes(sn.getHostAndPort())); p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 592c98c036b..9f229151cc9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -943,7 +943,7 @@ class AsyncProcess { * Starts waiting to issue replica calls on a different thread; or issues them immediately. */ private void startWaitingForReplicaCalls(List> actionsForReplicaThread) { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); ReplicaCallIssuingRunnable replicaRunnable = new ReplicaCallIssuingRunnable( actionsForReplicaThread, startTime); if (primaryCallTimeoutMicroseconds == 0) { @@ -1420,10 +1420,10 @@ class AsyncProcess { private boolean waitUntilDone(long cutoff) throws InterruptedException { boolean hasWait = cutoff != Long.MAX_VALUE; - long lastLog = EnvironmentEdgeManager.currentTimeMillis(); + long lastLog = EnvironmentEdgeManager.currentTime(); long currentInProgress; while (0 != (currentInProgress = actionsInProgress.get())) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (hasWait && (now * 1000L) > cutoff) { return false; } @@ -1503,11 +1503,11 @@ class AsyncProcess { /** Wait until the async does not have more than max tasks in progress. */ private void waitForMaximumCurrentTasks(int max) throws InterruptedIOException { - long lastLog = EnvironmentEdgeManager.currentTimeMillis(); + long lastLog = EnvironmentEdgeManager.currentTime(); long currentInProgress, oldInProgress = Long.MAX_VALUE; while ((currentInProgress = this.tasksInProgress.get()) > max) { if (oldInProgress != currentInProgress) { // Wait for in progress to change. - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > lastLog + 10000) { lastLog = now; LOG.info("#" + id + ", waiting for some tasks to finish. 
Expected max=" diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 08137459b73..7c9c0b93aaf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -2513,7 +2513,7 @@ class ConnectionManager { public ServerErrorTracker(long timeout, int maxRetries) { this.maxRetries = maxRetries; - this.canRetryUntil = EnvironmentEdgeManager.currentTimeMillis() + timeout; + this.canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout; this.startTrackingTime = new Date().getTime(); } @@ -2523,7 +2523,7 @@ class ConnectionManager { boolean canRetryMore(int numRetry) { // If there is a single try we must not take into account the time. return numRetry < maxRetries || (maxRetries > 1 && - EnvironmentEdgeManager.currentTimeMillis() < this.canRetryUntil); + EnvironmentEdgeManager.currentTime() < this.canRetryUntil); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 10e4d049865..4012c1464ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -822,7 +822,7 @@ public class HBaseAdmin implements Admin { */ private void waitUntilTableIsEnabled(final TableName tableName) throws IOException { boolean enabled = false; - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { try { enabled = isTableEnabled(tableName); @@ -848,7 +848,7 @@ public class HBaseAdmin implements Admin { } } if (!enabled) { - long msec = EnvironmentEdgeManager.currentTimeMillis() - start; + long msec = EnvironmentEdgeManager.currentTime() - start; throw new IOException("Table '" + tableName + "' not yet enabled, after " + msec + "ms."); } @@ -2802,7 +2802,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot) .build(); IsSnapshotDoneResponse done = null; - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); long max = response.getExpectedTimeout(); long maxPauseTime = max / this.numRetries; int tries = 0; @@ -2810,7 +2810,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. 
(max " + maxPauseTime + " ms per retry)"); while (tries == 0 - || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done.getDone())) { + || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) { try { // sleep a backoff <= pauseTime amount long sleep = getPauseTime(tries++); @@ -3011,7 +3011,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName .replace("{snapshot.name}", snapshotName) .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.')) - .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTimeMillis())); + .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime())); LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName); snapshot(failSafeSnapshotSnapshotName, tableName); } @@ -3185,7 +3185,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) } }); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); long max = response.getExpectedTimeout(); long maxPauseTime = max / this.numRetries; int tries = 0; @@ -3193,7 +3193,7 @@ public synchronized byte[][] rollHLogWriter(String serverName) signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)"); boolean done = false; while (tries == 0 - || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done)) { + || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) { try { // sleep a backoff <= pauseTime amount long sleep = getPauseTime(tries++); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 9d378aea3a6..4c2c4d2d116 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -485,7 +485,7 @@ public class HTableMultiplexer { int failedCount = 0; while (true) { try { - start = elapsed = EnvironmentEdgeManager.currentTimeMillis(); + start = elapsed = EnvironmentEdgeManager.currentTime(); // Clear the processingList, putToStatusMap and failedCount processingList.clear(); @@ -545,7 +545,7 @@ public class HTableMultiplexer { // Update the totalFailedCount this.totalFailedPutCount.addAndGet(failedCount); - elapsed = EnvironmentEdgeManager.currentTimeMillis() - start; + elapsed = EnvironmentEdgeManager.currentTime() - start; // Update latency counters averageLatency.add(elapsed); if (elapsed > maxLatency.get()) { @@ -566,7 +566,7 @@ public class HTableMultiplexer { // Sleep for a while if (elapsed == start) { - elapsed = EnvironmentEdgeManager.currentTimeMillis() - start; + elapsed = EnvironmentEdgeManager.currentTime() - start; } if (elapsed < frequency) { try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java index 286b6fe9352..f0889e0e503 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java @@ -73,7 +73,7 @@ public class RpcRetryingCaller { } else { if (callTimeout == Integer.MAX_VALUE) return Integer.MAX_VALUE; int remainingTime = (int) (callTimeout - - (EnvironmentEdgeManager.currentTimeMillis() - 
this.globalStartTime)); + (EnvironmentEdgeManager.currentTime() - this.globalStartTime)); if (remainingTime < MIN_RPC_TIMEOUT) { // If there is no time left, we're trying anyway. It's too late. // 0 means no timeout, and it's not the intent here. So we secure both cases by @@ -103,7 +103,7 @@ public class RpcRetryingCaller { throws IOException, RuntimeException { List exceptions = new ArrayList(); - this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis(); + this.globalStartTime = EnvironmentEdgeManager.currentTime(); for (int tries = 0;; tries++) { long expectedSleep; try { @@ -113,7 +113,7 @@ public class RpcRetryingCaller { ExceptionUtil.rethrowIfInterrupt(t); if (LOG.isTraceEnabled()) { LOG.trace("Call exception, tries=" + tries + ", retries=" + retries + ", started=" + - (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime) + " ms ago, " + (EnvironmentEdgeManager.currentTime() - this.globalStartTime) + " ms ago, " + "cancelled=" + cancelled.get(), t); } @@ -122,7 +122,7 @@ public class RpcRetryingCaller { callable.throwable(t, retries != 1); RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(t, - EnvironmentEdgeManager.currentTimeMillis(), toString()); + EnvironmentEdgeManager.currentTime(), toString()); exceptions.add(qt); if (tries >= retries - 1) { throw new RetriesExhaustedException(tries, exceptions); @@ -158,7 +158,7 @@ public class RpcRetryingCaller { * @return Calculate how long a single call took */ private long singleCallDuration(final long expectedSleep) { - return (EnvironmentEdgeManager.currentTimeMillis() - this.globalStartTime) + expectedSleep; + return (EnvironmentEdgeManager.currentTime() - this.globalStartTime) + expectedSleep; } /** @@ -173,7 +173,7 @@ public class RpcRetryingCaller { public T callWithoutRetries(RetryingCallable callable, int callTimeout) throws IOException, RuntimeException { // The code of this method should be shared with withRetries. - this.globalStartTime = EnvironmentEdgeManager.currentTimeMillis(); + this.globalStartTime = EnvironmentEdgeManager.currentTime(); try { callable.prepare(false); return callable.call(callTimeout); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index 6cd422f8211..f15ad0230eb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -252,7 +252,7 @@ public class RpcRetryingCallerWithReadReplicas { RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(t, - EnvironmentEdgeManager.currentTimeMillis(), null); + EnvironmentEdgeManager.currentTime(), null); List exceptions = Collections.singletonList(qt); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java index 369b1f562a8..b4394b7d454 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java @@ -188,7 +188,7 @@ public class RpcClient { * Add an address to the list of the failed servers list. 
*/ public synchronized void addToFailedServers(InetSocketAddress address) { - final long expiry = EnvironmentEdgeManager.currentTimeMillis() + recheckServersTimeout; + final long expiry = EnvironmentEdgeManager.currentTime() + recheckServersTimeout; failedServers.addFirst(new Pair(expiry, address.toString())); } @@ -203,7 +203,7 @@ public class RpcClient { } final String lookup = address.toString(); - final long now = EnvironmentEdgeManager.currentTimeMillis(); + final long now = EnvironmentEdgeManager.currentTime(); // iterate, looking for the search entry and cleaning expired entries Iterator> it = failedServers.iterator(); @@ -261,7 +261,7 @@ public class RpcClient { this.param = param; this.md = md; this.cells = cells; - this.startTime = EnvironmentEdgeManager.currentTimeMillis(); + this.startTime = EnvironmentEdgeManager.currentTime(); this.responseDefaultType = responseDefaultType; this.id = callIdCnt.getAndIncrement(); this.timeout = timeout; @@ -277,7 +277,7 @@ public class RpcClient { return false; } - long waitTime = EnvironmentEdgeManager.currentTimeMillis() - getStartTime(); + long waitTime = EnvironmentEdgeManager.currentTime() - getStartTime(); if (waitTime >= timeout) { IOException ie = new CallTimeoutException("Call id=" + id + ", waitTime=" + waitTime + ", operationTimeout=" + timeout + " expired."); @@ -293,7 +293,7 @@ public class RpcClient { return Integer.MAX_VALUE; } - int remaining = timeout - (int) (EnvironmentEdgeManager.currentTimeMillis() - getStartTime()); + int remaining = timeout - (int) (EnvironmentEdgeManager.currentTime() - getStartTime()); return remaining > 0 ? remaining : 0; } @@ -731,7 +731,7 @@ public class RpcClient { protected synchronized boolean waitForWork() throws InterruptedException { // beware of the concurrent access to the calls list: we can add calls, but as well // remove them. - long waitUntil = EnvironmentEdgeManager.currentTimeMillis() + minIdleTimeBeforeClose; + long waitUntil = EnvironmentEdgeManager.currentTime() + minIdleTimeBeforeClose; while (true) { if (shouldCloseConnection.get()) { @@ -749,7 +749,7 @@ public class RpcClient { return true; } - if (EnvironmentEdgeManager.currentTimeMillis() >= waitUntil) { + if (EnvironmentEdgeManager.currentTime() >= waitUntil) { // Connection is idle. // We expect the number of calls to be zero here, but actually someone can // adds a call at the any moment, as there is no synchronization between this task @@ -820,7 +820,7 @@ public class RpcClient { private synchronized boolean setupSaslConnection(final InputStream in2, final OutputStream out2) throws IOException { saslRpcClient = new HBaseSaslRpcClient(authMethod, token, serverPrincipal, fallbackAllowed, - conf.get("hbase.rpc.protection", + conf.get("hbase.rpc.protection", QualityOfProtection.AUTHENTICATION.name().toLowerCase())); return saslRpcClient.saslConnect(in2, out2); } @@ -1245,7 +1245,7 @@ public class RpcClient { // To catch the calls without timeout that were cancelled. itor.remove(); } else if (allCalls) { - long waitTime = EnvironmentEdgeManager.currentTimeMillis() - c.getStartTime(); + long waitTime = EnvironmentEdgeManager.currentTime() - c.getStartTime(); IOException ie = new IOException("Connection to " + getRemoteAddress() + " is closing. 
Call id=" + c.id + ", waitTime=" + waitTime); c.setException(ie); @@ -1669,7 +1669,7 @@ public class RpcClient { throws ServiceException { long startTime = 0; if (LOG.isTraceEnabled()) { - startTime = EnvironmentEdgeManager.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); } int callTimeout = 0; CellScanner cells = null; @@ -1691,7 +1691,7 @@ public class RpcClient { } if (LOG.isTraceEnabled()) { - long callTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + long callTime = EnvironmentEdgeManager.currentTime() - startTime; LOG.trace("Call: " + md.getName() + ", callTime: " + callTime + "ms"); } return val.getFirst(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java index e8eab936d4e..b8461f205bf 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java @@ -25,15 +25,13 @@ import org.apache.hadoop.classification.InterfaceAudience; */ @InterfaceAudience.Private public class DefaultEnvironmentEdge implements EnvironmentEdge { - - /** * {@inheritDoc} *
<p/>
* This implementation returns {@link System#currentTimeMillis()} */ @Override - public long currentTimeMillis() { + public long currentTime() { return System.currentTimeMillis(); } -} +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java index a43fa66041b..ee8c00a638c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdge.java @@ -28,11 +28,10 @@ import org.apache.hadoop.classification.InterfaceAudience; */ @InterfaceAudience.Private public interface EnvironmentEdge { - /** - * Returns the currentTimeMillis. + * Returns the currentTime. * - * @return currentTimeMillis. + * @return Current time. */ - long currentTimeMillis(); -} + long currentTime(); +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java index c7d4b2503ba..809bbe38f07 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/EnvironmentEdgeManager.java @@ -67,11 +67,11 @@ public class EnvironmentEdgeManager { /** * Defers to the delegate and calls the - * {@link EnvironmentEdge#currentTimeMillis()} method. + * {@link EnvironmentEdge#currentTime()} method. * * @return current time in millis according to the delegate. */ - public static long currentTimeMillis() { - return getDelegate().currentTimeMillis(); + public static long currentTime() { + return getDelegate().currentTime(); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java index c1ce25c33dd..18a258d0f35 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java @@ -50,7 +50,7 @@ public class IncrementingEnvironmentEdge implements EnvironmentEdge { * method is called. The first value is 1. 
*/ @Override - public synchronized long currentTimeMillis() { + public synchronized long currentTime() { return timeIncrement++; } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java index 13e42fb814a..bd9efb5d946 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestEnvironmentEdgeManager.java @@ -58,11 +58,9 @@ public class TestEnvironmentEdgeManager { EnvironmentEdge mock = mock(EnvironmentEdge.class); EnvironmentEdgeManager.injectEdge(mock); long expectation = 3456; - when(mock.currentTimeMillis()).thenReturn(expectation); - long result = EnvironmentEdgeManager.currentTimeMillis(); - verify(mock).currentTimeMillis(); + when(mock.currentTime()).thenReturn(expectation); + long result = EnvironmentEdgeManager.currentTime(); + verify(mock).currentTime(); assertEquals(expectation, result); } - -} - +} \ No newline at end of file diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java index 1180249d2bb..8b6f9756bb8 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java @@ -98,7 +98,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver { @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION") public byte[] getData() { // try at most twice/minute - if (needSetup && EnvironmentEdgeManager.currentTimeMillis() > lastSetupTry + 30000) { + if (needSetup && EnvironmentEdgeManager.currentTime() > lastSetupTry + 30000) { synchronized (this) { // make sure only one thread tries to reconnect if (needSetup) { @@ -112,7 +112,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver { try { LOG.debug("Connecting to ZK"); // record this attempt - lastSetupTry = EnvironmentEdgeManager.currentTimeMillis(); + lastSetupTry = EnvironmentEdgeManager.currentTime(); if (zk.exists(node, false) != null) { data = zk.getData(node, this, null); LOG.debug("Read synchronously: "+(data == null ? 
"null" : Bytes.toLong(data))); @@ -186,7 +186,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver { if (oldSI.getTtl() == Long.MAX_VALUE) { return null; } - long ttl = Math.max(EnvironmentEdgeManager.currentTimeMillis() - + long ttl = Math.max(EnvironmentEdgeManager.currentTime() - Bytes.toLong(data), oldSI.getTtl()); return new ScanInfo(store.getFamily(), ttl, oldSI.getTimeToPurgeDeletes(), oldSI.getComparator()); diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java index 4ab53c72deb..824910aff8c 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java @@ -75,7 +75,7 @@ public class TestZooKeeperScanPolicyObserver { desc.addFamily(hcd); TEST_UTIL.getHBaseAdmin().createTable(desc); HTable t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "test", null); ZooKeeper zk = zkw.getRecoverableZooKeeper().getZooKeeper(); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java index 377bbddd3cb..a1e306d707e 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java @@ -235,7 +235,7 @@ public class IntegrationTestLazyCfLoading { writer.start(1, keysToWrite, WRITER_THREADS); // Now, do scans. - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); long timeLimit = now + (maxRuntime * 60000); boolean isWriterDone = false; while (now < timeLimit && !isWriterDone) { @@ -255,7 +255,7 @@ public class IntegrationTestLazyCfLoading { // Not a strict lower bound - writer knows nothing about filters, so we report // this from generator. Writer might have generated the value but not put it yet. long onesGennedBeforeScan = dataGen.getExpectedNumberOfKeys(); - long startTs = EnvironmentEdgeManager.currentTimeMillis(); + long startTs = EnvironmentEdgeManager.currentTime(); ResultScanner results = table.getScanner(scan); long resultCount = 0; Result result = null; @@ -265,7 +265,7 @@ public class IntegrationTestLazyCfLoading { Assert.assertTrue("Failed to verify [" + Bytes.toString(result.getRow())+ "]", isOk); ++resultCount; } - long timeTaken = EnvironmentEdgeManager.currentTimeMillis() - startTs; + long timeTaken = EnvironmentEdgeManager.currentTime() - startTs; // Verify the result count. 
long onesGennedAfterScan = dataGen.getExpectedNumberOfKeys(); Assert.assertTrue("Read " + resultCount + " keys when at most " + onesGennedAfterScan @@ -280,7 +280,7 @@ public class IntegrationTestLazyCfLoading { LOG.info("Scan took " + timeTaken + "ms"); if (!isWriterDone) { Thread.sleep(WAIT_BETWEEN_SCANS_MS); - now = EnvironmentEdgeManager.currentTimeMillis(); + now = EnvironmentEdgeManager.currentTime(); } } Assert.assertEquals("There are write failures", 0, writer.getNumWriteFailures()); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java index 1e2203ab986..f7f87276da2 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java @@ -246,7 +246,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase { private void runLinkedListMRJob(int iteration) throws Exception { String jobName = IntegrationTestBulkLoad.class.getSimpleName() + " - " + - EnvironmentEdgeManager.currentTimeMillis(); + EnvironmentEdgeManager.currentTime(); Configuration conf = new Configuration(util.getConfiguration()); Path p = util.getDataTestDirOnTestFS(getTablename() + "-" + iteration); HTable table = new HTable(conf, getTablename()); @@ -639,7 +639,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase { private void runCheck() throws IOException, ClassNotFoundException, InterruptedException { LOG.info("Running check"); Configuration conf = getConf(); - String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTimeMillis(); + String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTime(); Path p = util.getDataTestDirOnTestFS(jobName); Job job = new Job(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index f9cc60f9c3d..a04cb88ae13 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -255,7 +255,7 @@ public class HFileArchiver { } // do the actual archive - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); File file = new FileablePath(fs, storeFile); if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) { throw new IOException("Failed to archive/delete the file for region:" @@ -280,7 +280,7 @@ public class HFileArchiver { private static boolean resolveAndArchive(FileSystem fs, Path baseArchiveDir, Collection toArchive) throws IOException { if (LOG.isTraceEnabled()) LOG.trace("Starting to archive " + toArchive); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); List failures = resolveAndArchive(fs, baseArchiveDir, toArchive, start); // notify that some files were not archived. 
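// Illustrative sketch (not part of the patch) of the injectable-clock pattern this
// rename touches at every call site. Only APIs visible elsewhere in this diff are
// assumed: the EnvironmentEdge interface, EnvironmentEdgeManager.currentTime(), and
// EnvironmentEdgeManager.injectEdge() (exercised in the TestEnvironmentEdgeManager
// hunk above). FixedEnvironmentEdge is a hypothetical test helper, not an HBase class.
import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class ClockInjectionSketch {
  // A fixed clock: makes time-dependent code paths deterministic under test.
  static class FixedEnvironmentEdge implements EnvironmentEdge {
    private final long fixedTime;
    FixedEnvironmentEdge(long fixedTime) { this.fixedTime = fixedTime; }
    @Override
    public long currentTime() { // renamed from currentTimeMillis() by this patch
      return fixedTime;
    }
  }

  public static void main(String[] args) {
    // Production path: the default delegate is DefaultEnvironmentEdge, so this is
    // effectively System.currentTimeMillis().
    System.out.println("wall clock: " + EnvironmentEdgeManager.currentTime());

    // Test path: swap in a deterministic clock. Every call site touched by this
    // patch (retry deadlines, TTL cleaners, latency metrics, ...) now observes
    // the injected time instead of the wall clock.
    EnvironmentEdgeManager.injectEdge(new FixedEnvironmentEdge(42L));
    System.out.println("injected clock: " + EnvironmentEdgeManager.currentTime()); // prints 42
  }
}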
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java index 79bfcdea290..ba25ac63815 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutExceptionInjector.java @@ -63,7 +63,7 @@ public class TimeoutExceptionInjector { // mark the task is run, to avoid repeats TimeoutExceptionInjector.this.complete = true; } - long end = EnvironmentEdgeManager.currentTimeMillis(); + long end = EnvironmentEdgeManager.currentTime(); TimeoutException tee = new TimeoutException( "Timeout caused Foreign Exception", start, end, maxTime); String source = "timer-" + timer; @@ -107,7 +107,7 @@ public class TimeoutExceptionInjector { } LOG.debug("Scheduling process timer to run in: " + maxTime + " ms"); timer.schedule(timerTask, maxTime); - this.start = EnvironmentEdgeManager.currentTimeMillis(); + this.start = EnvironmentEdgeManager.currentTime(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index e1108118342..e63471285df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -916,7 +916,7 @@ public class BucketCache implements BlockCache, HeapSize { * cache */ private void checkIOErrorIsTolerated() { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (this.ioErrorStartTime > 0) { if (cacheEnabled && (now - ioErrorStartTime) > this.ioErrorsTolerationDuration) { LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java index 5a290bf86d2..3069dc3b756 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java @@ -32,7 +32,7 @@ public class BucketCacheStats extends CacheStats { private final AtomicLong ioHitCount = new AtomicLong(0); private final AtomicLong ioHitTime = new AtomicLong(0); private final static int nanoTime = 1000000; - private long lastLogTime = EnvironmentEdgeManager.currentTimeMillis(); + private long lastLogTime = EnvironmentEdgeManager.currentTime(); BucketCacheStats() { super("BucketCache"); @@ -50,7 +50,7 @@ public class BucketCacheStats extends CacheStats { } public long getIOHitsPerSecond() { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); long took = (now - lastLogTime) / 1000; lastLogTime = now; return took == 0? 
0: ioHitCount.get() / took; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index a568a29712f..24a6b6f21ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -1551,7 +1551,7 @@ public class AssignmentManager extends ZooKeeperListener { */ boolean assign(final ServerName destination, final List regions) throws InterruptedException { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); try { int regionCount = regions.size(); if (regionCount == 0) { @@ -1739,7 +1739,7 @@ public class AssignmentManager extends ZooKeeperListener { LOG.debug("Bulk assigning done for " + destination); return true; } finally { - metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); + metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTime() - startTime); } } @@ -1828,11 +1828,11 @@ public class AssignmentManager extends ZooKeeperListener { state.updateTimestampToNow(); if (maxWaitTime < 0) { maxWaitTime = - EnvironmentEdgeManager.currentTimeMillis() + EnvironmentEdgeManager.currentTime() + conf.getLong(ALREADY_IN_TRANSITION_WAITTIME, DEFAULT_ALREADY_IN_TRANSITION_WAITTIME); } - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now < maxWaitTime) { LOG.debug("Region is already in transition; " + "waiting up to " + (maxWaitTime - now) + "ms", t); @@ -1972,7 +1972,7 @@ public class AssignmentManager extends ZooKeeperListener { */ private void assign(RegionState state, final boolean setOfflineInZK, final boolean forceNewPlan) { - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); try { Configuration conf = server.getConfiguration(); RegionState currentState = state; @@ -2100,18 +2100,18 @@ public class AssignmentManager extends ZooKeeperListener { if (maxWaitTime < 0) { if (t instanceof RegionAlreadyInTransitionException) { - maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() + maxWaitTime = EnvironmentEdgeManager.currentTime() + this.server.getConfiguration().getLong(ALREADY_IN_TRANSITION_WAITTIME, DEFAULT_ALREADY_IN_TRANSITION_WAITTIME); } else { - maxWaitTime = EnvironmentEdgeManager.currentTimeMillis() + maxWaitTime = EnvironmentEdgeManager.currentTime() + this.server.getConfiguration().getLong( "hbase.regionserver.rpc.startup.waittime", 60000); } } try { needNewPlan = false; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now < maxWaitTime) { LOG.debug("Server is not yet up or region is already in transition; " + "waiting up to " + (maxWaitTime - now) + "ms", t); @@ -2193,7 +2193,7 @@ public class AssignmentManager extends ZooKeeperListener { // Run out of attempts regionStates.updateRegionState(region, State.FAILED_OPEN); } finally { - metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); + metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTime() - startTime); } } @@ -3104,7 +3104,7 @@ public class AssignmentManager extends ZooKeeperListener { public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut) throws 
InterruptedException { if (!regionStates.isRegionInTransition(hri)) return true; - long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTimeMillis() + long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTime() + timeOut; // There is already a timeout monitor on regions in transition so I // should not have to have one here too? @@ -3112,7 +3112,7 @@ public class AssignmentManager extends ZooKeeperListener { " to leave regions-in-transition, timeOut=" + timeOut + " ms."); while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) { regionStates.waitForUpdate(100); - if (EnvironmentEdgeManager.currentTimeMillis() > end) { + if (EnvironmentEdgeManager.currentTime() > end) { LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned."); return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java index 6fe190f61b6..85ec8cfc5c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java @@ -139,7 +139,7 @@ public class ClusterStatusPublisher extends Chore { return; } - final long curTime = EnvironmentEdgeManager.currentTimeMillis(); + final long curTime = EnvironmentEdgeManager.currentTime(); if (lastMessageTime > curTime - messagePeriod) { // We already sent something less than 10 second ago. Done. return; @@ -177,7 +177,7 @@ public class ClusterStatusPublisher extends Chore { */ protected List generateDeadServersListToSend() { // We're getting the message sent since last time, and add them to the list - long since = EnvironmentEdgeManager.currentTimeMillis() - messagePeriod * 2; + long since = EnvironmentEdgeManager.currentTime() - messagePeriod * 2; for (Pair dead : getDeadServers(since)) { lastSent.putIfAbsent(dead.getFirst(), 0); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java index 48b6ccfb71c..5c232a67325 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java @@ -110,7 +110,7 @@ public class DeadServer { public synchronized void add(ServerName sn) { this.numProcessing++; if (!deadServers.containsKey(sn)){ - deadServers.put(sn, EnvironmentEdgeManager.currentTimeMillis()); + deadServers.put(sn, EnvironmentEdgeManager.currentTime()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 36099650817..61394c6dfe6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -383,9 +383,9 @@ public class MasterFileSystem { List logDirs = getLogDirs(serverNames); splitLogManager.handleDeadWorkers(serverNames); - splitTime = EnvironmentEdgeManager.currentTimeMillis(); + splitTime = EnvironmentEdgeManager.currentTime(); splitLogSize = splitLogManager.splitLogDistributed(serverNames, logDirs, filter); - splitTime = EnvironmentEdgeManager.currentTimeMillis() - splitTime; + splitTime = EnvironmentEdgeManager.currentTime() - splitTime; if (this.metricsMasterFilesystem != null) { if 
(filter == META_FILTER) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index b9414cd5db2..a2141111424 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -164,11 +164,11 @@ public class SplitLogManager extends ZooKeeperListener { * @param stopper the stoppable in case anything is wrong * @param master the master services * @param serverName the master server name - * @throws KeeperException - * @throws InterruptedIOException + * @throws KeeperException + * @throws InterruptedIOException */ public SplitLogManager(ZooKeeperWatcher zkw, final Configuration conf, - Stoppable stopper, MasterServices master, ServerName serverName) + Stoppable stopper, MasterServices master, ServerName serverName) throws InterruptedIOException, KeeperException { this(zkw, conf, stopper, master, serverName, new TaskFinisher() { @Override @@ -210,7 +210,7 @@ public class SplitLogManager extends ZooKeeperListener { this.unassignedTimeout = conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT); - // Determine recovery mode + // Determine recovery mode setRecoveryMode(true); LOG.info("Timeout=" + timeout + ", unassigned timeout=" + unassignedTimeout + @@ -314,7 +314,7 @@ public class SplitLogManager extends ZooKeeperListener { LOG.debug("Scheduling batch of logs to split"); SplitLogCounters.tot_mgr_log_split_batch_start.incrementAndGet(); LOG.info("started splitting " + logfiles.length + " logs in " + logDirs); - long t = EnvironmentEdgeManager.currentTimeMillis(); + long t = EnvironmentEdgeManager.currentTime(); long totalSize = 0; TaskBatch batch = new TaskBatch(); Boolean isMetaRecovery = (filter == null) ? null : false; @@ -368,7 +368,7 @@ public class SplitLogManager extends ZooKeeperListener { } String msg = "finished splitting (more than or equal to) " + totalSize + " bytes in " + batch.installed + " log files in " + logDirs + " in " + - (EnvironmentEdgeManager.currentTimeMillis() - t) + "ms"; + (EnvironmentEdgeManager.currentTime() - t) + "ms"; status.markComplete(msg); LOG.info(msg); return totalSize; @@ -386,7 +386,7 @@ public class SplitLogManager extends ZooKeeperListener { // This is a znode path under the splitlog dir with the rest of the path made up of an // url encoding of the passed in log to split. String path = ZKSplitLog.getEncodedNodeName(watcher, taskname); - lastTaskCreateTime = EnvironmentEdgeManager.currentTimeMillis(); + lastTaskCreateTime = EnvironmentEdgeManager.currentTime(); Task oldtask = createTaskIfAbsent(path, batch); if (oldtask == null) { // publish the task in zk @@ -808,7 +808,7 @@ public class SplitLogManager extends ZooKeeperListener { if (task.isUnassigned()) { LOG.info("task " + path + " acquired by " + workerName); } - task.heartbeat(EnvironmentEdgeManager.currentTimeMillis(), new_version, workerName); + task.heartbeat(EnvironmentEdgeManager.currentTime(), new_version, workerName); SplitLogCounters.tot_mgr_heartbeat.incrementAndGet(); } else { // duplicate heartbeats - heartbeats w/o zk node version @@ -831,7 +831,7 @@ public class SplitLogManager extends ZooKeeperListener { // 2) after a configurable timeout if the server is not marked as dead but has still not // finished the task. This allows to continue if the worker cannot actually handle it, // for any reason. 
- final long time = EnvironmentEdgeManager.currentTimeMillis() - task.last_update; + final long time = EnvironmentEdgeManager.currentTime() - task.last_update; final boolean alive = master.getServerManager() != null ? master.getServerManager().isServerOnline(task.cur_worker_name) : true; if (alive && time < timeout) { @@ -863,7 +863,7 @@ public class SplitLogManager extends ZooKeeperListener { if (ZKUtil.setData(this.watcher, path, slt.toByteArray(), version) == false) { LOG.debug("failed to resubmit task " + path + " version changed"); - task.heartbeatNoDetails(EnvironmentEdgeManager.currentTimeMillis()); + task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime()); return false; } } catch (NoNodeException e) { @@ -873,13 +873,13 @@ public class SplitLogManager extends ZooKeeperListener { getDataSetWatchSuccess(path, null, Integer.MIN_VALUE); } catch (DeserializationException e1) { LOG.debug("Failed to re-resubmit task " + path + " because of deserialization issue", e1); - task.heartbeatNoDetails(EnvironmentEdgeManager.currentTimeMillis()); + task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime()); return false; } return false; } catch (KeeperException.BadVersionException e) { LOG.debug("failed to resubmit task " + path + " version changed"); - task.heartbeatNoDetails(EnvironmentEdgeManager.currentTimeMillis()); + task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime()); return false; } catch (KeeperException e) { SplitLogCounters.tot_mgr_resubmit_failed.incrementAndGet(); @@ -951,7 +951,7 @@ public class SplitLogManager extends ZooKeeperListener { // might miss the watch-trigger that creation of RESCAN node provides. // Since the TimeoutMonitor will keep resubmitting UNASSIGNED tasks // therefore this behavior is safe. - lastTaskCreateTime = EnvironmentEdgeManager.currentTimeMillis(); + lastTaskCreateTime = EnvironmentEdgeManager.currentTime(); SplitLogTask slt = new SplitLogTask.Done(this.serverName, this.recoveryMode); this.watcher.getRecoverableZooKeeper().getZooKeeper(). create(ZKSplitLog.getRescanNode(watcher), slt.toByteArray(), @@ -1051,7 +1051,7 @@ public class SplitLogManager extends ZooKeeperListener { task = tasks.get(path); if (task != null || ZKSplitLog.isRescanNode(watcher, path)) { if (task != null) { - task.heartbeatNoDetails(EnvironmentEdgeManager.currentTimeMillis()); + task.heartbeatNoDetails(EnvironmentEdgeManager.currentTime()); } getDataSetWatch(path, zkretries); } @@ -1107,7 +1107,7 @@ public class SplitLogManager extends ZooKeeperListener { try { this.recoveringRegionLock.lock(); // mark that we're creating recovering znodes - this.lastRecoveringNodeCreationTime = EnvironmentEdgeManager.currentTimeMillis(); + this.lastRecoveringNodeCreationTime = EnvironmentEdgeManager.currentTime(); for (HRegionInfo region : userRegions) { String regionEncodeName = region.getEncodedName(); @@ -1243,7 +1243,7 @@ public class SplitLogManager extends ZooKeeperListener { } return result; } - + /** * This function is to set recovery mode from outstanding split log tasks from before or * current configuration setting @@ -1267,7 +1267,7 @@ public class SplitLogManager extends ZooKeeperListener { boolean hasSplitLogTask = false; boolean hasRecoveringRegions = false; RecoveryMode previousRecoveryMode = RecoveryMode.UNKNOWN; - RecoveryMode recoveryModeInConfig = (isDistributedLogReplay(conf)) ? + RecoveryMode recoveryModeInConfig = (isDistributedLogReplay(conf)) ? 
RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING; // Firstly check if there are outstanding recovering regions @@ -1292,7 +1292,7 @@ public class SplitLogManager extends ZooKeeperListener { previousRecoveryMode = slt.getMode(); if (previousRecoveryMode == RecoveryMode.UNKNOWN) { // created by old code base where we don't set recovery mode in splitlogtask - // we can safely set to LOG_SPLITTING because we're in master initialization code + // we can safely set to LOG_SPLITTING because we're in master initialization code // before SSH is enabled & there is no outstanding recovering regions previousRecoveryMode = RecoveryMode.LOG_SPLITTING; } @@ -1319,7 +1319,7 @@ public class SplitLogManager extends ZooKeeperListener { // splitlogtask hasn't drained yet, keep existing recovery mode return; } - + if (previousRecoveryMode != RecoveryMode.UNKNOWN) { this.isDrainingDone = (previousRecoveryMode == recoveryModeInConfig); this.recoveryMode = previousRecoveryMode; @@ -1332,7 +1332,7 @@ public class SplitLogManager extends ZooKeeperListener { public RecoveryMode getRecoveryMode() { return this.recoveryMode; } - + /** * Returns if distributed log replay is turned on or not * @param conf @@ -1497,7 +1497,7 @@ public class SplitLogManager extends ZooKeeperListener { } } if (tot > 0) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > lastLog + 5000) { lastLog = now; LOG.info("total tasks = " + tot + " unassigned = " + unassigned + " tasks=" + tasks); @@ -1516,7 +1516,7 @@ public class SplitLogManager extends ZooKeeperListener { // master should spawn both a manager and a worker thread to guarantee // that there is always one worker in the system if (tot > 0 && !found_assigned_task && - ((EnvironmentEdgeManager.currentTimeMillis() - lastTaskCreateTime) > + ((EnvironmentEdgeManager.currentTime() - lastTaskCreateTime) > unassignedTimeout)) { for (Map.Entry e : tasks.entrySet()) { String path = e.getKey(); @@ -1546,7 +1546,7 @@ public class SplitLogManager extends ZooKeeperListener { } // Garbage collect left-over /hbase/recovering-regions/... 
znode - long timeInterval = EnvironmentEdgeManager.currentTimeMillis() + long timeInterval = EnvironmentEdgeManager.currentTime() - lastRecoveringNodeCreationTime; if (!failedRecoveringRegionDeletions.isEmpty() || (tot == 0 && tasks.size() == 0 && (timeInterval > checkRecoveringTimeThreshold))) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java index faa11e3334f..f111f4bd95d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java @@ -326,7 +326,7 @@ public abstract class TableLockManager { .setThreadId(Thread.currentThread().getId()) .setPurpose(purpose) .setIsShared(isShared) - .setCreateTime(EnvironmentEdgeManager.currentTimeMillis()).build(); + .setCreateTime(EnvironmentEdgeManager.currentTime()).build(); byte[] lockMetadata = toBytes(data); InterProcessReadWriteLock lock = new ZKInterProcessReadWriteLock(zkWatcher, tableLockZNode, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 99d794d97b6..6a83dc3dbb5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -90,10 +90,10 @@ public class TableNamespaceManager { // Wait for the namespace table to be assigned. // If timed out, we will move ahead without initializing it. // So that it should be initialized later on lazily. - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); int timeout = conf.getInt(NS_INIT_TIMEOUT, DEFAULT_NS_INIT_TIMEOUT); while (!isTableAssigned()) { - if (EnvironmentEdgeManager.currentTimeMillis() - startTime + 100 > timeout) { + if (EnvironmentEdgeManager.currentTime() - startTime + 100 > timeout) { LOG.warn("Timedout waiting for namespace table to be assigned."); return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java index ae59f265a77..79fd21ec59d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java @@ -144,7 +144,7 @@ public class FavoredNodeAssignmentHelper { put = MetaTableAccessor.makePutFromRegionInfo(regionInfo); byte[] favoredNodes = getFavoredNodes(favoredNodeList); put.addImmutable(HConstants.CATALOG_FAMILY, FAVOREDNODES_QUALIFIER, - EnvironmentEdgeManager.currentTimeMillis(), favoredNodes); + EnvironmentEdgeManager.currentTime(), favoredNodes); LOG.info("Create the region " + regionInfo.getRegionNameAsString() + " with favored nodes " + Bytes.toString(favoredNodes)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index e353316abc4..8f6314f12e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -220,7 +220,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { return null; } - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); initCosts(cluster); @@ -259,13 +259,13 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { updateCostsWithAction(cluster, undoAction); } - if (EnvironmentEdgeManager.currentTimeMillis() - startTime > + if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) { break; } } - long endTime = EnvironmentEdgeManager.currentTimeMillis(); + long endTime = EnvironmentEdgeManager.currentTime(); metricsBalancer.balanceCluster(endTime - startTime); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java index b65fbf9a84b..46fe971da18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java @@ -46,7 +46,7 @@ public class TimeToLiveHFileCleaner extends BaseHFileCleanerDelegate { @Override public boolean isFileDeletable(FileStatus fStat) { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); long time = fStat.getModificationTime(); long life = currentTime - time; if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java index 66b04230ea2..f02195404bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java @@ -37,7 +37,7 @@ public class TimeToLiveLogCleaner extends BaseLogCleanerDelegate { @Override public boolean isLogDeletable(FileStatus fStat) { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); long time = fStat.getModificationTime(); long life = currentTime - time; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java index d09ea64362a..53f4108e678 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java @@ -92,7 +92,7 @@ public class DispatchMergingRegionHandler extends EventHandler { .getEncodedName()) + " is not online now"); return; } - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); boolean onSameRS = region_a_location.equals(region_b_location); // Make sure regions are on the same regionserver before send merge @@ -134,7 +134,7 @@ public class DispatchMergingRegionHandler extends EventHandler { // RegionInTransition any more break; } - if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout) break; + if ((EnvironmentEdgeManager.currentTime() - startTime) > timeout) break; } catch (InterruptedException e) { 
InterruptedIOException iioe = new InterruptedIOException(); iioe.initCause(e); @@ -144,7 +144,7 @@ public class DispatchMergingRegionHandler extends EventHandler { } if (onSameRS) { - startTime = EnvironmentEdgeManager.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); while (!masterServices.isStopped()) { try { masterServices.getServerManager().sendRegionsMerge(region_a_location, @@ -153,7 +153,7 @@ public class DispatchMergingRegionHandler extends EventHandler { region_a.getEncodedName() + "," + region_b.getEncodedName() + ", focible=" + forcible); break; } catch (RegionOpeningException roe) { - if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout) { + if ((EnvironmentEdgeManager.currentTime() - startTime) > timeout) { LOG.warn("Failed sending merge to " + region_a_location + " after " + timeout + "ms", roe); break; @@ -170,7 +170,7 @@ public class DispatchMergingRegionHandler extends EventHandler { LOG.info("Cancel merging regions " + region_a.getRegionNameAsString() + ", " + region_b.getRegionNameAsString() + ", because can't move them together after " - + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms"); + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 3bf704ad0f7..31ec0982af8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -866,7 +866,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @param sentinels map of sentinels to clean */ private synchronized void cleanupSentinels(final Map sentinels) { - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); Iterator> it = sentinels.entrySet().iterator(); while (it.hasNext()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 23e1cd5e037..27fd87a4239 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -474,9 +474,9 @@ public class CompactSplitThread implements CompactionRequestor { try { // Note: please don't put single-compaction logic here; // put it into region/store/etc. This is CST logic. - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); boolean completed = region.compact(compaction, store); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); LOG.info(((completed) ? 
"Completed" : "Aborted") + " compaction: " + this + "; duration=" + StringUtils.formatTimeDiff(now, start)); if (completed) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index 5a4c904b1d1..4417bd9b958 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -360,7 +360,7 @@ public class CompactionTool extends Configured implements Tool { Path stagingDir = JobUtil.getStagingDir(conf); try { // Create input file with the store dirs - Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTimeMillis()); + Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTime()); CompactionInputFormat.createInputFile(fs, inputPath, toCompactDirs); CompactionInputFormat.addInputPath(job, inputPath); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index d90357b98e5..759f84295e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -150,7 +150,7 @@ public class DefaultMemStore implements MemStore { LOG.warn("Snapshot called again without clearing previous. " + "Doing nothing. Another ongoing flush or did we fail last attempt?"); } else { - this.snapshotId = EnvironmentEdgeManager.currentTimeMillis(); + this.snapshotId = EnvironmentEdgeManager.currentTime(); this.snapshotSize = keySize(); if (!this.kvset.isEmpty()) { this.snapshot = this.kvset; @@ -239,7 +239,7 @@ public class DefaultMemStore implements MemStore { void setOldestEditTimeToNow() { if (timeOfOldestEdit == Long.MAX_VALUE) { - timeOfOldestEdit = EnvironmentEdgeManager.currentTimeMillis(); + timeOfOldestEdit = EnvironmentEdgeManager.currentTime(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 7c9f527eb41..9e8c5a2fc75 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -754,7 +754,7 @@ public class HRegion implements HeapSize { // , Writable{ // Initialize split policy this.splitPolicy = RegionSplitPolicy.create(this, conf); - this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis(); + this.lastFlushTime = EnvironmentEdgeManager.currentTime(); // Use maximum of log sequenceid or that which was found in stores // (particularly if no recovered edits, seqid will be -1). 
long nextSeqid = maxSeqId + 1; @@ -1685,7 +1685,7 @@ public class HRegion implements HeapSize { // , Writable{ if (flushCheckInterval <= 0) { //disabled return false; } - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); //if we flushed in the recent past, we don't need to do again now if ((now - getLastFlushTime() < flushCheckInterval)) { return false; @@ -1736,7 +1736,7 @@ public class HRegion implements HeapSize { // , Writable{ // Don't flush when server aborting, it's unsafe throw new IOException("Aborting flush because server is aborted..."); } - final long startTime = EnvironmentEdgeManager.currentTimeMillis(); + final long startTime = EnvironmentEdgeManager.currentTime(); // If nothing to flush, return, but we need to safely update the region sequence id if (this.memstoreSize.get() <= 0) { // Take an update lock because am about to change the sequence id and we want the sequence id @@ -1948,7 +1948,7 @@ public class HRegion implements HeapSize { // , Writable{ } // Record latest flush time - this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis(); + this.lastFlushTime = EnvironmentEdgeManager.currentTime(); // Update the last flushed sequence id for region. TODO: This is dup'd inside the WAL/FSHlog. this.lastFlushSeqId = flushSeqId; @@ -1959,7 +1959,7 @@ public class HRegion implements HeapSize { // , Writable{ notifyAll(); // FindBugs NN_NAKED_NOTIFY } - long time = EnvironmentEdgeManager.currentTimeMillis() - startTime; + long time = EnvironmentEdgeManager.currentTime() - startTime; long memstoresize = this.memstoreSize.get(); String msg = "Finished memstore flush of ~" + StringUtils.byteDesc(totalFlushableSize) + "/" + totalFlushableSize + @@ -2503,7 +2503,7 @@ public class HRegion implements HeapSize { // , Writable{ // we acquire at least one. // ---------------------------------- int numReadyToWrite = 0; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); while (lastIndexExclusive < batchOp.operations.length) { Mutation mutation = batchOp.getMutation(lastIndexExclusive); boolean isPutMutation = mutation instanceof Put; @@ -2588,7 +2588,7 @@ public class HRegion implements HeapSize { // , Writable{ // we should record the timestamp only after we have acquired the rowLock, // otherwise, newer puts/deletes are not guaranteed to have a newer timestamp - now = EnvironmentEdgeManager.currentTimeMillis(); + now = EnvironmentEdgeManager.currentTime(); byte[] byteNow = Bytes.toBytes(now); // Nothing to put/delete -- an exception in the above such as NoSuchColumnFamily? @@ -3358,7 +3358,7 @@ public class HRegion implements HeapSize { // , Writable{ 2000); // How often to send a progress report (default 1/2 master timeout) int period = this.conf.getInt("hbase.hstore.report.period", 300000); - long lastReport = EnvironmentEdgeManager.currentTimeMillis(); + long lastReport = EnvironmentEdgeManager.currentTime(); while ((entry = reader.next()) != null) { HLogKey key = entry.getKey(); @@ -3373,7 +3373,7 @@ public class HRegion implements HeapSize { // , Writable{ if (intervalEdits >= interval) { // Number of edits interval reached intervalEdits = 0; - long cur = EnvironmentEdgeManager.currentTimeMillis(); + long cur = EnvironmentEdgeManager.currentTime(); if (lastReport + period <= cur) { status.setStatus("Replaying edits..." 
+ " skipped=" + skippedEdits + @@ -4714,7 +4714,7 @@ public class HRegion implements HeapSize { // , Writable{ meta.checkResources(); // The row key is the region name byte[] row = r.getRegionName(); - final long now = EnvironmentEdgeManager.currentTimeMillis(); + final long now = EnvironmentEdgeManager.currentTime(); final List cells = new ArrayList(2); cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, now, @@ -5013,7 +5013,7 @@ public class HRegion implements HeapSize { // , Writable{ // Short circuit the read only case if (processor.readOnly()) { try { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); doProcessRowWithTimeout( processor, now, this, null, null, timeout); processor.postProcess(this, walEdit, true); @@ -5048,7 +5048,7 @@ public class HRegion implements HeapSize { // , Writable{ // Get a mvcc write number mvccNum = MultiVersionConsistencyControl.getPreAssignedWriteNumber(this.sequenceId); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); try { // 4. Let the processor scan the rows, generate mutations and add // waledits @@ -5249,7 +5249,7 @@ public class HRegion implements HeapSize { // , Writable{ // now start my own transaction mvccNum = MultiVersionConsistencyControl.getPreAssignedWriteNumber(this.sequenceId); w = mvcc.beginMemstoreInsertWithSeqNum(mvccNum); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Process each family for (Map.Entry> family : append.getFamilyCellMap().entrySet()) { @@ -5467,7 +5467,7 @@ public class HRegion implements HeapSize { // , Writable{ // now start my own transaction mvccNum = MultiVersionConsistencyControl.getPreAssignedWriteNumber(this.sequenceId); w = mvcc.beginMemstoreInsertWithSeqNum(mvccNum); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Process each family for (Map.Entry> family: increment.getFamilyCellMap().entrySet()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 1e31fa19153..94d19d990aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -2020,7 +2020,7 @@ public class HRegionServer extends HasThread implements rpcServices.requestCount.set(0); LOG.info("reportForDuty to master=" + masterServerName + " with port=" + rpcServices.isa.getPort() + ", startcode=" + this.startcode); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); int port = rpcServices.isa.getPort(); RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); request.setPort(port); @@ -2702,7 +2702,7 @@ public class HRegionServer extends HasThread implements public MovedRegionInfo(ServerName serverName, long closeSeqNum) { this.serverName = serverName; this.seqNum = closeSeqNum; - ts = EnvironmentEdgeManager.currentTimeMillis(); + ts = EnvironmentEdgeManager.currentTime(); } public ServerName getServerName() { @@ -2744,7 +2744,7 @@ public class HRegionServer extends HasThread implements private MovedRegionInfo getMovedRegion(final String encodedRegionName) { MovedRegionInfo dest = movedRegions.get(encodedRegionName); - long now = 
EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (dest != null) { if (dest.getMoveTime() > (now - TIMEOUT_REGION_MOVED)) { return dest; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index d44ce07f52d..00ebbfba7cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -774,7 +774,7 @@ public class HStore implements Store { + " into store " + this + " (new location: " + dstPath + ")"); if (LOG.isTraceEnabled()) { String traceMessage = "BULK LOAD time,size,store size,store files [" - + EnvironmentEdgeManager.currentTimeMillis() + "," + r.length() + "," + storeSize + + EnvironmentEdgeManager.currentTime() + "," + r.length() + "," + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]"; LOG.trace(traceMessage); } @@ -1018,7 +1018,7 @@ public class HStore implements Store { totalSize += sf.getReader().length(); } String traceMessage = "FLUSH time,count,size,store size,store files [" - + EnvironmentEdgeManager.currentTimeMillis() + "," + sfs.size() + "," + totalSize + + EnvironmentEdgeManager.currentTime() + "," + sfs.size() + "," + totalSize + "," + storeSize + "," + storeEngine.getStoreFileManager().getStorefileCount() + "]"; LOG.trace(traceMessage); } @@ -1147,7 +1147,7 @@ public class HStore implements Store { + " into tmpdir=" + fs.getTempDir() + ", totalSize=" + StringUtils.humanReadableInt(cr.getSize())); - long compactionStartTime = EnvironmentEdgeManager.currentTimeMillis(); + long compactionStartTime = EnvironmentEdgeManager.currentTime(); List sfs = null; try { // Commence the compaction. @@ -1251,7 +1251,7 @@ public class HStore implements Store { */ private void logCompactionEndMessage( CompactionRequest cr, List sfs, long compactionStartTime) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); StringBuilder message = new StringBuilder( "Completed" + (cr.isMajor() ? " major" : "") + " compaction of " + cr.getFiles().size() + (cr.isAllFiles() ? 
" (all)" : "") + " file(s) in " @@ -1523,7 +1523,7 @@ public class HStore implements Store { long cfTtl = getStoreFileTtl(); if (cfTtl != Long.MAX_VALUE) { delSfs = storeEngine.getStoreFileManager().getUnneededFiles( - EnvironmentEdgeManager.currentTimeMillis() - cfTtl, filesCompacting); + EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting); addToCompactingFiles(delSfs); } } @@ -2021,7 +2021,7 @@ public class HStore implements Store { this.lock.readLock().lock(); try { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); return this.memstore.updateColumnValue(row, f, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java index 9ff77411119..4673d0dfd2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java @@ -290,7 +290,7 @@ public class Leases extends HasThread { } public long getDelay(TimeUnit unit) { - return unit.convert(this.expirationTime - EnvironmentEdgeManager.currentTimeMillis(), + return unit.convert(this.expirationTime - EnvironmentEdgeManager.currentTime(), TimeUnit.MILLISECONDS); } @@ -305,7 +305,7 @@ public class Leases extends HasThread { * Resets the expiration time of the lease. */ public void resetExpirationTime() { - this.expirationTime = EnvironmentEdgeManager.currentTimeMillis() + this.leaseTimeoutPeriod; + this.expirationTime = EnvironmentEdgeManager.currentTime() + this.leaseTimeoutPeriod; } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 9f7ac98fa93..70f7e2d1925 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -375,7 +375,7 @@ class MemStoreFlusher implements FlushRequester { if (!region.getRegionInfo().isMetaRegion() && isTooManyStoreFiles(region)) { if (fqe.isMaximumWait(this.blockingWaitTime)) { - LOG.info("Waited " + (EnvironmentEdgeManager.currentTimeMillis() - fqe.createTime) + + LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) + "ms on a compaction to clean up 'too many store files'; waited " + "long enough... proceeding with flush of " + region.getRegionNameAsString()); @@ -500,7 +500,7 @@ class MemStoreFlusher implements FlushRequester { if (Trace.isTracing()) { scope.getSpan().addTimelineAnnotation("Force Flush. 
We're above high water mark."); } - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); synchronized (this.blockSignal) { boolean blocked = false; long startTime = 0; @@ -508,7 +508,7 @@ class MemStoreFlusher implements FlushRequester { try { while (isAboveHighWaterMark() && !server.isStopped()) { if (!blocked) { - startTime = EnvironmentEdgeManager.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); LOG.info("Blocking updates on " + server.toString() + ": the global memstore size " + StringUtils.humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreSize()) + @@ -525,7 +525,7 @@ class MemStoreFlusher implements FlushRequester { LOG.warn("Interrupted while waiting"); interrupted = true; } - long took = EnvironmentEdgeManager.currentTimeMillis() - start; + long took = EnvironmentEdgeManager.currentTime() - start; LOG.warn("Memstore is above high water mark and block " + took + "ms"); } } finally { @@ -535,7 +535,7 @@ class MemStoreFlusher implements FlushRequester { } if(blocked){ - final long totalTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + final long totalTime = EnvironmentEdgeManager.currentTime() - startTime; if(totalTime > 0){ this.updatesBlockedMsHighWater.add(totalTime); } @@ -639,7 +639,7 @@ class MemStoreFlusher implements FlushRequester { FlushRegionEntry(final HRegion r) { this.region = r; - this.createTime = EnvironmentEdgeManager.currentTimeMillis(); + this.createTime = EnvironmentEdgeManager.currentTime(); this.whenToExpire = this.createTime; } @@ -648,7 +648,7 @@ class MemStoreFlusher implements FlushRequester { * @return True if we have been delayed > maximumWait milliseconds. */ public boolean isMaximumWait(final long maximumWait) { - return (EnvironmentEdgeManager.currentTimeMillis() - this.createTime) > maximumWait; + return (EnvironmentEdgeManager.currentTime() - this.createTime) > maximumWait; } /** @@ -661,19 +661,19 @@ class MemStoreFlusher implements FlushRequester { /** * @param when When to expire, when to come up out of the queue. - * Specify in milliseconds. This method adds EnvironmentEdgeManager.currentTimeMillis() + * Specify in milliseconds. This method adds EnvironmentEdgeManager.currentTime() * to whatever you pass. * @return This. */ public FlushRegionEntry requeue(final long when) { - this.whenToExpire = EnvironmentEdgeManager.currentTimeMillis() + when; + this.whenToExpire = EnvironmentEdgeManager.currentTime() + when; this.requeueCount++; return this; } @Override public long getDelay(TimeUnit unit) { - return unit.convert(this.whenToExpire - EnvironmentEdgeManager.currentTimeMillis(), + return unit.convert(this.whenToExpire - EnvironmentEdgeManager.currentTime(), TimeUnit.MILLISECONDS); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 5da1ea1e751..ff39a1e8677 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -460,7 +460,7 @@ class MetricsRegionServerWrapperImpl //Compute the number of requests per second - long currentTime = EnvironmentEdgeManager.currentTimeMillis(); + long currentTime = EnvironmentEdgeManager.currentTime(); // assume that it took PERIOD seconds to start the executor. 
// this is a guess but it's a pretty good one. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 04e79955631..01033a90574 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -394,7 +394,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, */ private Result append(final HRegion region, final MutationProto m, final CellScanner cellScanner, long nonceGroup) throws IOException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); Append append = ProtobufUtil.toAppend(m, cellScanner); Result r = null; if (region.getCoprocessorHost() != null) { @@ -415,7 +415,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } if (regionServer.metricsRegionServer != null) { regionServer.metricsRegionServer.updateAppend( - EnvironmentEdgeManager.currentTimeMillis() - before); + EnvironmentEdgeManager.currentTime() - before); } return r; } @@ -430,7 +430,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, */ private Result increment(final HRegion region, final MutationProto mutation, final CellScanner cells, long nonceGroup) throws IOException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); Increment increment = ProtobufUtil.toIncrement(mutation, cells); Result r = null; if (region.getCoprocessorHost() != null) { @@ -451,7 +451,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } if (regionServer.metricsRegionServer != null) { regionServer.metricsRegionServer.updateIncrement( - EnvironmentEdgeManager.currentTimeMillis() - before); + EnvironmentEdgeManager.currentTime() - before); } return r; } @@ -569,7 +569,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private void doBatchOp(final RegionActionResult.Builder builder, final HRegion region, final List mutations, final CellScanner cells) { Mutation[] mArray = new Mutation[mutations.size()]; - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); boolean batchContainsPuts = false, batchContainsDelete = false; try { int i = 0; @@ -622,7 +622,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } if (regionServer.metricsRegionServer != null) { - long after = EnvironmentEdgeManager.currentTimeMillis(); + long after = EnvironmentEdgeManager.currentTime(); if (batchContainsPuts) { regionServer.metricsRegionServer.updatePut(after - before); } @@ -645,7 +645,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private OperationStatus [] doReplayBatchOp(final HRegion region, final List mutations, long replaySeqId) throws IOException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); boolean batchContainsPuts = false, batchContainsDelete = false; try { for (Iterator it = mutations.iterator(); it.hasNext();) { @@ -677,7 +677,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, new HLogSplitter.MutationReplay[mutations.size()]), replaySeqId); } finally { if (regionServer.metricsRegionServer != null) { - long after = EnvironmentEdgeManager.currentTimeMillis(); + long after = EnvironmentEdgeManager.currentTime(); if (batchContainsPuts) { 
regionServer.metricsRegionServer.updatePut(after - before); } @@ -1362,7 +1362,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, @QosPriority(priority = HConstants.REPLAY_QOS) public ReplicateWALEntryResponse replay(final RpcController controller, final ReplicateWALEntryRequest request) throws ServiceException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); CellScanner cells = ((PayloadCarryingRpcController) controller).cellScanner(); try { checkOpen(); @@ -1426,7 +1426,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } finally { if (regionServer.metricsRegionServer != null) { regionServer.metricsRegionServer.updateReplay( - EnvironmentEdgeManager.currentTimeMillis() - before); + EnvironmentEdgeManager.currentTime() - before); } } } @@ -1619,7 +1619,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, @Override public GetResponse get(final RpcController controller, final GetRequest request) throws ServiceException { - long before = EnvironmentEdgeManager.currentTimeMillis(); + long before = EnvironmentEdgeManager.currentTime(); try { checkOpen(); requestCount.increment(); @@ -1669,7 +1669,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } finally { if (regionServer.metricsRegionServer != null) { regionServer.metricsRegionServer.updateGet( - EnvironmentEdgeManager.currentTimeMillis() - before); + EnvironmentEdgeManager.currentTime() - before); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java index b51f8e15d04..b22977aca4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java @@ -64,7 +64,7 @@ class RegionMergeRequest implements Runnable { return; } try { - final long startTime = EnvironmentEdgeManager.currentTimeMillis(); + final long startTime = EnvironmentEdgeManager.currentTime(); RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_b, forcible); @@ -116,7 +116,7 @@ class RegionMergeRequest implements Runnable { + region_a + ", region_b=" + region_b + ",merged region=" + mt.getMergedRegionInfo().getRegionNameAsString() + ". Region merge took " - + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTimeMillis(), startTime)); + + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTime(), startTime)); } catch (IOException ex) { LOG.error("Merge failed " + this, RemoteExceptionHandler.checkIOException(ex)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java index 2db8d7e209f..eb2349cf48b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java @@ -522,7 +522,7 @@ public class RegionMergeTransaction { */ public static HRegionInfo getMergedRegionInfo(final HRegionInfo a, final HRegionInfo b) { - long rid = EnvironmentEdgeManager.currentTimeMillis(); + long rid = EnvironmentEdgeManager.currentTime(); // Regionid is timestamp. 
Merged region's id can't be less than that of // merging regions else will insert at wrong location in hbase:meta if (rid < a.getRegionId() || rid < b.getRegionId()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 205c7f854bb..f2e8bfb915b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -337,7 +337,7 @@ public class ScanQueryMatcher { if ((!isUserScan) && timeToPurgeDeletes > 0 - && (EnvironmentEdgeManager.currentTimeMillis() - timestamp) + && (EnvironmentEdgeManager.currentTime() - timestamp) <= timeToPurgeDeletes) { return MatchCode.INCLUDE; } else if (retainDeletesInOutput || mvccVersion > maxReadPointToTrackVersions) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java index faf9138bdb4..421f54e9e27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ServerNonceManager.java @@ -95,7 +95,7 @@ public class ServerNonceManager { } public void reportActivity() { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); this.data = (this.data & ALL_FLAG_BITS) | (now << 3); } @@ -237,7 +237,7 @@ public class ServerNonceManager { public void reportOperationFromWal(long group, long nonce, long writeTime) { if (nonce == HConstants.NO_NONCE) return; // Give the write time some slack in case the clocks are not synchronized. - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (now > writeTime + (deleteNonceGracePeriod * 1.5)) return; OperationContext newResult = new OperationContext(); newResult.setState(OperationContext.DONT_PROCEED); @@ -267,7 +267,7 @@ public class ServerNonceManager { } private void cleanUpOldNonces() { - long cutoff = EnvironmentEdgeManager.currentTimeMillis() - deleteNonceGracePeriod; + long cutoff = EnvironmentEdgeManager.currentTime() - deleteNonceGracePeriod; for (Map.Entry entry : nonces.entrySet()) { OperationContext oc = entry.getValue(); if (!oc.isExpired(cutoff)) continue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index c5b29e6d7de..c9f095c86fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -183,7 +183,7 @@ public class SplitTransaction { * @return Daughter region id (timestamp) to use. */ private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) { - long rid = EnvironmentEdgeManager.currentTimeMillis(); + long rid = EnvironmentEdgeManager.currentTime(); // Regionid is timestamp. Can't be less than that of parent else will insert // at wrong location in hbase:meta (See HBASE-710). 
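Aside on the two region-id hunks (RegionMergeTransaction above, SplitTransaction continuing below): the region id is a timestamp taken from the injectable clock, clamped so it never sorts below the parent regions' ids in hbase:meta. A condensed sketch under that reading; the helper name is invented:

    // Hypothetical helper condensing the clamp used by both merge and split.
    static long regionIdNotOlderThan(long parentRegionId) {
      long rid = EnvironmentEdgeManager.currentTime();
      // A lagging clock must not produce an id below the parent's, else the
      // new region's row would sort to the wrong place in hbase:meta.
      return (rid < parentRegionId) ? parentRegionId + 1 : rid;
    }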
if (rid < hri.getRegionId()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 1ef3e914fbe..7160b309608 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -122,7 +122,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner explicitColumnQuery = numCol > 0; this.scan = scan; this.columns = columns; - oldestUnexpiredTS = EnvironmentEdgeManager.currentTimeMillis() - ttl; + oldestUnexpiredTS = EnvironmentEdgeManager.currentTime() - ttl; this.minVersions = minVersions; if (store != null && ((HStore)store).getHRegion() != null diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java index 1665713b977..92a86e19d7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java @@ -79,7 +79,7 @@ public class StorefileRefresherChore extends Chore { continue; } String encodedName = r.getRegionInfo().getEncodedName(); - long time = EnvironmentEdgeManager.currentTimeMillis(); + long time = EnvironmentEdgeManager.currentTime(); if (!lastRefreshTimes.containsKey(encodedName)) { lastRefreshTimes.put(encodedName, time); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 5c9a2339c5f..0fc64d20e9b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -63,7 +63,7 @@ public class CompactionRequest implements Comparable { * This ctor should be used by coprocessors that want to subclass CompactionRequest. */ public CompactionRequest() { - this.selectionTime = EnvironmentEdgeManager.currentTimeMillis(); + this.selectionTime = EnvironmentEdgeManager.currentTime(); this.timeInNanos = System.nanoTime(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java index 22697e987f9..9edb3174e4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java @@ -303,7 +303,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { if (cfTtl == Long.MAX_VALUE) { return null; // minversion might be set, cannot delete old files } - long timestampCutoff = EnvironmentEdgeManager.currentTimeMillis() - cfTtl; + long timestampCutoff = EnvironmentEdgeManager.currentTime() - cfTtl; // Merge the longest sequence of stripes where all files have expired, if any. 
int start = -1, bestStart = -1, length = 0, bestLength = 0; ArrayList> stripes = si.getStripes(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index b81619277dd..3ef8206ff4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -1911,7 +1911,7 @@ class FSHLog implements HLog, Syncable { // TODO: WORK ON MAKING THIS APPEND FASTER. DOING WAY TOO MUCH WORK WITH CPs, PBing, etc. atHeadOfRingBufferEventHandlerAppend(); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); byte [] encodedRegionName = entry.getKey().getEncodedRegionName(); long regionSequenceId = HLog.NO_SEQUENCE_ID; try { @@ -1954,7 +1954,7 @@ class FSHLog implements HLog, Syncable { coprocessorHost.postWALWrite(entry.getHRegionInfo(), entry.getKey(), entry.getEdit()); // Update metrics. - postAppend(entry, EnvironmentEdgeManager.currentTimeMillis() - start); + postAppend(entry, EnvironmentEdgeManager.currentTime() - start); } catch (Exception e) { LOG.fatal("Could not append. Requesting close of hlog", e); requestLogRoll(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java index 276e16cb794..41d091061c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java @@ -103,7 +103,7 @@ public class HLogFactory { // A hlog file could be under recovery, so it may take several // tries to get it open. Instead of claiming it is corrupted, retry // to open it up to 5 minutes by default. 
- long startWaiting = EnvironmentEdgeManager.currentTimeMillis(); + long startWaiting = EnvironmentEdgeManager.currentTime(); long openTimeout = conf.getInt("hbase.hlog.open.timeout", 300000) + startWaiting; int nbAttempt = 0; while (true) { @@ -138,9 +138,9 @@ public class HLogFactory { if (reporter != null && !reporter.progress()) { throw new InterruptedIOException("Operation is cancelled"); } - if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTimeMillis()) { + if (nbAttempt > 2 && openTimeout < EnvironmentEdgeManager.currentTime()) { LOG.error("Can't open after " + nbAttempt + " attempts and " - + (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms " + " for " + path); } else { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java index cc10cb22f6f..60eda989ecc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java @@ -209,7 +209,7 @@ public class HLogKey implements WritableComparable, SequenceNumber { */ public HLogKey(final byte [] encodedRegionName, final TableName tablename, long logSeqNum, long nonceGroup, long nonce) { - init(encodedRegionName, tablename, logSeqNum, EnvironmentEdgeManager.currentTimeMillis(), + init(encodedRegionName, tablename, logSeqNum, EnvironmentEdgeManager.currentTime(), EMPTY_UUIDS, nonceGroup, nonce); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java index de31f24b40f..16b491fb0f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java @@ -1643,14 +1643,14 @@ public class HLogSplitter { private HRegionLocation waitUntilRegionOnline(HRegionLocation loc, byte[] row, final long timeout, AtomicBoolean isRecovering) throws IOException { - final long endTime = EnvironmentEdgeManager.currentTimeMillis() + timeout; + final long endTime = EnvironmentEdgeManager.currentTime() + timeout; final long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); boolean reloadLocation = false; TableName tableName = loc.getRegionInfo().getTable(); int tries = 0; Throwable cause = null; - while (endTime > EnvironmentEdgeManager.currentTimeMillis()) { + while (endTime > EnvironmentEdgeManager.currentTime()) { try { // Try and get regioninfo from the hosting server. 
HConnection hconn = getConnectionByTableName(tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index 4b38027af80..9ba33530bb7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -268,7 +268,7 @@ public class WALEdit implements Writable, HeapSize { public static WALEdit createFlushWALEdit(HRegionInfo hri, FlushDescriptor f) { KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, FLUSH, - EnvironmentEdgeManager.currentTimeMillis(), f.toByteArray()); + EnvironmentEdgeManager.currentTime(), f.toByteArray()); return new WALEdit().add(kv); } @@ -282,7 +282,7 @@ public class WALEdit implements Writable, HeapSize { public static WALEdit createRegionEventWALEdit(HRegionInfo hri, RegionEventDescriptor regionEventDesc) { KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, REGION_EVENT, - EnvironmentEdgeManager.currentTimeMillis(), regionEventDesc.toByteArray()); + EnvironmentEdgeManager.currentTime(), regionEventDesc.toByteArray()); return new WALEdit().add(kv); } @@ -301,7 +301,7 @@ public class WALEdit implements Writable, HeapSize { public static WALEdit createCompaction(final HRegionInfo hri, final CompactionDescriptor c) { byte [] pbbytes = c.toByteArray(); KeyValue kv = new KeyValue(getRowForRegion(hri), METAFAMILY, COMPACTION, - EnvironmentEdgeManager.currentTimeMillis(), pbbytes); + EnvironmentEdgeManager.currentTime(), pbbytes); return new WALEdit().add(kv); //replication scope null so that this won't be replicated } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java index 97e9b86963e..58bbe8337e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java @@ -120,7 +120,7 @@ public class WALEditsReplaySink { regionEntries.add(entry); } - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); // replaying edits by region for (Map.Entry> _entry : entriesByRegion.entrySet()) { @@ -139,7 +139,7 @@ public class WALEditsReplaySink { } } - long endTime = EnvironmentEdgeManager.currentTimeMillis() - startTime; + long endTime = EnvironmentEdgeManager.currentTime() - startTime; LOG.debug("number of rows:" + entries.size() + " are sent by batch! 
spent " + endTime + "(ms)!"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index 94dec7c4fe9..eadaeadf340 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -83,7 +83,7 @@ public class MetricsSource { * @param timestamp write time of the edit */ public void setAgeOfLastShippedOp(long timestamp) { - long age = EnvironmentEdgeManager.currentTimeMillis() - timestamp; + long age = EnvironmentEdgeManager.currentTime() - timestamp; rms.setGauge(ageOfLastShippedOpKey, age); rms.setGauge(SOURCE_AGE_OF_LAST_SHIPPED_OP, age); this.lastTimestamp = timestamp; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java index 0aba8a6481e..93c2ee8f53b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java @@ -42,7 +42,7 @@ public class ReplicationThrottler { this.enabled = this.bandwidth > 0; if (this.enabled) { this.cyclePushSize = 0; - this.cycleStartTick = EnvironmentEdgeManager.currentTimeMillis(); + this.cycleStartTick = EnvironmentEdgeManager.currentTime(); } } @@ -67,7 +67,7 @@ public class ReplicationThrottler { } long sleepTicks = 0; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // 1. if cyclePushSize exceeds bandwidth, we need to sleep some // following cycles to amortize, this case can occur when a single push // exceeds the bandwidth @@ -115,7 +115,7 @@ public class ReplicationThrottler { */ public void resetStartTick() { if (this.enabled) { - this.cycleStartTick = EnvironmentEdgeManager.currentTimeMillis(); + this.cycleStartTick = EnvironmentEdgeManager.currentTime(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 53d3f354c95..bda02b3efc4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -657,7 +657,7 @@ public class AccessController extends BaseMasterAndRegionObserver // any cells found there inclusively. long latestTs = Math.max(opTs, latestCellTs); if (latestTs == 0 || latestTs == HConstants.LATEST_TIMESTAMP) { - latestTs = EnvironmentEdgeManager.currentTimeMillis(); + latestTs = EnvironmentEdgeManager.currentTime(); } get.setTimeRange(0, latestTs + 1); // In case of Put operation we set to read all versions. 
This was done to consider the case diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java index 2892c512a0d..5cdddb8f2a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java @@ -126,7 +126,7 @@ public class AuthenticationTokenSecretManager @Override protected byte[] createPassword(AuthenticationTokenIdentifier identifier) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); AuthenticationKey secretKey = currentKey; identifier.setKeyId(secretKey.getKeyId()); identifier.setIssueDate(now); @@ -139,7 +139,7 @@ public class AuthenticationTokenSecretManager @Override public byte[] retrievePassword(AuthenticationTokenIdentifier identifier) throws InvalidToken { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); if (identifier.getExpirationDate() < now) { throw new InvalidToken("Token has expired"); } @@ -223,7 +223,7 @@ public class AuthenticationTokenSecretManager return; } - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); Iterator iter = allKeys.values().iterator(); while (iter.hasNext()) { AuthenticationKey key = iter.next(); @@ -247,7 +247,7 @@ public class AuthenticationTokenSecretManager return; } - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); AuthenticationKey prev = currentKey; AuthenticationKey newKey = new AuthenticationKey(++idSeq, Long.MAX_VALUE, // don't allow to expire until it's replaced by a new key @@ -314,7 +314,7 @@ public class AuthenticationTokenSecretManager isMaster = true; while (!stopped) { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // clear any expired removeExpiredKeys(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index 203f6deb5d3..03ba8b85ac5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -231,9 +231,9 @@ public class SnapshotDescriptionUtils { // set the creation time, if one hasn't been set long time = snapshot.getCreationTime(); if (time == SnapshotDescriptionUtils.NO_SNAPSHOT_START_TIME_SPECIFIED) { - time = EnvironmentEdgeManager.currentTimeMillis(); + time = EnvironmentEdgeManager.currentTime(); LOG.debug("Creation time not specified, setting to:" + time + " (current time:" - + EnvironmentEdgeManager.currentTimeMillis() + ")."); + + EnvironmentEdgeManager.currentTime() + ")."); SnapshotDescription.Builder builder = snapshot.toBuilder(); builder.setCreationTime(time); snapshot = builder.build(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java index 3df9e8b0232..da76251f656 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java @@ -179,7 +179,7 @@ public class ConnectionCache { private boolean closed; ConnectionInfo(HConnection conn, String user) { - lastAccessTime = EnvironmentEdgeManager.currentTimeMillis(); + lastAccessTime = EnvironmentEdgeManager.currentTime(); connection = conn; closed = false; userName = user; @@ -194,13 +194,13 @@ public class ConnectionCache { connections.remove(userName); return false; } - lastAccessTime = EnvironmentEdgeManager.currentTimeMillis(); + lastAccessTime = EnvironmentEdgeManager.currentTime(); return true; } synchronized boolean timedOut(int maxIdleTime) { long timeoutTime = lastAccessTime + maxIdleTime; - if (EnvironmentEdgeManager.currentTimeMillis() > timeoutTime) { + if (EnvironmentEdgeManager.currentTime() > timeoutTime) { connections.remove(userName); closed = true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java index acdbd089bf4..cb8f751197f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java @@ -171,7 +171,7 @@ public class FSHDFSUtils extends FSUtils { final Configuration conf, final CancelableProgressable reporter) throws IOException { LOG.info("Recovering lease on dfs file " + p); - long startWaiting = EnvironmentEdgeManager.currentTimeMillis(); + long startWaiting = EnvironmentEdgeManager.currentTime(); // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves // beyond that limit 'to be safe'. @@ -200,8 +200,8 @@ public class FSHDFSUtils extends FSUtils { } else { // Cycle here until subsequentPause elapses. While spinning, check isFileClosed if // available (should be in hadoop 2.0.5... not in hadoop 1 though. 
- long localStartWaiting = EnvironmentEdgeManager.currentTimeMillis(); - while ((EnvironmentEdgeManager.currentTimeMillis() - localStartWaiting) < + long localStartWaiting = EnvironmentEdgeManager.currentTime(); + while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPause) { Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000)); if (findIsFileClosedMeth) { @@ -232,7 +232,7 @@ public class FSHDFSUtils extends FSUtils { boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, final int nbAttempt, final Path p, final long startWaiting) { - if (recoveryTimeout < EnvironmentEdgeManager.currentTimeMillis()) { + if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) { LOG.warn("Cannot recoverLease after trying for " + conf.getInt("hbase.lease.recovery.timeout", 900000) + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + @@ -279,7 +279,7 @@ public class FSHDFSUtils extends FSUtils { */ private String getLogMessageDetail(final int nbAttempt, final Path p, final long startWaiting) { return "attempt=" + nbAttempt + " on file=" + p + " after " + - (EnvironmentEdgeManager.currentTimeMillis() - startWaiting) + "ms"; + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 560b5df404e..b80de94cc4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -1728,7 +1728,7 @@ public abstract class FSUtils { public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest) throws IOException { // set the modify time for TimeToLive Cleaner - fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1); + fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1); return fs.rename(src, dest); } @@ -1807,7 +1807,7 @@ public abstract class FSUtils { throws IOException { FileSystem fs = FileSystem.get(conf); Path rootPath = FSUtils.getRootDir(conf); - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); Path queryPath; // The table files are in ${hbase.rootdir}/data///* if (null == desiredTable) { @@ -1898,7 +1898,7 @@ public abstract class FSUtils { } } - long overhead = EnvironmentEdgeManager.currentTimeMillis() - startTime; + long overhead = EnvironmentEdgeManager.currentTime() - startTime; String overheadMsg = "Scan DFS for locality info takes " + overhead + " ms"; LOG.info(overheadMsg); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java index e33bf0c9219..dcb072e44b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ManualEnvironmentEdge.java @@ -39,7 +39,7 @@ public class ManualEnvironmentEdge implements EnvironmentEdge { } @Override - public long currentTimeMillis() { + public long currentTime() { return this.value; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java index a7afc108b52..3e069d971a1 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java @@ -50,7 +50,7 @@ public class TableLockChecker { public void checkTableLocks() throws IOException { TableLockManager tableLockManager = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null); - final long expireDate = EnvironmentEdgeManager.currentTimeMillis() - expireTimeout; + final long expireDate = EnvironmentEdgeManager.currentTime() - expireTimeout; MetadataHandler handler = new MetadataHandler() { @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java index c666c143ffe..7e49df59ed9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java @@ -167,7 +167,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock { throws IOException, InterruptedException { boolean hasTimeout = timeoutMs != -1; long waitUntilMs = - hasTimeout ?EnvironmentEdgeManager.currentTimeMillis() + timeoutMs : -1; + hasTimeout ?EnvironmentEdgeManager.currentTime() + timeoutMs : -1; String createdZNode; try { createdZNode = createLockZNode(); @@ -196,7 +196,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock { if (ZKUtil.setWatchIfNodeExists(zkWatcher, zkPathToWatch)) { // Wait for the watcher to fire if (hasTimeout) { - long remainingMs = waitUntilMs - EnvironmentEdgeManager.currentTimeMillis(); + long remainingMs = waitUntilMs - EnvironmentEdgeManager.currentTime(); if (remainingMs < 0 || !deletedLatch.await(remainingMs, TimeUnit.MILLISECONDS)) { LOG.warn("Unable to acquire the lock in " + timeoutMs + diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index b59db4eda45..34bd90b22a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -4659,7 +4659,7 @@ public class TestFromClientSide { HTable table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, conf, Integer.MAX_VALUE); - final long ts = EnvironmentEdgeManager.currentTimeMillis(); + final long ts = EnvironmentEdgeManager.currentTime(); Get get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(); @@ -4696,7 +4696,7 @@ public class TestFromClientSide { final HTable table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, conf, 3); - final long ts = EnvironmentEdgeManager.currentTimeMillis(); + final long ts = EnvironmentEdgeManager.currentTime(); final Get get = new Get(ROW); get.addColumn(FAMILY, QUALIFIER); get.setMaxVersions(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index 54232b6a43c..0b84ec941d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -1128,7 +1128,7 @@ public class TestHCM { ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge(); 
EnvironmentEdgeManager.injectEdge(timeMachine); try { - long timeBase = timeMachine.currentTimeMillis(); + long timeBase = timeMachine.currentTime(); long largeAmountOfTime = ANY_PAUSE * 1000; ConnectionManager.ServerErrorTracker tracker = new ConnectionManager.ServerErrorTracker(largeAmountOfTime, 100); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index 380b3375723..87232192a71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -475,14 +475,14 @@ public class TestScannersFromClientSide { HRegionServer rs = cluster.getRegionServer(i); ProtobufUtil.closeRegion( rs.getRSRpcServices(), rs.getServerName(), regionName, false); - long startTime = EnvironmentEdgeManager.currentTimeMillis(); + long startTime = EnvironmentEdgeManager.currentTime(); long timeOut = 300000; while (true) { if (rs.getOnlineRegion(regionName) == null) { break; } assertTrue("Timed out in closing the testing region", - EnvironmentEdgeManager.currentTimeMillis() < startTime + timeOut); + EnvironmentEdgeManager.currentTime() < startTime + timeOut); Thread.sleep(500); } @@ -497,13 +497,13 @@ public class TestScannersFromClientSide { ZKAssign.createNodeOffline(zkw, hri, loc.getServerName()); } ProtobufUtil.openRegion(rs.getRSRpcServices(), rs.getServerName(), hri); - startTime = EnvironmentEdgeManager.currentTimeMillis(); + startTime = EnvironmentEdgeManager.currentTime(); while (true) { if (rs.getOnlineRegion(regionName) != null) { break; } assertTrue("Timed out in open the testing region", - EnvironmentEdgeManager.currentTimeMillis() < startTime + timeOut); + EnvironmentEdgeManager.currentTime() < startTime + timeOut); Thread.sleep(500); } } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 27b807f6a0d..b7319b77ba0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -473,12 +473,12 @@ public class TestRegionObserverInterface { @Override public void postCompact(ObserverContext e, Store store, StoreFile resultFile) { - lastCompaction = EnvironmentEdgeManager.currentTimeMillis(); + lastCompaction = EnvironmentEdgeManager.currentTime(); } @Override public void postFlush(ObserverContext e) { - lastFlush = EnvironmentEdgeManager.currentTimeMillis(); + lastFlush = EnvironmentEdgeManager.currentTime(); } } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 0264d763f72..5706cb3fd1a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -187,7 +187,7 @@ public class TestWALObserver { assertFalse(modifiedFamily1); // it's where WAL write cp should occur. 
- long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); log.append(hri, hri.getTable(), edit, now, htd, sequenceId); // the edit shall have been change now by the coprocessor. @@ -242,7 +242,7 @@ public class TestWALObserver { HLog wal = createWAL(this.conf); // Put p = creatPutWith2Families(TEST_ROW); WALEdit edit = new WALEdit(); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // addFamilyMapToWALEdit(p.getFamilyMap(), edit); final int countPerFamily = 1000; // for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) { @@ -379,9 +379,8 @@ public class TestWALObserver { byte[] qualifierBytes = Bytes.toBytes(Integer.toString(j)); byte[] columnBytes = Bytes.toBytes(familyStr + ":" + Integer.toString(j)); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, qualifierBytes, ee - .currentTimeMillis(), columnBytes)); - wal.append(hri, tableName, edit, ee.currentTimeMillis(), htd, sequenceId); + edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes)); + wal.append(hri, tableName, edit, ee.currentTime(), htd, sequenceId); } } @@ -406,5 +405,4 @@ public class TestWALObserver { htd.addFamily(c); return htd; } - -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 50a9b9f50ab..c7050c8a001 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -375,7 +375,7 @@ public class TestCacheOnWrite { .setDataBlockEncoding(encoder.getDataBlockEncoding()) ); int rowIdx = 0; - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); for (int iFile = 0; iFile < 5; ++iFile) { for (int iRow = 0; iRow < 500; ++iRow) { String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 243ff8d3cc3..af982d02997 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -111,7 +111,7 @@ public class TestScannerSelectionUsingTTL { HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); long version = 0; //make sure each new set of Put's have a new ts for (int iFile = 0; iFile < totalNumFiles; ++iFile) { if (iFile == NUM_EXPIRED_FILES) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 7beda90f2db..1f74710dc9b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -727,11 +727,11 @@ public class TestAssignmentManagerOnCluster { HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); // Region will be opened, but it 
won't complete master.assignRegion(hri); - long end = EnvironmentEdgeManager.currentTimeMillis() + 20000; + long end = EnvironmentEdgeManager.currentTime() + 20000; // Wait till postOpen is called while (!MyRegionObserver.postOpenCalled ) { assertFalse("Timed out waiting for postOpen to be called", - EnvironmentEdgeManager.currentTimeMillis() > end); + EnvironmentEdgeManager.currentTime() > end); Thread.sleep(300); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java index c9fde2d2e26..299fb06adc3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClusterStatusPublisher.java @@ -61,7 +61,7 @@ public class TestClusterStatusPublisher { @Override protected List> getDeadServers(long since) { List> res = new ArrayList>(); - switch ((int) EnvironmentEdgeManager.currentTimeMillis()) { + switch ((int) EnvironmentEdgeManager.currentTime()) { case 2: res.add(new Pair(ServerName.valueOf("hn", 10, 10), 1L)); break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index 7d69034e9a3..1df81b736c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -1015,18 +1015,18 @@ public class TestDistributedLogSplitting { rsts.get(1).getRegionServer().abort("testing"); rsts.get(2).getRegionServer().abort("testing"); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); while (cluster.getLiveRegionServerThreads().size() > (NUM_RS - 3)) { - if (EnvironmentEdgeManager.currentTimeMillis() - start > 60000) { + if (EnvironmentEdgeManager.currentTime() - start > 60000) { assertTrue(false); } Thread.sleep(200); } - start = EnvironmentEdgeManager.currentTimeMillis(); + start = EnvironmentEdgeManager.currentTime(); while (HBaseTestingUtility.getAllOnlineRegions(cluster).size() < (NUM_REGIONS_TO_CREATE + 1)) { - if (EnvironmentEdgeManager.currentTimeMillis() - start > 60000) { + if (EnvironmentEdgeManager.currentTime() - start > 60000) { assertTrue("Timedout", false); } Thread.sleep(200); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index 000734c81f5..5f56d303113 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -140,7 +140,7 @@ public class TestHFileCleaner { // set a custom edge manager to handle time checking EnvironmentEdge setTime = new EnvironmentEdge() { @Override - public long currentTimeMillis() { + public long currentTime() { return createTime; } }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java index 16321b43155..9d2dda2ca05 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java @@ -182,7 +182,7 @@ public class TestSnapshotFromMaster { Mockito.when(mockHandler.getSnapshot()).thenReturn(desc); Mockito.when(mockHandler.isFinished()).thenReturn(new Boolean(true)); Mockito.when(mockHandler.getCompletionTimestamp()) - .thenReturn(EnvironmentEdgeManager.currentTimeMillis()); + .thenReturn(EnvironmentEdgeManager.currentTime()); master.getSnapshotManagerForTesting() .setSnapshotHandlerForTesting(TABLE_NAME, mockHandler); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 3743fdde343..383f4244574 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -928,7 +928,7 @@ public class TestDefaultMemStore extends TestCase { private class EnvironmentEdgeForMemstoreTest implements EnvironmentEdge { long t = 1234; @Override - public long currentTimeMillis() { + public long currentTime() { return t; } public void setCurrentTimeMillis(long t) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java index 4f8794c29c7..52f261aae7c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java @@ -74,7 +74,7 @@ public class TestKeepDeletes { * compact timestamps are tracked. Otherwise, forced major compaction will not purge * Delete's having the same timestamp. see ScanQueryMatcher.match(): * if (retainDeletesInOutput - * || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp) + * || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp) * <= timeToPurgeDeletes) ... 
) * */ @@ -99,7 +99,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -196,7 +196,7 @@ public class TestKeepDeletes { HConstants.FOREVER, false); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -241,7 +241,7 @@ public class TestKeepDeletes { HConstants.FOREVER, false); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -309,7 +309,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -399,7 +399,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Delete d = new Delete(T1, ts); d.deleteColumns(c0, c0, ts); @@ -442,7 +442,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); @@ -505,7 +505,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); p.add(c0, c1, T1); @@ -587,7 +587,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); region.put(p); @@ -679,7 +679,7 @@ public class TestKeepDeletes { HConstants.FOREVER, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis(); + long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); p.add(c0, c0, T1); @@ -730,7 +730,7 @@ public class TestKeepDeletes { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, true); HRegion region = hbu.createLocalHRegion(htd, null, null); - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; // 2s in the past + long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past Put p = new Put(T1, ts); p.add(c0, c0, T3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java index 73a712f48ea..cbb90180464 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java @@ -69,7 +69,7 @@ public class TestMinVersions { try { // 
2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; Put p = new Put(T1, ts); p.add(c0, c0, T1); @@ -116,7 +116,7 @@ public class TestMinVersions { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, false); HRegion region = hbu.createLocalHRegion(htd, null, null); // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; try { Put p = new Put(T1, ts-1); @@ -171,7 +171,7 @@ public class TestMinVersions { HRegion region = hbu.createLocalHRegion(htd, null, null); // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; try { Put p = new Put(T1, ts-2); @@ -229,7 +229,7 @@ public class TestMinVersions { HRegion region = hbu.createLocalHRegion(htd, null, null); // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; try { // 2nd version @@ -305,7 +305,7 @@ public class TestMinVersions { try { // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; // 1st version Put p = new Put(T1, ts-3); @@ -396,7 +396,7 @@ public class TestMinVersions { final byte [] c1 = COLUMNS[1]; // 2s in the past - long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; + long ts = EnvironmentEdgeManager.currentTime() - 2000; try { Put p = new Put(T1, ts-3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java index 4945ad1d782..0465b936832 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java @@ -94,7 +94,7 @@ public class TestQueryMatcher extends HBaseTestCase { // 2,4,5 ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, ttl, false, 0, rowComparator), get.getFamilyMap().get(fam2), - EnvironmentEdgeManager.currentTimeMillis() - ttl); + EnvironmentEdgeManager.currentTime() - ttl); List memstore = new ArrayList(); memstore.add(new KeyValue(row1, fam2, col1, 1, data)); @@ -176,7 +176,7 @@ public class TestQueryMatcher extends HBaseTestCase { ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, ttl, false, 0, rowComparator), null, - EnvironmentEdgeManager.currentTimeMillis() - ttl); + EnvironmentEdgeManager.currentTime() - ttl); List memstore = new ArrayList(); memstore.add(new KeyValue(row1, fam2, col1, 1, data)); @@ -227,7 +227,7 @@ public class TestQueryMatcher extends HBaseTestCase { ScanQueryMatcher.MatchCode.DONE }; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, testTTL, false, 0, rowComparator), get.getFamilyMap().get(fam2), now - testTTL); @@ -281,7 +281,7 @@ public class TestQueryMatcher extends HBaseTestCase { ScanQueryMatcher.MatchCode.DONE }; - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, testTTL, false, 0, rowComparator), null, now - testTTL); @@ -336,7 +336,7 @@ public class TestQueryMatcher extends 
HBaseTestCase { private void testDropDeletes( byte[] from, byte[] to, byte[][] rows, MatchCode... expected) throws IOException { - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // Set time to purge deletes to negative value to avoid it ever happening. ScanInfo scanInfo = new ScanInfo(fam2, 0, 1, ttl, false, -1L, rowComparator); NavigableSet cols = get.getFamilyMap().get(fam2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 38624fe1dec..b4ae3c98edd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -138,10 +138,10 @@ public class TestRegionMergeTransactionOnCluster { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); AssignmentManager am = cluster.getMaster().getAssignmentManager(); RegionStates regionStates = am.getRegionStates(); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); while (!regionStates.isRegionInState(hri, State.MERGED)) { assertFalse("Timed out in waiting one merged region to be in state MERGED", - EnvironmentEdgeManager.currentTimeMillis() - start > 60000); + EnvironmentEdgeManager.currentTime() - start > 60000); Thread.sleep(500); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index df11c0c3a99..aa628dc5502 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -1001,10 +1001,10 @@ public class TestSplitTransactionOnCluster { hri = region.getRegionInfo(); // split parent AssignmentManager am = cluster.getMaster().getAssignmentManager(); RegionStates regionStates = am.getRegionStates(); - long start = EnvironmentEdgeManager.currentTimeMillis(); + long start = EnvironmentEdgeManager.currentTime(); while (!regionStates.isRegionInState(hri, State.SPLIT)) { assertFalse("Timed out in waiting split parent to be in state SPLIT", - EnvironmentEdgeManager.currentTimeMillis() - start > 60000); + EnvironmentEdgeManager.currentTime() - start > 60000); Thread.sleep(500); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index 96009320301..99834ed0b4c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -297,7 +297,7 @@ public class TestStore { // store files will be (this.store.ttl / storeFileNum) for (int i = 1; i <= storeFileNum; i++) { LOG.info("Adding some data for the store file #" + i); - timeStamp = EnvironmentEdgeManager.currentTimeMillis(); + timeStamp = EnvironmentEdgeManager.currentTime(); this.store.add(new KeyValue(row, family, qf1, timeStamp, (byte[]) null)); this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null)); this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null)); @@ -318,7 
+318,7 @@ public class TestStore { assertEquals(storeFileNum - i, sfs.size()); // Ensure only non-expired files remain. for (StoreFile sf : sfs) { - assertTrue(sf.getReader().getMaxTimestamp() >= (edge.currentTimeMillis() - storeTtl)); + assertTrue(sf.getReader().getMaxTimestamp() >= (edge.currentTime() - storeTtl)); } // Let the next store file expired. edge.incrementTime(sleepTime); @@ -328,7 +328,7 @@ public class TestStore { // Assert the last expired file is not removed. assertEquals(1, sfs.size()); long ts = sfs.iterator().next().getReader().getMaxTimestamp(); - assertTrue(ts < (edge.currentTimeMillis() - storeTtl)); + assertTrue(ts < (edge.currentTime() - storeTtl)); } @Test @@ -661,7 +661,7 @@ public class TestStore { long oldValue = 1L; long newValue = 3L; this.store.add(new KeyValue(row, family, qf1, - EnvironmentEdgeManager.currentTimeMillis(), + EnvironmentEdgeManager.currentTime(), Bytes.toBytes(oldValue))); // snapshot the store. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index 05ac4b4ffef..f42ff0fb962 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -27,25 +27,18 @@ import java.util.Arrays; import java.util.List; import java.util.NavigableSet; import java.util.TreeSet; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; import junit.framework.TestCase; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; -import org.apache.hadoop.hbase.util.Threads; import org.junit.experimental.categories.Category; // Can't be small as it plays with EnvironmentEdgeManager @@ -512,7 +505,7 @@ public class TestStoreScanner extends TestCase { try { final long now = System.currentTimeMillis(); EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() { - public long currentTimeMillis() { + public long currentTime() { return now; } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java index 0edad8bfdc7..71ee5951250 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java @@ -784,14 +784,14 @@ public class TestWALReplay { // Add an edit to another family, should be skipped. WALEdit edit = new WALEdit(); - long now = ee.currentTimeMillis(); + long now = ee.currentTime(); edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName)); wal.append(hri, tableName, edit, now, htd, sequenceId); // Delete the c family to verify deletes make it over. 
edit = new WALEdit(); - now = ee.currentTimeMillis(); + now = ee.currentTime(); edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily)); wal.append(hri, tableName, edit, now, htd, sequenceId); @@ -976,8 +976,8 @@ public class TestWALReplay { byte[] columnBytes = Bytes.toBytes(familyStr + ":" + Integer.toString(j)); WALEdit edit = new WALEdit(); edit.add(new KeyValue(rowName, family, qualifierBytes, - ee.currentTimeMillis(), columnBytes)); - wal.append(hri, tableName, edit, ee.currentTimeMillis(), htd, sequenceId); + ee.currentTime(), columnBytes)); + wal.append(hri, tableName, edit, ee.currentTime(), htd, sequenceId); } } @@ -989,7 +989,7 @@ public class TestWALReplay { for (int j = 0; j < count; j++) { byte[] qualifier = Bytes.toBytes(qualifierPrefix + Integer.toString(j)); Put p = new Put(rowName); - p.add(family, qualifier, ee.currentTimeMillis(), rowName); + p.add(family, qualifier, ee.currentTime(), rowName); r.put(p); puts.add(p); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index f67ab1fc8b4..68a99aa3a55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -116,7 +116,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { final byte[] v3 = Bytes.toBytes("v3"); htable1 = new HTable(conf1, tableName); - long t = EnvironmentEdgeManager.currentTimeMillis(); + long t = EnvironmentEdgeManager.currentTime(); // create three versions for "row" Put put = new Put(row); put.add(famName, row, t, v1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java index e3ea39748d9..fd8c9a1d403 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java @@ -367,7 +367,7 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { try { // Store read only ACL at a future time Put p = new Put(TEST_ROW).add(TEST_FAMILY1, TEST_Q1, - EnvironmentEdgeManager.currentTimeMillis() + 1000000, + EnvironmentEdgeManager.currentTime() + 1000000, ZERO); p.setACL(USER_OTHER.getShortName(), new Permission(Permission.Action.READ)); t.put(p); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 8f8d92d245e..1773027f075 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -117,7 +117,7 @@ public class TestTokenAuthentication { public TokenServer(Configuration conf) throws IOException { this.conf = conf; - this.startcode = EnvironmentEdgeManager.currentTimeMillis(); + this.startcode = EnvironmentEdgeManager.currentTime(); // Server to handle client requests. 
String hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost("default", "default")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java index d471c392478..b22a713144e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java @@ -155,7 +155,7 @@ public class TestZKSecretWatcher { LOG.debug("Master new current key: "+key3.getKeyId()); // force expire the original key - key1.setExpiration(EnvironmentEdgeManager.currentTimeMillis() - 1000); + key1.setExpiration(EnvironmentEdgeManager.currentTime() - 1000); KEY_MASTER.removeExpiredKeys(); // verify removed from master assertNull(KEY_MASTER.getKey(key1.getKeyId())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java index dd2bfd4a734..dba1bba1861 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java @@ -115,7 +115,7 @@ public class TestCoprocessorScanPolicy { p.add(F, tableName.getName(), Bytes.toBytes(2)); t.put(p); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); // insert 2 versions p = new Put(R); @@ -165,7 +165,7 @@ public class TestCoprocessorScanPolicy { desc.addFamily(hcd); TEST_UTIL.getHBaseAdmin().createTable(desc); HTable t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName); - long now = EnvironmentEdgeManager.currentTimeMillis(); + long now = EnvironmentEdgeManager.currentTime(); ManualEnvironmentEdge me = new ManualEnvironmentEdge(); me.setValue(now); EnvironmentEdgeManagerTestHelper.injectEdge(me); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java index 2be502e867b..b7243ab39ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestDefaultEnvironmentEdge.java @@ -36,7 +36,7 @@ public class TestDefaultEnvironmentEdge { public void testGetCurrentTimeUsesSystemClock() { DefaultEnvironmentEdge edge = new DefaultEnvironmentEdge(); long systemTime = System.currentTimeMillis(); - long edgeTime = edge.currentTimeMillis(); + long edgeTime = edge.currentTime(); assertTrue("System time must be either the same or less than the edge time", systemTime < edgeTime || systemTime == edgeTime); try { @@ -44,11 +44,8 @@ public class TestDefaultEnvironmentEdge { } catch (InterruptedException e) { fail(e.getMessage()); } - long secondEdgeTime = edge.currentTimeMillis(); + long secondEdgeTime = edge.currentTime(); assertTrue("Second time must be greater than the first", secondEdgeTime > edgeTime); } - - -} - +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java index 5c9208ae8a2..d82db0dc499 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java @@ -53,7 +53,7 @@ public class TestFSHDFSUtils { @Before public void setup() { - this.startTime = EnvironmentEdgeManager.currentTimeMillis(); + this.startTime = EnvironmentEdgeManager.currentTime(); } /** @@ -73,7 +73,7 @@ public class TestFSHDFSUtils { Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two // invocations will happen pretty fast... the we fall into the longer wait loop). - assertTrue((EnvironmentEdgeManager.currentTimeMillis() - this.startTime) > + assertTrue((EnvironmentEdgeManager.currentTime() - this.startTime) > (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java index 89450b229c0..67718d635da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestIncrementingEnvironmentEdge.java @@ -18,13 +18,12 @@ */ package org.apache.hadoop.hbase.util; -import org.apache.hadoop.hbase.MediumTests; +import static junit.framework.Assert.assertEquals; + import org.apache.hadoop.hbase.SmallTests; import org.junit.Test; import org.junit.experimental.categories.Category; -import static junit.framework.Assert.assertEquals; - /** * Tests that the incrementing environment edge increments time instead of using * the default. @@ -35,11 +34,9 @@ public class TestIncrementingEnvironmentEdge { @Test public void testGetCurrentTimeUsesSystemClock() { IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(1); - assertEquals(1, edge.currentTimeMillis()); - assertEquals(2, edge.currentTimeMillis()); - assertEquals(3, edge.currentTimeMillis()); - assertEquals(4, edge.currentTimeMillis()); + assertEquals(1, edge.currentTime()); + assertEquals(2, edge.currentTime()); + assertEquals(3, edge.currentTime()); + assertEquals(4, edge.currentTime()); } - -} - +} \ No newline at end of file
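
Note for reviewers: every hunk above is the same mechanical substitution, currentTimeMillis() -> currentTime(), on the EnvironmentEdge/EnvironmentEdgeManager clock abstraction. The sketch below is not part of the patch; it only illustrates how that abstraction is used, with class and method names taken from the hunks themselves (ManualEnvironmentEdge.setValue/incrementTime, EnvironmentEdgeManager.injectEdge, and the renamed currentTime()). EnvironmentEdgeManager.reset() is assumed here to restore the default system-clock edge.

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class CurrentTimeRenameSketch {
  public static void main(String[] args) {
    // Pin the clock to a known value instead of reading the system time.
    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
    edge.setValue(1000L);
    EnvironmentEdgeManager.injectEdge(edge);
    try {
      // After this patch, every call site reads time through the renamed method.
      long start = EnvironmentEdgeManager.currentTime();            // 1000
      edge.incrementTime(500L);                                     // advance the fake clock
      long elapsed = EnvironmentEdgeManager.currentTime() - start;  // deterministically 500
      System.out.println("elapsed = " + elapsed + " ms");
    } finally {
      EnvironmentEdgeManager.reset(); // assumed: restores the default edge
    }
  }
}

Because the hunks substitute currentTime() into otherwise unchanged millisecond arithmetic (for example "+ timeoutMs" and "- 2000; // 2s in the past"), the rename changes no semantics: the return value is still epoch milliseconds, and only the method name on the pluggable clock changes, which is what lets one pass rewrite production call sites and the test clocks (ManualEnvironmentEdge, IncrementingEnvironmentEdge, and anonymous EnvironmentEdge implementations) alike.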