HBASE-25911 Replace calls to System.currentTimeMillis with EnvironmentEdgeManager.currentTime (#3302)

We introduced EnvironmentEdgeManager as a way to inject alternate clocks
for unit tests. In order for this to be effective, all callers that would
otherwise use System.currentTimeMillis() must call
EnvironmentEdgeManager.currentTime() instead, except for implementations of
EnvironmentEdge itself.
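
For context, the test-side pattern this enables looks roughly like the sketch below: once
production code reads time through EnvironmentEdgeManager.currentTime(), a test can inject a
controllable clock. This is a minimal illustration only, assuming the existing
ManualEnvironmentEdge test helper in org.apache.hadoop.hbase.util; it is not part of this change.

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class InjectedClockSketch {
  public static void main(String[] args) {
    // Install a manually controlled clock; every call to
    // EnvironmentEdgeManager.currentTime() now reads from it.
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    clock.setValue(1_000_000L);
    EnvironmentEdgeManager.injectEdge(clock);

    long before = EnvironmentEdgeManager.currentTime(); // 1,000,000
    clock.incValue(5_000L);                             // advance "time" by 5s without sleeping
    long after = EnvironmentEdgeManager.currentTime();  // 1,005,000
    System.out.println("elapsed = " + (after - before) + " ms");

    // Restore the default wall-clock edge so other tests are unaffected.
    EnvironmentEdgeManager.reset();
  }
}

Any caller that still invokes System.currentTimeMillis() directly bypasses the injected edge,
which is why this sweep touches every such call site outside the EnvironmentEdge implementations.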

Signed-off-by: Bharath Vissapragada <bharathv@apache.org>
Signed-off-by: Duo Zhang <zhangduo@apache.org>
Signed-off-by: Viraj Jasani <vjasani@apache.org>

Conflicts:
	hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
	hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
	hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupManager.java
	hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
	hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java
	hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
	hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
	hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java
	hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java
	hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
	hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java
	hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java
	hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestEnableRSGroups.java
	hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsAdmin2.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractWALRoller.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/client/RestoreSnapshotFromClientSimpleTestBase.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnection.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClientCloneLinksAfterDelete.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMutationGetCellBuilder.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitor.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSimpleTimeRangeTracker.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCloseChecker.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestProtobufLog.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCombinedAsyncWriter.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestRecoverStandbyProcedure.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestSyncReplicationWALProvider.java
	hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
	hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
Andrew Purtell 2021-06-01 09:57:48 -07:00
parent d4285be5c1
commit a4e8ee183e
344 changed files with 1325 additions and 1114 deletions

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.client.RegionInfoDisplay;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@ -248,7 +249,7 @@ public class HRegionInfo implements RegionInfo {
public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
final boolean split)
throws IllegalArgumentException {
this(tableName, startKey, endKey, split, System.currentTimeMillis());
this(tableName, startKey, endKey, split, EnvironmentEdgeManager.currentTime());
}
/**

View File

@ -32,6 +32,7 @@ import java.util.stream.Collectors;
import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.yetus.audience.InterfaceAudience;
@ -136,7 +137,7 @@ public final class ServerMetricsBuilder {
private final Map<byte[], RegionMetrics> regionStatus = new TreeMap<>(Bytes.BYTES_COMPARATOR);
private final Map<byte[], UserMetrics> userMetrics = new TreeMap<>(Bytes.BYTES_COMPARATOR);
private final Set<String> coprocessorNames = new TreeSet<>();
private long reportTimestamp = System.currentTimeMillis();
private long reportTimestamp = EnvironmentEdgeManager.currentTime();
private long lastReportTimestamp = 0;
private ServerMetricsBuilder(ServerName serverName) {
this.serverName = serverName;

View File

@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
@ -190,7 +191,7 @@ public class BufferedMutatorImpl implements BufferedMutator {
}
if (currentWriteBufferSize.get() == 0) {
firstRecordInBufferTimestamp.set(System.currentTimeMillis());
firstRecordInBufferTimestamp.set(EnvironmentEdgeManager.currentTime());
}
currentWriteBufferSize.addAndGet(toAddSize);
writeAsyncBuffer.addAll(ms);
@ -209,7 +210,7 @@ public class BufferedMutatorImpl implements BufferedMutator {
if (currentWriteBufferSize.get() == 0) {
return; // Nothing to flush
}
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
if (firstRecordInBufferTimestamp.get() + writeBufferPeriodicFlushTimeoutMs.get() > now) {
return; // No need to flush yet
}

View File

@ -26,6 +26,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* The class that is able to determine some unique strings for the client,
@ -45,7 +46,7 @@ final class ClientIdGenerator {
byte[] selfBytes = getIpAddressBytes();
Long pid = getPid();
long tid = Thread.currentThread().getId();
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTime();
byte[] id = new byte[selfBytes.length + ((pid != null ? 1 : 0) + 2) * Bytes.SIZEOF_LONG];
int offset = Bytes.putBytes(id, 0, selfBytes, 0, selfBytes.length);

View File

@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.regionserver.LeaseException;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -103,7 +104,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
}
this.scan = scan;
this.tableName = tableName;
this.lastNext = System.currentTimeMillis();
this.lastNext = EnvironmentEdgeManager.currentTime();
this.connection = connection;
this.pool = pool;
this.primaryOperationTimeout = primaryOperationTimeout;
@ -449,7 +450,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
}
continue;
}
long currentTime = System.currentTimeMillis();
long currentTime = EnvironmentEdgeManager.currentTime();
if (this.scanMetrics != null) {
this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime - lastNext);
}

View File

@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceAudience;
* Specifying timestamps, deleteFamily and deleteColumns will delete all
* versions with a timestamp less than or equal to that passed. If no
* timestamp is specified, an entry is added with a timestamp of 'now'
where 'now' is the server's System.currentTimeMillis().
where 'now' is the server's EnvironmentEdgeManager.currentTime().
* Specifying a timestamp to the deleteColumn method will
* delete versions only with a timestamp equal to that specified.
* If no timestamp is passed to deleteColumn, internally, it figures the

View File

@ -153,7 +153,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
}
long currentTime = EnvironmentEdgeManager.currentTime();
FailureInfo fInfo =
computeIfAbsent(repeatedFailuresMap, serverName, () -> new FailureInfo(currentTime));
computeIfAbsent(repeatedFailuresMap, serverName, () -> new FailureInfo(currentTime));
fInfo.timeOfLatestAttemptMilliSec = currentTime;
fInfo.numConsecutiveFailures.incrementAndGet();
}
@ -180,7 +180,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
* cleanupInterval ms.
*/
protected void occasionallyCleanupFailureInformation() {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
if (!(now > lastFailureMapCleanupTimeMilliSec
+ failureMapCleanupIntervalMilliSec))
return;
@ -295,7 +295,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
repeatedFailuresMap.remove(server);
} else {
// update time of last attempt
long currentTime = System.currentTimeMillis();
long currentTime = EnvironmentEdgeManager.currentTime();
fInfo.timeOfLatestAttemptMilliSec = currentTime;
// Release the lock if we were retrying inspite of FastFail

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
@ -48,7 +49,7 @@ public class RegionInfoBuilder {
private final TableName tableName;
private byte[] startKey = HConstants.EMPTY_START_ROW;
private byte[] endKey = HConstants.EMPTY_END_ROW;
private long regionId = System.currentTimeMillis();
private long regionId = EnvironmentEdgeManager.currentTime();
private int replicaId = RegionInfo.DEFAULT_REPLICA_ID;
private boolean offLine = false;
private boolean split = false;

View File

@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.exceptions.ScannerResetException;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -241,7 +242,7 @@ public class ScannerCallable extends ClientServiceCallable<Result[]> {
} else {
response = next();
}
long timestamp = System.currentTimeMillis();
long timestamp = EnvironmentEdgeManager.currentTime();
boolean isHeartBeat = response.hasHeartbeatMessage() && response.getHeartbeatMessage();
setHeartbeatMessage(isHeartBeat);
if (isHeartBeat && scan.isNeedCursorResult() && response.hasCursor()) {
@ -249,7 +250,7 @@ public class ScannerCallable extends ClientServiceCallable<Result[]> {
}
Result[] rrs = ResponseConverter.getResults(getRpcControllerCellScanner(), response);
if (logScannerActivity) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
if (now - timestamp > logCutOffLatency) {
int rows = rrs == null ? 0 : rrs.length;
LOG.info("Took " + (now - timestamp) + "ms to fetch " + rows + " rows from scanner="

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master;
import java.util.Date;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@ -188,11 +189,11 @@ public class RegionState {
private long ritDuration;
public static RegionState createForTesting(RegionInfo region, State state) {
return new RegionState(region, state, System.currentTimeMillis(), null);
return new RegionState(region, state, EnvironmentEdgeManager.currentTime(), null);
}
public RegionState(RegionInfo region, State state, ServerName serverName) {
this(region, state, System.currentTimeMillis(), serverName);
this(region, state, EnvironmentEdgeManager.currentTime(), serverName);
}
public RegionState(RegionInfo region,
@ -390,7 +391,7 @@ public class RegionState {
* A slower (but more easy-to-read) stringification
*/
public String toDescriptiveString() {
long relTime = System.currentTimeMillis() - stamp;
long relTime = EnvironmentEdgeManager.currentTime() - stamp;
return hri.getRegionNameAsString()
+ " state=" + state
+ ", ts=" + new Date(stamp) + " (" + (relTime/1000) + "s ago)"

View File

@ -127,9 +127,9 @@ public class SlowLogTableAccessor {
}
/**
* Create rowKey: currentTimeMillis APPEND slowLogPayload.hashcode
* Create rowKey: currentTime APPEND slowLogPayload.hashcode
* Scan on slowlog table should keep records with sorted order of time, however records
* added at the very same time (currentTimeMillis) could be in random order.
* added at the very same time could be in random order.
*
* @param slowLogPayload SlowLogPayload to process
* @return rowKey byte[]
@ -141,8 +141,8 @@ public class SlowLogTableAccessor {
if (lastFiveDig.startsWith("-")) {
lastFiveDig = String.valueOf(RANDOM.nextInt(99999));
}
final long currentTimeMillis = EnvironmentEdgeManager.currentTime();
final String timeAndHashcode = currentTimeMillis + lastFiveDig;
final long currentTime = EnvironmentEdgeManager.currentTime();
final String timeAndHashcode = currentTime + lastFiveDig;
final long rowKeyLong = Long.parseLong(timeAndHashcode);
return Bytes.toBytes(rowKeyLong);
}

View File

@ -67,8 +67,8 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.junit.Assert;
import org.junit.Before;
import org.junit.ClassRule;
@ -77,6 +77,7 @@ import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
@Category({ClientTests.class, LargeTests.class})
public class TestAsyncProcess {
@ -1024,9 +1025,9 @@ public class TestAsyncProcess {
};
t2.start();
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
ap.submit(null, DUMMY_TABLE, new ArrayList<>(), false, null, false);
long end = System.currentTimeMillis();
long end = EnvironmentEdgeManager.currentTime();
//Adds 100 to secure us against approximate timing.
Assert.assertTrue(start + 100L + sleepTime > end);
@ -1757,7 +1758,7 @@ public class TestAsyncProcess {
Put p = createPut(1, true);
mutator.mutate(p);
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
try {
mutator.flush();
Assert.fail();
@ -1765,7 +1766,7 @@ public class TestAsyncProcess {
assertEquals(1, expected.getNumExceptions());
assertTrue(expected.getRow(0) == p);
}
long actualSleep = System.currentTimeMillis() - startTime;
long actualSleep = EnvironmentEdgeManager.currentTime() - startTime;
long expectedSleep = 0L;
for (int i = 0; i < retries; i++) {
expectedSleep += ConnectionUtils.getPauseTime(specialPause, i);
@ -1784,7 +1785,7 @@ public class TestAsyncProcess {
mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
Assert.assertNotNull(mutator.getAsyncProcess().createServerErrorTracker());
mutator.mutate(p);
startTime = System.currentTimeMillis();
startTime = EnvironmentEdgeManager.currentTime();
try {
mutator.flush();
Assert.fail();
@ -1792,7 +1793,7 @@ public class TestAsyncProcess {
assertEquals(1, expected.getNumExceptions());
assertTrue(expected.getRow(0) == p);
}
actualSleep = System.currentTimeMillis() - startTime;
actualSleep = EnvironmentEdgeManager.currentTime() - startTime;
expectedSleep = 0L;
for (int i = 0; i < retries; i++) {
expectedSleep += ConnectionUtils.getPauseTime(normalPause, i);

View File

@ -57,11 +57,11 @@ import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Ignore;
@ -72,6 +72,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
@ -643,7 +644,7 @@ public class TestClientNoCluster extends Configured implements Tool {
CellProtos.Cell.Builder cellBuilder = CellProtos.Cell.newBuilder();
cellBuilder.setRow(row);
cellBuilder.setFamily(CATALOG_FAMILY_BYTESTRING);
cellBuilder.setTimestamp(System.currentTimeMillis());
cellBuilder.setTimestamp(EnvironmentEdgeManager.currentTime());
return cellBuilder;
}
@ -767,7 +768,7 @@ public class TestClientNoCluster extends Configured implements Tool {
*/
static void cycle(int id, final Configuration c, final Connection sharedConnection) throws IOException {
long namespaceSpan = c.getLong("hbase.test.namespace.span", 1000000);
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
final int printInterval = 100000;
Random rd = new Random(id);
boolean get = c.getBoolean("hbase.test.do.gets", false);
@ -786,7 +787,7 @@ public class TestClientNoCluster extends Configured implements Tool {
}
}
LOG.info("Finished a cycle putting " + namespaceSpan + " in " +
(System.currentTimeMillis() - startTime) + "ms");
(EnvironmentEdgeManager.currentTime() - startTime) + "ms");
}
} else {
try (BufferedMutator mutator = sharedConnection.getBufferedMutator(tableName)) {
@ -803,7 +804,7 @@ public class TestClientNoCluster extends Configured implements Tool {
}
}
LOG.info("Finished a cycle putting " + namespaceSpan + " in " +
(System.currentTimeMillis() - startTime) + "ms");
(EnvironmentEdgeManager.currentTime() - startTime) + "ms");
}
}
}

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.junit.ClassRule;
import org.junit.Rule;
@ -198,7 +199,7 @@ public class TestRegionInfoBuilder {
public void testParseName() throws IOException {
final TableName tableName = name.getTableName();
byte[] startKey = Bytes.toBytes("startKey");
long regionId = System.currentTimeMillis();
long regionId = EnvironmentEdgeManager.currentTime();
int replicaId = 42;
// test without replicaId
@ -228,7 +229,7 @@ public class TestRegionInfoBuilder {
byte[] startKey = Bytes.toBytes("startKey");
byte[] endKey = Bytes.toBytes("endKey");
boolean split = false;
long regionId = System.currentTimeMillis();
long regionId = EnvironmentEdgeManager.currentTime();
int replicaId = 42;
RegionInfo ri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey)

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Rule;
@ -57,7 +58,7 @@ public class TestRegionInfoDisplay {
.setStartKey(startKey)
.setEndKey(endKey)
.setSplit(false)
.setRegionId(System.currentTimeMillis())
.setRegionId(EnvironmentEdgeManager.currentTime())
.setReplicaId(1).build();
checkEquality(ri, conf);
Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY,

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
@ -122,9 +123,9 @@ public class TestSnapshotFromAdmin {
String snapshot = "snapshot";
final TableName table = TableName.valueOf(name.getMethodName());
// get start time
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
admin.snapshot(snapshot, table);
long finish = System.currentTimeMillis();
long finish = EnvironmentEdgeManager.currentTime();
long elapsed = (finish - start);
assertTrue("Elapsed time:" + elapsed + " is more than expected max:" + time, elapsed <= time);
admin.close();

View File

@ -21,6 +21,8 @@ package org.apache.hadoop.hbase;
import com.google.errorprone.annotations.RestrictedApi;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -172,7 +174,7 @@ public abstract class ScheduledChore implements Runnable {
*/
private synchronized void updateTimeTrackingBeforeRun() {
timeOfLastRun = timeOfThisRun;
timeOfThisRun = System.currentTimeMillis();
timeOfThisRun = EnvironmentEdgeManager.currentTime();
}
/**
@ -215,7 +217,7 @@ public abstract class ScheduledChore implements Runnable {
* @return true if time is earlier or equal to current milli time
*/
private synchronized boolean isValidTime(final long time) {
return time > 0 && time <= System.currentTimeMillis();
return time > 0 && time <= EnvironmentEdgeManager.currentTime();
}
/**

View File

@ -42,7 +42,7 @@ public class ThrottledInputStream extends InputStream {
private final InputStream rawStream;
private final long maxBytesPerSec;
private final long startTime = System.currentTimeMillis();
private final long startTime = EnvironmentEdgeManager.currentTime();
private long bytesRead = 0;
private long totalSleepTime = 0;
@ -164,7 +164,7 @@ public class ThrottledInputStream extends InputStream {
* @return Read rate, in bytes/sec.
*/
public long getBytesPerSec() {
long elapsed = (System.currentTimeMillis() - startTime) / 1000;
long elapsed = (EnvironmentEdgeManager.currentTime() - startTime) / 1000;
if (elapsed == 0) {
return bytesRead;
} else {

View File

@ -174,7 +174,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
for (Path path : FileUtil.stat2Paths(fileStatuses)) { // for each file that match the pattern
if (fs.isFile(path)) { // only process files, skip for directories
File dst = new File(parentDirStr, "." + pathPrefix + "."
+ path.getName() + "." + System.currentTimeMillis() + ".jar");
+ path.getName() + "." + EnvironmentEdgeManager.currentTime() + ".jar");
fs.copyToLocalFile(path, new Path(dst.toString()));
dst.deleteOnExit();
@ -188,7 +188,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
Matcher m = libJarPattern.matcher(entry.getName());
if (m.matches()) {
File file = new File(parentDirStr, "." + pathPrefix + "."
+ path.getName() + "." + System.currentTimeMillis() + "." + m.group(1));
+ path.getName() + "." + EnvironmentEdgeManager.currentTime() + "." + m.group(1));
try (FileOutputStream outStream = new FileOutputStream(file)) {
IOUtils.copyBytes(jarFile.getInputStream(entry),
outStream, conf, true);

View File

@ -129,7 +129,7 @@ public class IdLock {
Thread currentThread = Thread.currentThread();
Entry entry = new Entry(id, currentThread);
Entry existing;
long waitUtilTS = System.currentTimeMillis() + time;
long waitUtilTS = EnvironmentEdgeManager.currentTime() + time;
long remaining = time;
while ((existing = map.putIfAbsent(entry.id, entry)) != null) {
synchronized (existing) {
@ -139,7 +139,7 @@ public class IdLock {
while (existing.locked) {
existing.wait(remaining);
if (existing.locked) {
long currentTS = System.currentTimeMillis();
long currentTS = EnvironmentEdgeManager.currentTime();
if (currentTS >= waitUtilTS) {
// time is up
return null;

View File

@ -108,7 +108,7 @@ public class Random64 {
final int precision = 100000;
final long totalTestCnt = defaultTotalTestCnt + precision;
final int reportPeriod = 100 * precision;
final long startTime = System.currentTimeMillis();
final long startTime = EnvironmentEdgeManager.currentTime();
System.out.println("Do collision test, totalTestCnt=" + totalTestCnt);
@ -130,7 +130,7 @@ public class Random64 {
}
if (cnt % reportPeriod == 0) {
long cost = System.currentTimeMillis() - startTime;
long cost = EnvironmentEdgeManager.currentTime() - startTime;
long remainingMs = (long) (1.0 * (totalTestCnt - cnt) * cost / cnt);
System.out.println(
String.format(

View File

@ -118,7 +118,7 @@ public class ReflectionUtils {
boolean dumpStack = false;
if (log.isInfoEnabled()) {
synchronized (ReflectionUtils.class) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
if (now - previousLogTime >= minInterval * 1000) {
previousLogTime = now;
dumpStack = true;

View File

@ -71,7 +71,7 @@ public class Sleeper {
if (this.stopper.isStopped()) {
return;
}
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
long currentSleepTime = sleepTime;
while (currentSleepTime > 0) {
long woke = -1;
@ -83,7 +83,7 @@ public class Sleeper {
sleepLock.wait(currentSleepTime);
}
woke = System.currentTimeMillis();
woke = EnvironmentEdgeManager.currentTime();
long slept = woke - now;
if (slept - this.period > MINIMAL_DELTA_FOR_LOGGING) {
LOG.warn("We slept {}ms instead of {}ms, this is likely due to a long " +
@ -98,7 +98,7 @@ public class Sleeper {
}
}
// Recalculate waitTime.
woke = (woke == -1)? System.currentTimeMillis(): woke;
woke = (woke == -1)? EnvironmentEdgeManager.currentTime() : woke;
currentSleepTime = this.period - (woke - now);
}
synchronized(sleepLock) {

View File

@ -149,7 +149,7 @@ public class Threads {
* @param msToWait the amount of time to sleep in milliseconds
*/
public static void sleepWithoutInterrupt(final long msToWait) {
long timeMillis = System.currentTimeMillis();
long timeMillis = EnvironmentEdgeManager.currentTime();
long endTime = timeMillis + msToWait;
boolean interrupted = false;
while (timeMillis < endTime) {
@ -158,7 +158,7 @@ public class Threads {
} catch (InterruptedException ex) {
interrupted = true;
}
timeMillis = System.currentTimeMillis();
timeMillis = EnvironmentEdgeManager.currentTime();
}
if (interrupted) {

View File

@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@ -139,7 +140,7 @@ public class TestCellComparator {
*/
@Test
public void testMetaComparisons() throws Exception {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
// Meta compares
Cell aaa = createByteBufferKeyValueFromKeyValue(new KeyValue(
@ -176,7 +177,7 @@ public class TestCellComparator {
*/
@Test
public void testMetaComparisons2() {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
CellComparator c = MetaCellComparator.META_COMPARATOR;
assertTrue(c.compare(createByteBufferKeyValueFromKeyValue(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)),

View File

@ -39,6 +39,7 @@ import java.util.TreeSet;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@ -144,7 +145,7 @@ public class TestKeyValue {
@Test
public void testMoreComparisons() {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
// Meta compares
KeyValue aaa = new KeyValue(
@ -174,7 +175,7 @@ public class TestKeyValue {
@Test
public void testMetaComparatorTableKeysWithCommaOk() {
CellComparator c = MetaCellComparator.META_COMPARATOR;
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
// meta keys values are not quite right. A users can enter illegal values
// from shell when scanning meta.
KeyValue a = new KeyValue(Bytes.toBytes("table,key,with,commas1,1234"), now);
@ -204,7 +205,7 @@ public class TestKeyValue {
}
private void metacomparisons(final CellComparatorImpl c) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
assertTrue(c.compare(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now),
new KeyValue(
@ -221,7 +222,7 @@ public class TestKeyValue {
}
private void comparisons(final CellComparatorImpl c) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
assertTrue(c.compare(new KeyValue(
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now),
new KeyValue(
@ -520,7 +521,7 @@ public class TestKeyValue {
@Test
public void testMetaKeyComparator() {
CellComparator c = MetaCellComparator.META_COMPARATOR;
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
KeyValue a = new KeyValue(Bytes.toBytes("table1"), now);
KeyValue b = new KeyValue(Bytes.toBytes("table2"), now);
@ -589,12 +590,12 @@ public class TestKeyValue {
new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
Bytes.toBytes("2")),
new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
System.currentTimeMillis(), Bytes.toBytes("2"),
EnvironmentEdgeManager.currentTime(), Bytes.toBytes("2"),
new Tag[] {
new ArrayBackedTag((byte) 120, "tagA"),
new ArrayBackedTag((byte) 121, Bytes.toBytes("tagB")) }),
new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
System.currentTimeMillis(), Bytes.toBytes("2"),
EnvironmentEdgeManager.currentTime(), Bytes.toBytes("2"),
new Tag[] { new ArrayBackedTag((byte) 0, "tagA") }),
new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes(""),
Bytes.toBytes("1")) };

View File

@ -23,6 +23,7 @@ import static org.junit.Assert.fail;
import java.text.MessageFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -171,7 +172,7 @@ public final class Waiter {
*/
public static <E extends Exception> long waitFor(Configuration conf, long timeout, long interval,
boolean failIfTimeout, Predicate<E> predicate) {
long started = System.currentTimeMillis();
long started = EnvironmentEdgeManager.currentTime();
long adjustedTimeout = (long) (getWaitForRatio(conf) * timeout);
long mustEnd = started + adjustedTimeout;
long remainderWait;
@ -183,7 +184,7 @@ public final class Waiter {
LOG.info(MessageFormat.format("Waiting up to [{0}] milli-secs(wait.for.ratio=[{1}])",
adjustedTimeout, getWaitForRatio(conf)));
while (!(eval = predicate.evaluate())
&& (remainderWait = mustEnd - System.currentTimeMillis()) > 0) {
&& (remainderWait = mustEnd - EnvironmentEdgeManager.currentTime()) > 0) {
try {
// handle tail case when remainder wait is less than one interval
sleepInterval = Math.min(remainderWait, interval);
@ -197,7 +198,7 @@ public final class Waiter {
if (!eval) {
if (interrupted) {
LOG.warn(MessageFormat.format("Waiting interrupted after [{0}] msec",
System.currentTimeMillis() - started));
EnvironmentEdgeManager.currentTime() - started));
} else if (failIfTimeout) {
String msg = getExplanation(predicate);
fail(MessageFormat
@ -208,7 +209,7 @@ public final class Waiter {
MessageFormat.format("Waiting timed out after [{0}] msec", adjustedTimeout) + msg);
}
}
return (eval || interrupted) ? (System.currentTimeMillis() - started) : -1;
return (eval || interrupted) ? (EnvironmentEdgeManager.currentTime() - started) : -1;
} catch (Exception ex) {
throw new RuntimeException(ex);
}

View File

@ -39,7 +39,7 @@ import org.junit.experimental.categories.Category;
@Category({ MiscTests.class, SmallTests.class })
public class TestByteBufferArray {
private static final Random RANDOM = new Random(System.currentTimeMillis());
private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTime());
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =

View File

@ -359,7 +359,7 @@ public class TestBytes extends TestCase {
public void testToStringBytesBinaryReversible() {
// let's run test with 1000 randomly generated byte arrays
Random rand = new Random(System.currentTimeMillis());
Random rand = new Random(EnvironmentEdgeManager.currentTime());
byte[] randomBytes = new byte[1000];
for (int i = 0; i < 1000; i++) {
rand.nextBytes(randomBytes);

View File

@ -56,7 +56,7 @@ public class TestThreads {
});
LOG.debug("Starting sleeper thread (" + SLEEP_TIME_MS + " ms)");
sleeper.start();
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
LOG.debug("Main thread: sleeping for 200 ms");
Threads.sleep(200);
@ -75,7 +75,7 @@ public class TestThreads {
assertTrue("sleepWithoutInterrupt did not preserve the thread's " +
"interrupted status", wasInterrupted.get());
long timeElapsed = System.currentTimeMillis() - startTime;
long timeElapsed = EnvironmentEdgeManager.currentTime() - startTime;
// We expect to wait at least SLEEP_TIME_MS, but we can wait more if there is a GC.
assertTrue("Elapsed time " + timeElapsed + " ms is out of the expected " +
" sleep time of " + SLEEP_TIME_MS, SLEEP_TIME_MS - timeElapsed < TOLERANCE_MS);

View File

@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.UserGroupInformation;
@ -103,7 +104,7 @@ public class TestSecureExport {
private static final byte[] ROW3 = Bytes.toBytes("row3");
private static final byte[] QUAL = Bytes.toBytes("qual");
private static final String LOCALHOST = "localhost";
private static final long NOW = System.currentTimeMillis();
private static final long NOW = EnvironmentEdgeManager.currentTime();
// user granted with all global permission
private static final String USER_ADMIN = "admin";
// user is table owner. will have all permissions on table

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.metrics.Counter;
import org.apache.hadoop.hbase.metrics.Gauge;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.Timer;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -77,14 +78,14 @@ public class ExampleMasterObserverWithMetrics implements MasterCoprocessor, Mast
TableDescriptor desc, RegionInfo[] regions) throws IOException {
// we rely on the fact that there is only 1 instance of our MasterObserver. We keep track of
// when the operation starts before the operation is executing.
this.createTableStartTime = System.currentTimeMillis();
this.createTableStartTime = EnvironmentEdgeManager.currentTime();
}
@Override
public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableDescriptor desc, RegionInfo[] regions) throws IOException {
if (this.createTableStartTime > 0) {
long time = System.currentTimeMillis() - this.createTableStartTime;
long time = EnvironmentEdgeManager.currentTime() - this.createTableStartTime;
LOG.info("Create table took: " + time);
// Update the timer metric for the create table operation duration.

View File

@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.common.math.IntMath;
@ -225,7 +226,7 @@ public class WriteHeavyIncrementObserver implements RegionCoprocessor, RegionObs
private long getUniqueTimestamp(byte[] row) {
int slot = Bytes.hashCode(row) & mask;
MutableLong lastTimestamp = lastTimestamps[slot];
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
synchronized (lastTimestamp) {
long pt = lastTimestamp.longValue() >> 10;
if (now > pt) {

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
@ -112,7 +113,7 @@ public class TestZooKeeperScanPolicyObserver {
@Test
public void test() throws IOException, KeeperException, InterruptedException {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
put(0, 100, now - 10000);
assertValueEquals(0, 100);

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.impl.TerminalImpl;
import org.apache.hadoop.hbase.hbtop.terminal.impl.batch.BatchTerminal;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -98,7 +99,7 @@ public class Screen implements Closeable {
nextScreenView = currentScreenView.handleKeyPress(keyPress);
} else {
if (timerTimestamp != null) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
if (timerTimestamp <= now) {
// Dispatch the timer to the current screen
timerTimestamp = null;
@ -131,7 +132,7 @@ public class Screen implements Closeable {
}
public void setTimer(long delay) {
timerTimestamp = System.currentTimeMillis() + delay;
timerTimestamp = EnvironmentEdgeManager.currentTime() + delay;
}
public void cancelTimer() {

View File

@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
import org.apache.hadoop.hbase.hbtop.field.FieldValue;
import org.apache.hadoop.hbase.hbtop.mode.DrillDownInfo;
import org.apache.hadoop.hbase.hbtop.mode.Mode;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -146,7 +147,7 @@ public class TopScreenModel {
private void refreshSummary(ClusterMetrics clusterMetrics) {
String currentTime = ISO_8601_EXTENDED_TIME_FORMAT
.format(System.currentTimeMillis());
.format(EnvironmentEdgeManager.currentTime());
String version = clusterMetrics.getHBaseVersion();
String clusterId = clusterMetrics.getClusterId();
int liveServers = clusterMetrics.getLiveServerMetrics().size();

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.hbtop.screen.help.HelpScreenView;
import org.apache.hadoop.hbase.hbtop.screen.mode.ModeScreenView;
import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
@ -92,7 +93,7 @@ public class TopScreenPresenter {
public long refresh(boolean force) {
if (!force) {
long delay = System.currentTimeMillis() - lastRefreshTimestamp;
long delay = EnvironmentEdgeManager.currentTime() - lastRefreshTimestamp;
if (delay < refreshDelay.get()) {
return refreshDelay.get() - delay;
}
@ -114,7 +115,7 @@ public class TopScreenPresenter {
topScreenView.refreshTerminal();
lastRefreshTimestamp = System.currentTimeMillis();
lastRefreshTimestamp = EnvironmentEdgeManager.currentTime();
iterations++;
return refreshDelay.get();
}

View File

@ -28,7 +28,7 @@ import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@ -43,7 +43,7 @@ public class NoCacheFilter implements Filter {
throws IOException, ServletException {
HttpServletResponse httpRes = (HttpServletResponse) res;
httpRes.setHeader("Cache-Control", "no-cache");
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
httpRes.addDateHeader("Expires", now);
httpRes.addDateHeader("Date", now);
httpRes.addHeader("Pragma", "no-cache");

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.AsyncCallback;
@ -118,9 +119,9 @@ public class ChaosZKClient {
CreateMode.EPHEMERAL_SEQUENTIAL,
submitTaskCallback,
taskObject);
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
while ((System.currentTimeMillis() - start) < TASK_EXECUTION_TIMEOUT) {
while ((EnvironmentEdgeManager.currentTime() - start) < TASK_EXECUTION_TIMEOUT) {
if(taskStatus != null) {
return taskStatus;
}

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.yetus.audience.InterfaceAudience;
@ -251,9 +252,9 @@ public class DistributedHBaseCluster extends HBaseCluster {
private void waitForServiceToStop(ServiceType service, ServerName serverName, long timeout)
throws IOException {
LOG.info("Waiting for service: {} to stop: {}", service, serverName.getServerName());
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
while ((System.currentTimeMillis() - start) < timeout) {
while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
if (!clusterManager.isRunning(service, serverName.getHostname(), serverName.getPort())) {
return;
}
@ -265,9 +266,9 @@ public class DistributedHBaseCluster extends HBaseCluster {
private void waitForServiceToStart(ServiceType service, ServerName serverName, long timeout)
throws IOException {
LOG.info("Waiting for service: {} to start: ", service, serverName.getServerName());
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
while ((System.currentTimeMillis() - start) < timeout) {
while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
if (clusterManager.isRunning(service, serverName.getHostname(), serverName.getPort())) {
return;
}
@ -308,8 +309,8 @@ public class DistributedHBaseCluster extends HBaseCluster {
@Override
public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
long start = System.currentTimeMillis();
while (System.currentTimeMillis() - start < timeout) {
long start = EnvironmentEdgeManager.currentTime();
while (EnvironmentEdgeManager.currentTime() - start < timeout) {
try {
getMasterAdminService();
return true;

View File

@ -25,6 +25,7 @@ import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.util.Threads;
@ -162,15 +163,15 @@ public class IntegrationTestIngest extends IntegrationTestBase {
LOG.info("Cluster size:" + util.getHBaseClusterInterface()
.getClusterMetrics().getLiveServerMetrics().size());
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
long startKey = 0;
long numKeys = getNumKeys(keysPerServerPerIter);
while (System.currentTimeMillis() - start < 0.9 * runtime) {
while (EnvironmentEdgeManager.currentTime() - start < 0.9 * runtime) {
LOG.info("Intended run time: " + (runtime/60000) + " min, left:" +
((runtime - (System.currentTimeMillis() - start))/60000) + " min");
((runtime - (EnvironmentEdgeManager.currentTime() - start))/60000) + " min");
int ret = -1;
ret = loadTool.run(getArgsForLoadTestTool("-write",

View File

@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
import org.junit.After;
@ -115,11 +116,11 @@ public class IntegrationTestManyRegions {
byte[][] splits = algo.split(REGION_COUNT);
LOG.info(String.format("Creating table %s with %d splits.", TABLE_NAME, REGION_COUNT));
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
try {
admin.createTable(desc, splits);
LOG.info(String.format("Pre-split table created successfully in %dms.",
(System.currentTimeMillis() - startTime)));
(EnvironmentEdgeManager.currentTime() - startTime)));
} catch (IOException e) {
LOG.error("Failed to create table", e);
}

View File

@ -25,6 +25,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.util.ConstantDelayQueue;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.util.MultiThreadedUpdater;
import org.apache.hadoop.hbase.util.MultiThreadedWriter;
@ -163,15 +164,15 @@ public class IntegrationTestRegionReplicaReplication extends IntegrationTestInge
getConf().getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs",
5000) + 1000);
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
long startKey = 0;
long numKeys = getNumKeys(keysPerServerPerIter);
while (System.currentTimeMillis() - start < 0.9 * runtime) {
while (EnvironmentEdgeManager.currentTime() - start < 0.9 * runtime) {
LOG.info("Intended run time: " + (runtime/60000) + " min, left:" +
((runtime - (System.currentTimeMillis() - start))/60000) + " min");
((runtime - (EnvironmentEdgeManager.currentTime() - start))/60000) + " min");
int verifyPercent = 100;
int updatePercent = 20;

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.regionserver.StripeStoreConfig;
import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LoadTestKVGenerator;
import org.apache.hadoop.hbase.util.MultiThreadedAction;
import org.apache.hadoop.hbase.util.MultiThreadedReader;
@ -204,14 +205,14 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
if (preloadKeys > 0) {
MultiThreadedWriter preloader = new MultiThreadedWriter(dataGen, conf, TABLE_NAME);
long time = System.currentTimeMillis();
long time = EnvironmentEdgeManager.currentTime();
preloader.start(0, startKey, writeThreads);
preloader.waitForFinish();
if (preloader.getNumWriteFailures() > 0) {
throw new IOException("Preload failed");
}
int waitTime = (int)Math.min(preloadKeys / 100, 30000); // arbitrary
status(description + " preload took " + (System.currentTimeMillis()-time)/1000
status(description + " preload took " + (EnvironmentEdgeManager.currentTime()-time)/1000
+ "sec; sleeping for " + waitTime/1000 + "sec for store to stabilize");
Thread.sleep(waitTime);
}
@ -221,7 +222,7 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
// reader.getMetrics().enable();
reader.linkToWriter(writer);
long testStartTime = System.currentTimeMillis();
long testStartTime = EnvironmentEdgeManager.currentTime();
writer.start(startKey, endKey, writeThreads);
reader.start(startKey, endKey, readThreads);
writer.waitForFinish();
@ -255,7 +256,8 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
}
}
LOG.info("Performance data dump for " + description + " test: \n" + perfDump.toString());*/
status(description + " test took " + (System.currentTimeMillis()-testStartTime)/1000 + "sec");
status(description + " test took " +
(EnvironmentEdgeManager.currentTime() - testStartTime) / 1000 + "sec");
Assert.assertTrue(success);
}

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -67,9 +68,8 @@ public class MoveRegionsOfTableAction extends Action {
Collections.shuffle(regions);
long start = System.currentTimeMillis();
for (RegionInfo regionInfo:regions) {
long start = EnvironmentEdgeManager.currentTime();
for (RegionInfo regionInfo : regions) {
// Don't try the move if we're stopping
if (context.isStopping()) {
return;
@ -82,7 +82,7 @@ public class MoveRegionsOfTableAction extends Action {
// put a limit on max num regions. Otherwise, this won't finish
// with a sleep time of 10sec, 100 regions will finish in 16min
if (System.currentTimeMillis() - start > maxTime) {
if (EnvironmentEdgeManager.currentTime() - start > maxTime) {
break;
}
}

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.chaos.actions;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -48,7 +49,7 @@ public class SnapshotTableAction extends Action {
@Override
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
String snapshotName = tableName + "-it-" + System.currentTimeMillis();
String snapshotName = tableName + "-it-" + EnvironmentEdgeManager.currentTime();
Admin admin = util.getAdmin();
// Don't try the snapshot if we're stopping

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.chaos.policies;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
/** A policy which does stuff every time interval. */
@ -37,11 +38,11 @@ public abstract class PeriodicPolicy extends Policy {
Threads.sleep(jitter);
while (!isStopped()) {
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
runOneIteration();
if (isStopped()) return;
long sleepTime = periodMs - (System.currentTimeMillis() - start);
long sleepTime = periodMs - (EnvironmentEdgeManager.currentTime() - start);
if (sleepTime > 0) {
LOG.info("Sleeping for {} ms", sleepTime);
Threads.sleep(sleepTime);

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;
@ -127,7 +128,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
Configuration conf = getConf();
TableName tableName = TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
String snapshotName = conf.get(SNAPSHOT_NAME_KEY, tableName.getQualifierAsString()
+ "_snapshot_" + System.currentTimeMillis());
+ "_snapshot_" + EnvironmentEdgeManager.currentTime());
int numRegions = conf.getInt(NUM_REGIONS_KEY, DEFAULT_NUM_REGIONS);
String tableDirStr = conf.get(TABLE_DIR_KEY);
Path tableDir;


@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Random64;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.wal.WALEdit;
@ -711,9 +712,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
while (numQueries < maxQueries) {
numQueries++;
byte[] prev = node.prev;
long t1 = System.currentTimeMillis();
long t1 = EnvironmentEdgeManager.currentTime();
node = getNode(prev, table, node);
long t2 = System.currentTimeMillis();
long t2 = EnvironmentEdgeManager.currentTime();
if (node == null) {
LOG.error("ConcurrentWalker found UNDEFINED NODE: " + Bytes.toStringBinary(prev));
context.getCounter(Counts.UNDEFINED).increment(1l);
@ -1702,10 +1703,10 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
scan.setBatch(1);
scan.addColumn(FAMILY_NAME, COLUMN_PREV);
long t1 = System.currentTimeMillis();
long t1 = EnvironmentEdgeManager.currentTime();
ResultScanner scanner = table.getScanner(scan);
Result result = scanner.next();
long t2 = System.currentTimeMillis();
long t2 = EnvironmentEdgeManager.currentTime();
scanner.close();
if ( result != null) {
@ -1785,9 +1786,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
while (node != null && node.prev.length != NO_KEY.length &&
numQueries < maxQueries) {
byte[] prev = node.prev;
long t1 = System.currentTimeMillis();
long t1 = EnvironmentEdgeManager.currentTime();
node = getNode(prev, table, node);
long t2 = System.currentTimeMillis();
long t2 = EnvironmentEdgeManager.currentTime();
if (logEvery > 0 && numQueries % logEvery == 0) {
System.out.printf("CQ %d: %d %s \n", numQueries, t2 - t1, Bytes.toStringBinary(prev));
}


@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.test.util.warc.WARCInputFormat;
import org.apache.hadoop.hbase.test.util.warc.WARCRecord;
import org.apache.hadoop.hbase.test.util.warc.WARCWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
@ -582,7 +583,7 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
String contentType = warcHeader.getField("WARC-Identified-Payload-Type");
if (contentType != null) {
LOG.debug("Processing record id=" + recordID + ", targetURI=\"" + targetURI + "\"");
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
// Make row key


@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.util.MultiThreadedReader;
import org.apache.hadoop.hbase.util.Threads;
@ -143,7 +144,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
LOG.info("Cluster size:"+
util.getHBaseClusterInterface().getClusterMetrics().getLiveServerMetrics().size());
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
long startKey = 0;
@ -197,7 +198,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
// set the intended run time for the reader. The reader will do read requests
// to random keys for this amount of time.
long remainingTime = runtime - (System.currentTimeMillis() - start);
long remainingTime = runtime - (EnvironmentEdgeManager.currentTime() - start);
if (remainingTime <= 0) {
LOG.error("The amount of time left for the test to perform random reads is "
+ "non-positive. Increase the test execution time via "


@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@ -88,7 +89,7 @@ public class TableRecordReaderImpl {
}
if (logScannerActivity) {
LOG.info("Current scan=" + currentScan.toString());
timestamp = System.currentTimeMillis();
timestamp = EnvironmentEdgeManager.currentTime();
rowcount = 0;
}
}
@ -196,7 +197,7 @@ public class TableRecordReaderImpl {
if (logScannerActivity) {
rowcount ++;
if (rowcount >= logPerRowCount) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
timestamp = now;
@ -235,7 +236,7 @@ public class TableRecordReaderImpl {
return false;
} catch (IOException ioe) {
if (logScannerActivity) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
LOG.info(ioe.toString(), ioe);


@ -437,8 +437,10 @@ public class HFileOutputFormat2
private void close(final StoreFileWriter w) throws IOException {
if (w != null) {
w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString()));
w.appendFileInfo(BULKLOAD_TIME_KEY,
Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
w.appendFileInfo(BULKLOAD_TASK_KEY,
Bytes.toBytes(context.getTaskAttemptID().toString()));
w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
w.appendTrackedTimestampsToMetadata();


@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
@ -765,7 +766,7 @@ public class ImportTsv extends Configured implements Tool {
}
// If timestamp option is not specified, use current system time.
long timstamp = getConf().getLong(TIMESTAMP_CONF_KEY, System.currentTimeMillis());
long timstamp = getConf().getLong(TIMESTAMP_CONF_KEY, EnvironmentEdgeManager.currentTime());
// Set it back to replace invalid timestamp (non-numeric) with current
// system time
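
The ImportTsv hunk above keeps the documented fallback (use the current time when no timestamp is configured) while switching the clock source. A hypothetical standalone sketch of that fallback pattern; the property name "my.job.timestamp" is made up, whereas the real code reads its own TIMESTAMP_CONF_KEY constant:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class DefaultTimestampSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // When the job did not set a timestamp, every cell gets "now" from the injectable clock.
    long ts = conf.getLong("my.job.timestamp", EnvironmentEdgeManager.currentTime());
    System.out.println("timestamp used for puts: " + ts);
  }
}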


@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
@ -492,7 +493,7 @@ public class SyncTable extends Configured implements Tool {
sourceCell.getFamilyOffset(), sourceCell.getFamilyLength())
.setQualifier(sourceCell.getQualifierArray(),
sourceCell.getQualifierOffset(), sourceCell.getQualifierLength())
.setTimestamp(System.currentTimeMillis())
.setTimestamp(EnvironmentEdgeManager.currentTime())
.setValue(sourceCell.getValueArray(),
sourceCell.getValueOffset(), sourceCell.getValueLength()).build();
}
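
The SyncTable change above stamps rebuilt cells with the injectable clock rather than the raw system time. A minimal, hypothetical sketch of building such a cell through the public CellBuilder API (the row, family, qualifier and value literals are placeholders):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class CellTimestampSketch {
  public static void main(String[] args) {
    Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
        .setRow(Bytes.toBytes("row1"))
        .setFamily(Bytes.toBytes("f"))
        .setQualifier(Bytes.toBytes("q"))
        .setTimestamp(EnvironmentEdgeManager.currentTime())   // injectable clock, not System.currentTimeMillis()
        .setType(Cell.Type.Put)
        .setValue(Bytes.toBytes("v"))
        .build();
    System.out.println(cell);
  }
}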


@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
@ -90,7 +91,7 @@ public class TableRecordReaderImpl {
this.scanner = this.htable.getScanner(currentScan);
if (logScannerActivity) {
LOG.info("Current scan=" + currentScan.toString());
timestamp = System.currentTimeMillis();
timestamp = EnvironmentEdgeManager.currentTime();
rowcount = 0;
}
}
@ -212,7 +213,7 @@ public class TableRecordReaderImpl {
if (logScannerActivity) {
rowcount ++;
if (rowcount >= logPerRowCount) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount);
timestamp = now;
rowcount = 0;
@ -264,7 +265,7 @@ public class TableRecordReaderImpl {
} catch (IOException ioe) {
updateCounters();
if (logScannerActivity) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount);
LOG.info(ioe.toString(), ioe);
String lastRow = lastSuccessfulRow == null ?


@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
@ -342,8 +343,8 @@ public class WALPlayer extends Configured implements Tool {
conf.setStrings(TABLES_KEY, tables);
conf.setStrings(TABLE_MAP_KEY, tableMap);
conf.set(FileInputFormat.INPUT_DIR, inputDirs);
Job job =
Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis()));
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" +
EnvironmentEdgeManager.currentTime()));
job.setJarByClass(WALPlayer.class);
job.setInputFormatClass(WALInputFormat.class);


@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.Pair;
@ -416,7 +417,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
int reportBytes = 0;
int bytesRead;
long stime = System.currentTimeMillis();
long stime = EnvironmentEdgeManager.currentTime();
while ((bytesRead = in.read(buffer)) > 0) {
out.write(buffer, 0, bytesRead);
totalBytesWritten += bytesRead;
@ -431,7 +432,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
reportBytes = 0;
}
}
long etime = System.currentTimeMillis();
long etime = EnvironmentEdgeManager.currentTime();
context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
context.setStatus(String.format(statusMessage,


@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.trace.SpanReceiverHost;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.ByteArrayHashKey;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.GsonUtil;
import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.MurmurHash;
@ -1134,7 +1135,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
static abstract class TestBase {
// Below is make it so when Tests are all running in the one
// jvm, that they each have a differently seeded Random.
private static final Random randomSeed = new Random(System.currentTimeMillis());
private static final Random randomSeed = new Random(EnvironmentEdgeManager.currentTime());
private static long nextRandomSeed() {
return randomSeed.nextLong();
@ -2391,7 +2392,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
// write the serverName columns
MetaTableAccessor.updateRegionLocation(connection,
regionInfo, ServerName.valueOf("localhost", 60010, rand.nextLong()), i,
System.currentTimeMillis());
EnvironmentEdgeManager.currentTime());
return true;
}
}


@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
import org.apache.hadoop.hbase.wal.WAL;
@ -114,7 +115,7 @@ public class TestCellBasedImportExport2 {
private static String FQ_OUTPUT_DIR;
private static final String EXPORT_BATCH_SIZE = "100";
private static final long now = System.currentTimeMillis();
private static final long now = EnvironmentEdgeManager.currentTime();
private final TableName EXPORT_TABLE = TableName.valueOf("export_table");
private final TableName IMPORT_TABLE = TableName.valueOf("import_table");
@ -588,7 +589,7 @@ public class TestCellBasedImportExport2 {
@Test
public void testExportScan() throws Exception {
int version = 100;
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
long endTime = startTime + 1;
String prefix = "row";
String label_0 = "label_0";


@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
@ -67,7 +68,7 @@ public class TestCellCounter {
private static Path FQ_OUTPUT_DIR;
private static final String OUTPUT_DIR = "target" + File.separator + "test-data" + File.separator
+ "output";
private static long now = System.currentTimeMillis();
private static long now = EnvironmentEdgeManager.currentTime();
@Rule
public TestName name = new TestName();


@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.mob.MobTestUtil;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
@ -292,7 +293,7 @@ public class TestCopyTable {
p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23"));
t.put(p);
long currentTime = System.currentTimeMillis();
long currentTime = EnvironmentEdgeManager.currentTime();
String[] args = new String[] { "--new.name=" + targetTable, "--families=a:b", "--all.cells",
"--starttime=" + (currentTime - 100000), "--endtime=" + (currentTime + 100000),
"--versions=1", sourceTable.getNameAsString() };


@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
@ -130,7 +131,7 @@ public class TestImportExport {
private static String FQ_OUTPUT_DIR;
private static final String EXPORT_BATCH_SIZE = "100";
private static final long now = System.currentTimeMillis();
private static final long now = EnvironmentEdgeManager.currentTime();
private final TableName EXPORT_TABLE = TableName.valueOf("export_table");
private final TableName IMPORT_TABLE = TableName.valueOf("import_table");
public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1);
@ -607,7 +608,7 @@ public class TestImportExport {
@Test
public void testExportScan() throws Exception {
int version = 100;
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
long endTime = startTime + 1;
String prefix = "row";
String label_0 = "label_0";


@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
@ -249,12 +250,12 @@ public class TestRowCounter {
// clean up content of TABLE_NAME
Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM));
ts = System.currentTimeMillis();
ts = EnvironmentEdgeManager.currentTime();
put1.addColumn(family, col1, ts, Bytes.toBytes("val1"));
table.put(put1);
Thread.sleep(100);
ts = System.currentTimeMillis();
ts = EnvironmentEdgeManager.currentTime();
put2.addColumn(family, col1, ts, Bytes.toBytes("val2"));
put3.addColumn(family, col1, ts, Bytes.toBytes("val3"));
table.put(put2);
@ -302,9 +303,9 @@ public class TestRowCounter {
rowCounter.setConf(TEST_UTIL.getConfiguration());
args = Arrays.copyOf(args, args.length+1);
args[args.length-1]="--expectedCount=" + expectedCount;
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
int result = rowCounter.run(args);
long duration = System.currentTimeMillis() - start;
long duration = EnvironmentEdgeManager.currentTime() - start;
LOG.debug("row count duration (ms): " + duration);
assertTrue(result==0);
}
@ -318,9 +319,9 @@ public class TestRowCounter {
*/
private void runCreateSubmittableJobWithArgs(String[] args, int expectedCount) throws Exception {
Job job = RowCounter.createSubmittableJob(TEST_UTIL.getConfiguration(), args);
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
job.waitForCompletion(true);
long duration = System.currentTimeMillis() - start;
long duration = EnvironmentEdgeManager.currentTime() - start;
LOG.debug("row count duration (ms): " + duration);
assertTrue(job.isSuccessful());
Counter counter = job.getCounters().findCounter(RowCounter.RowCounterMapper.Counters.ROWS);
@ -486,12 +487,12 @@ public class TestRowCounter {
// clean up content of TABLE_NAME
Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM));
ts = System.currentTimeMillis();
ts = EnvironmentEdgeManager.currentTime();
put1.addColumn(family, col1, ts, Bytes.toBytes("val1"));
table.put(put1);
Thread.sleep(100);
ts = System.currentTimeMillis();
ts = EnvironmentEdgeManager.currentTime();
put2.addColumn(family, col1, ts, Bytes.toBytes("val2"));
put3.addColumn(family, col1, ts, Bytes.toBytes("val3"));
table.put(put2);


@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.Counters;
import org.junit.AfterClass;
import org.junit.Assert;
@ -159,7 +160,7 @@ public class TestSyncTable {
final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source");
final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target");
Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableIgnoreTimestampsTrue");
long current = System.currentTimeMillis();
long current = EnvironmentEdgeManager.currentTime();
writeTestData(sourceTableName, targetTableName, current - 1000, current);
hashSourceTable(sourceTableName, testDir, "--ignoreTimestamps=true");
Counters syncCounters = syncTables(sourceTableName, targetTableName,
@ -490,7 +491,7 @@ public class TestSyncTable {
int sourceRegions = 10;
int targetRegions = 6;
if (ArrayUtils.isEmpty(timestamps)) {
long current = System.currentTimeMillis();
long current = EnvironmentEdgeManager.currentTime();
timestamps = new long[]{current,current};
}


@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@ -44,7 +45,7 @@ public class TestWALInputFormat {
public void testAddFile() {
List<FileStatus> lfss = new ArrayList<>();
LocatedFileStatus lfs = Mockito.mock(LocatedFileStatus.class);
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
Mockito.when(lfs.getPath()).thenReturn(new Path("/name." + now));
WALInputFormat.addFile(lfss, lfs, now, now);
assertEquals(1, lfss.size());


@ -41,8 +41,8 @@ import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
@ -131,7 +131,7 @@ public class TestWALRecordReader {
WAL log = walfactory.getWAL(info);
// This test depends on timestamp being millisecond based and the filename of the WAL also
// being millisecond based.
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTime();
WALEdit edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value));
log.appendData(info, getWalKeyImpl(ts, scopes), edit);
@ -145,7 +145,7 @@ public class TestWALRecordReader {
LOG.info("Past 1st WAL roll " + log.toString());
Thread.sleep(1);
long ts1 = System.currentTimeMillis();
long ts1 = EnvironmentEdgeManager.currentTime();
edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1+1, value));
@ -195,21 +195,24 @@ public class TestWALRecordReader {
byte [] value = Bytes.toBytes("value");
WALEdit edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
System.currentTimeMillis(), value));
long txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
EnvironmentEdgeManager.currentTime(), value));
long txid = log.appendData(info,
getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
log.sync(txid);
Thread.sleep(1); // make sure 2nd log gets a later timestamp
long secondTs = System.currentTimeMillis();
long secondTs = EnvironmentEdgeManager.currentTime();
log.rollWriter();
edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), System.currentTimeMillis(), value));
txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"),
EnvironmentEdgeManager.currentTime(), value));
txid = log.appendData(info,
getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
log.sync(txid);
log.shutdown();
walfactory.shutdown();
long thirdTs = System.currentTimeMillis();
long thirdTs = EnvironmentEdgeManager.currentTime();
// should have 2 log files now
WALInputFormat input = new WALInputFormat();


@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.Job;
import org.junit.AfterClass;
import org.junit.Before;
@ -283,20 +284,20 @@ public class TestVerifyReplication extends TestReplicationBase {
// Take source and target tables snapshot
Path rootDir = CommonFSUtils.getRootDir(CONF1);
FileSystem fs = rootDir.getFileSystem(CONF1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
// Take target snapshot
Path peerRootDir = CommonFSUtils.getRootDir(CONF2);
FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
String peerFSAddress = peerFs.getUri().toString();
String tmpPath1 = UTIL1.getRandomDir().toString();
String tmpPath2 = "/tmp" + System.currentTimeMillis();
String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime();
String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
"--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName,
@ -320,11 +321,11 @@ public class TestVerifyReplication extends TestReplicationBase {
Delete delete = new Delete(put.getRow());
htable2.delete(delete);
sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
@ -388,20 +389,20 @@ public class TestVerifyReplication extends TestReplicationBase {
// Take source and target tables snapshot
Path rootDir = CommonFSUtils.getRootDir(CONF1);
FileSystem fs = rootDir.getFileSystem(CONF1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true);
// Take target snapshot
Path peerRootDir = CommonFSUtils.getRootDir(CONF2);
FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName,
Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true);
String peerFSAddress = peerFs.getUri().toString();
String tmpPath1 = UTIL1.getRandomDir().toString();
String tmpPath2 = "/tmp" + System.currentTimeMillis();
String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime();
String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(),
"--sourceSnapshotName=" + sourceSnapshotName,
@ -426,11 +427,11 @@ public class TestVerifyReplication extends TestReplicationBase {
Delete delete = new Delete(put.getRow());
htable3.delete(delete);
sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true);
peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName,
Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true);


@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
@ -105,7 +106,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
// normal Batch tests
byte[] qualifierName = Bytes.toBytes("f1");
Put put = new Put(Bytes.toBytes("r1"));
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTime();
put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1002"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v1001"));
@ -169,7 +170,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
// normal Batch tests
byte[] qualifierName = Bytes.toBytes("f1");
Put put = new Put(Bytes.toBytes("r1"));
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTime();
put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v2"));
@ -286,20 +287,20 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
// Take source and target tables snapshot
Path rootDir = CommonFSUtils.getRootDir(CONF1);
FileSystem fs = rootDir.getFileSystem(CONF1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
// Take target snapshot
Path peerRootDir = CommonFSUtils.getRootDir(CONF2);
FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
String peerFSAddress = peerFs.getUri().toString();
String temPath1 = UTIL1.getRandomDir().toString();
String temPath2 = "/tmp" + System.currentTimeMillis();
String temPath2 = "/tmp" + EnvironmentEdgeManager.currentTime();
String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
"--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName,
@ -323,11 +324,11 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
Delete delete = new Delete(put.getRow());
htable2.delete(delete);
sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);


@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.mapreduce.Job;
import org.junit.AfterClass;
@ -165,14 +166,14 @@ public class TestVerifyReplicationCrossDiffHdfs {
public void testVerifyRepBySnapshot() throws Exception {
Path rootDir = CommonFSUtils.getRootDir(conf1);
FileSystem fs = rootDir.getFileSystem(conf1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME, new String(FAMILY),
sourceSnapshotName, rootDir, fs, true);
// Take target snapshot
Path peerRootDir = CommonFSUtils.getRootDir(conf2);
FileSystem peerFs = peerRootDir.getFileSystem(conf2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME, new String(FAMILY),
peerSnapshotName, peerRootDir, peerFs, true);


@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@ -328,13 +329,14 @@ public class TestExportSnapshot {
private Path getHdfsDestinationDir() {
Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
Path path = new Path(new Path(rootDir, "export-test"), "export-" + System.currentTimeMillis());
Path path = new Path(new Path(rootDir, "export-test"), "export-" +
EnvironmentEdgeManager.currentTime());
LOG.info("HDFS export destination path: " + path);
return path;
}
static Path getLocalDestinationDir(HBaseTestingUtility htu) {
Path path = htu.getDataTestDir("local-export-" + System.currentTimeMillis());
Path path = htu.getDataTestDir("local-export-" + EnvironmentEdgeManager.currentTime());
try {
FileSystem fs = FileSystem.getLocal(htu.getConfiguration());
LOG.info("Local export destination path: " + path);


@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
@ -115,7 +116,7 @@ public class TestExportSnapshotV1NoCluster {
static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtility hctu, Path testDir)
throws IOException {
Path path = new Path(new Path(testDir, "export-test"),
"export-" + System.currentTimeMillis()).makeQualified(fs.getUri(),
"export-" + EnvironmentEdgeManager.currentTime()).makeQualified(fs.getUri(),
fs.getWorkingDirectory());
LOG.info("Export destination={}, fs={}, fsurl={}, fswd={}, testDir={}", path, fs, fs.getUri(),
fs.getWorkingDirectory(), testDir);


@ -284,7 +284,7 @@ public class ProcedureExecutor<TEnvironment> {
}
long evictTtl = conf.getInt(EVICT_TTL_CONF_KEY, DEFAULT_EVICT_TTL);
long evictAckTtl = conf.getInt(EVICT_ACKED_TTL_CONF_KEY, DEFAULT_ACKED_EVICT_TTL);
if (retainer.isExpired(System.currentTimeMillis(), evictTtl, evictAckTtl)) {
if (retainer.isExpired(EnvironmentEdgeManager.currentTime(), evictTtl, evictAckTtl)) {
LOG.debug("Procedure {} has already been finished and expired, skip force updating",
procId);
return;


@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase;
import org.apache.hadoop.hbase.procedure2.util.ByteSlot;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.yetus.audience.InterfaceAudience;
@ -829,12 +830,12 @@ public class WALProcedureStore extends ProcedureStoreBase {
// Wait SYNC_WAIT_MSEC or the signal of "slots full" before flushing
syncMaxSlot = runningProcCount;
assert syncMaxSlot > 0 : "unexpected syncMaxSlot=" + syncMaxSlot;
final long syncWaitSt = System.currentTimeMillis();
final long syncWaitSt = EnvironmentEdgeManager.currentTime();
if (slotIndex != syncMaxSlot) {
slotCond.await(syncWaitMsec, TimeUnit.MILLISECONDS);
}
final long currentTs = System.currentTimeMillis();
final long currentTs = EnvironmentEdgeManager.currentTime();
final long syncWaitMs = currentTs - syncWaitSt;
final float rollSec = getMillisFromLastRoll() / 1000.0f;
final float syncedPerSec = totalSyncedToStore / rollSec;
@ -979,7 +980,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
}
public long getMillisFromLastRoll() {
return (System.currentTimeMillis() - lastRollTs.get());
return (EnvironmentEdgeManager.currentTime() - lastRollTs.get());
}
void periodicRollForTesting() throws IOException {
@ -1103,7 +1104,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
stream = newStream;
flushLogId = logId;
totalSynced.set(0);
long rollTs = System.currentTimeMillis();
long rollTs = EnvironmentEdgeManager.currentTime();
lastRollTs.set(rollTs);
logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos, rollTs));


@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.Before;
@ -103,9 +104,9 @@ public class TestProcedureSchedulerConcurrency {
}
}
if (wakeCount.get() != oldWakeCount) {
lastUpdate = System.currentTimeMillis();
lastUpdate = EnvironmentEdgeManager.currentTime();
} else if (wakeCount.get() >= NRUNS &&
(System.currentTimeMillis() - lastUpdate) > WAIT_THRESHOLD) {
(EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD) {
break;
}
Threads.sleepWithoutInterrupt(25);


@ -31,7 +31,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
@ -159,7 +159,8 @@ public abstract class ProcedureStorePerformanceEvaluation<T extends ProcedureSto
boolean failure = false;
try {
for (Future<?> future : futures) {
long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 - System.currentTimeMillis();
long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 -
EnvironmentEdgeManager.currentTime();
failure |= (future.get(timeout, TimeUnit.MILLISECONDS).equals(EXIT_FAILURE));
}
} catch (Exception e) {
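
The ProcedureStorePerformanceEvaluation hunk above derives each future's wait from one shared deadline (start plus a fixed budget minus "now"), so the whole group of workers is bounded by wall-clock time rather than by a fixed timeout per call. A hypothetical, self-contained sketch of that budgeting pattern (DeadlineSketch, the pool and the ten-second budget are invented):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class DeadlineSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    List<Future<Integer>> futures = new ArrayList<>();
    futures.add(pool.submit(() -> 0));
    futures.add(pool.submit(() -> 0));
    long start = EnvironmentEdgeManager.currentTime();
    long budgetMs = 10_000L;
    for (Future<Integer> f : futures) {
      // Each get() waits only for whatever is left of the shared budget.
      long remaining = start + budgetMs - EnvironmentEdgeManager.currentTime();
      f.get(Math.max(0L, remaining), TimeUnit.MILLISECONDS);
    }
    pool.shutdown();
  }
}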


@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hbase.procedure2.store.wal;
import static java.lang.System.currentTimeMillis;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
@ -36,7 +34,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
@ -166,7 +164,7 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
List<Integer> procStates = shuffleProcWriteSequence();
TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used.
int numProcsPerWal = numWals > 0 ? procStates.size() / numWals : Integer.MAX_VALUE;
long startTime = currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
long lastTime = startTime;
for (int i = 0; i < procStates.size(); ++i) {
int procId = procStates.get(i);
@ -181,14 +179,14 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
store.update(procs[procId]);
}
if (i > 0 && i % numProcsPerWal == 0) {
long currentTime = currentTimeMillis();
long currentTime = EnvironmentEdgeManager.currentTime();
System.out.println("Forcing wall roll. Time taken on last WAL: " +
(currentTime - lastTime) / 1000.0f + " sec");
store.rollWriterForTesting();
lastTime = currentTime;
}
}
long timeTaken = currentTimeMillis() - startTime;
long timeTaken = EnvironmentEdgeManager.currentTime() - startTime;
System.out.println("\n\nDone writing WALs.\nNum procs : " + numProcs + "\nTotal time taken : "
+ StringUtils.humanTimeDiff(timeTaken) + "\n\n");
}
@ -199,9 +197,9 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
store.start(1);
store.recoverLease();
long startTime = currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
store.load(loader);
long timeTaken = System.currentTimeMillis() - startTime;
long timeTaken = EnvironmentEdgeManager.currentTime() - startTime;
System.out.println("******************************************");
System.out.println("Load time : " + (timeTaken / 1000.0f) + "sec");
System.out.println("******************************************");


@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.rest.filter.GzipFilter;
import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.Strings;
@ -398,7 +399,7 @@ public class RESTServer implements Constants {
// Put up info server.
int port = conf.getInt("hbase.rest.info.port", 8085);
if (port >= 0) {
conf.setLong("startcode", System.currentTimeMillis());
conf.setLong("startcode", EnvironmentEdgeManager.currentTime());
String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
this.infoServer = new InfoServer("rest", a, port, false, conf);
this.infoServer.setAttribute("hbase.conf", conf);


@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@ -104,7 +105,7 @@ public class ScannerResultGenerator extends ResultGenerator {
}
scanner = table.getScanner(scan);
cached = null;
id = Long.toString(System.currentTimeMillis()) +
id = Long.toString(EnvironmentEdgeManager.currentTime()) +
Integer.toHexString(scanner.hashCode());
} finally {
table.close();


@ -44,6 +44,7 @@ import javax.net.ssl.SSLContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
@ -332,7 +333,7 @@ public class Client {
method.addHeader(header);
}
}
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
if (resp != null) EntityUtils.consumeQuietly(resp.getEntity());
resp = httpClient.execute(method);
if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {
@ -342,7 +343,7 @@ public class Client {
resp = httpClient.execute(method);
}
long endTime = System.currentTimeMillis();
long endTime = EnvironmentEdgeManager.currentTime();
if (LOG.isTraceEnabled()) {
LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " +
resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms");


@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteAdmin;
import org.apache.hadoop.hbase.util.ByteArrayHashKey;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.MurmurHash;
import org.apache.hadoop.hbase.util.Pair;
@ -840,7 +841,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
// Below is make it so when Tests are all running in the one
// jvm, that they each have a differently seeded Random.
private static final Random randomSeed =
new Random(System.currentTimeMillis());
new Random(EnvironmentEdgeManager.currentTime());
private static long nextRandomSeed() {
return randomSeed.nextLong();
}


@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
@ -155,14 +156,14 @@ public class TestRemoteAdminRetries {
}
private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
try {
callExecutor.run();
fail("should be timeout exception!");
} catch (IOException e) {
assertTrue(Pattern.matches(".*MyTable.*timed out", e.toString()));
}
assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
assertTrue((EnvironmentEdgeManager.currentTime() - start) > MAX_TIME);
}
private static interface CallExecutor {


@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
@ -182,14 +183,14 @@ public class TestRemoteHTableRetries {
}
private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
try {
callExecutor.run();
fail("should be timeout exception!");
} catch (IOException e) {
assertTrue(Pattern.matches(".*request timed out", e.toString()));
}
assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
assertTrue((EnvironmentEdgeManager.currentTime() - start) > MAX_TIME);
}
private interface CallExecutor {


@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.rest.RESTServlet;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.junit.After;
@ -96,7 +97,7 @@ public class TestRemoteTable {
private static final byte[] VALUE_2 = Bytes.toBytes("testvalue2");
private static final long ONE_HOUR = 60 * 60 * 1000;
private static final long TS_2 = System.currentTimeMillis();
private static final long TS_2 = EnvironmentEdgeManager.currentTime();
private static final long TS_1 = TS_2 - ONE_HOUR;
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
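
TestRemoteTable, like TestImportExport, TestCellBasedImportExport2 and TestCellCounter above, now seeds a static timestamp field from the injectable clock. As a general JUnit observation rather than a claim about these particular tests: a static field initializer runs when the class is loaded, so an edge injected later in a @Before method does not change the already-captured value; only reads made after injection see the fake clock. A hypothetical skeleton for a test that does want full control of the clock:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class ExampleInjectedClockTest {   // invented test class, not part of this change
  private final ManualEnvironmentEdge edge = new ManualEnvironmentEdge();

  @Before
  public void injectClock() {
    edge.setValue(0L);
    EnvironmentEdgeManager.injectEdge(edge);
  }

  @After
  public void restoreClock() {
    EnvironmentEdgeManager.reset();   // do not leak the fake clock into other tests
  }

  @Test
  public void elapsedTimeIsExact() {
    long start = EnvironmentEdgeManager.currentTime();
    edge.incValue(30_000L);
    assertEquals(30_000L, EnvironmentEdgeManager.currentTime() - start);
  }
}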


@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
@ -84,8 +85,8 @@ public class TestEnableRSGroups {
// wait RSGroupBasedLoadBalancer online
RSGroupBasedLoadBalancer loadBalancer =
(RSGroupBasedLoadBalancer) TEST_UTIL.getMiniHBaseCluster().getMaster().getLoadBalancer();
long start = System.currentTimeMillis();
while (System.currentTimeMillis() - start <= 60000 && !loadBalancer.isOnline()) {
long start = EnvironmentEdgeManager.currentTime();
while (EnvironmentEdgeManager.currentTime() - start <= 60000 && !loadBalancer.isOnline()) {
LOG.info("waiting for rsgroup load balancer onLine...");
sleep(200);
}


@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
@ -501,8 +502,8 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase {
AtomicBoolean changed = new AtomicBoolean(false);
Thread t1 = new Thread(() -> {
LOG.debug("thread1 start running, will recover region state");
long current = System.currentTimeMillis();
while (System.currentTimeMillis() - current <= 50000) {
long current = EnvironmentEdgeManager.currentTime();
while (EnvironmentEdgeManager.currentTime() - current <= 50000) {
List<RegionInfo> regions = master.getAssignmentManager().getRegionsOnServer(movedServer);
LOG.debug("server region size is:{}", regions.size());
assert regions.size() >= 1;
@ -602,8 +603,8 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase {
AtomicBoolean changed = new AtomicBoolean(false);
Thread t1 = new Thread(() -> {
LOG.info("thread1 start running, will recover region state");
long current = System.currentTimeMillis();
while (System.currentTimeMillis() - current <= 50000) {
long current = EnvironmentEdgeManager.currentTime();
while (EnvironmentEdgeManager.currentTime() - current <= 50000) {
List<RegionInfo> regions = master.getAssignmentManager().getRegionsOnServer(ss);
List<RegionInfo> tableRegions = new ArrayList<>();
for (RegionInfo regionInfo : regions) {
@ -662,9 +663,9 @@ public class TestRSGroupsAdmin2 extends TestRSGroupsBase {
}
return getTableRegionMap().get(tableName).size() >= tableRegionCount;
});
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName());
long timeTaken = System.currentTimeMillis() - startTime;
long timeTaken = EnvironmentEdgeManager.currentTime() - startTime;
String msg =
"Should not take mote than 15000 ms to move a table with 100 regions. Time taken ="
+ timeTaken + " ms";


@ -19,6 +19,7 @@ package org.apache.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@ -62,8 +63,9 @@ public class HealthCheckChore extends ScheduledChore {
+ " number of times consecutively.");
}
// Always log health report.
LOG.info("Health status at " + StringUtils.formatTime(System.currentTimeMillis()) + " : "
+ report.getHealthReport());
LOG.info("Health status at " +
StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + " : " +
report.getHealthReport());
}
}
@ -73,9 +75,9 @@ public class HealthCheckChore extends ScheduledChore {
// First time we are seeing a failure. No need to stop, just
// record the time.
numTimesUnhealthy++;
startWindow = System.currentTimeMillis();
startWindow = EnvironmentEdgeManager.currentTime();
} else {
if ((System.currentTimeMillis() - startWindow) < failureWindow) {
if ((EnvironmentEdgeManager.currentTime() - startWindow) < failureWindow) {
numTimesUnhealthy++;
if (numTimesUnhealthy == threshold) {
stop = true;
@ -85,7 +87,7 @@ public class HealthCheckChore extends ScheduledChore {
} else {
// Outside of failure window, so we reset to 1.
numTimesUnhealthy = 1;
startWindow = System.currentTimeMillis();
startWindow = EnvironmentEdgeManager.currentTime();
stop = false;
}
}


@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.regionserver.ShipperListener;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -1033,10 +1034,10 @@ public class HFileBlock implements Cacheable {
protected void finishBlockAndWriteHeaderAndData(DataOutputStream out)
throws IOException {
ensureBlockReady();
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size());
out.write(onDiskChecksum);
HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);
}
/**
@ -1707,7 +1708,7 @@ public class HFileBlock implements Cacheable {
// checksums. Can change with circumstances. The below flag is whether the
// file has support for checksums (version 2+).
boolean checksumSupport = this.fileContext.isUseHBaseChecksum();
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
if (onDiskSizeWithHeader <= 0) {
// We were not passed the block size. Need to get it from the header. If header was
// not cached (see getCachedHeader above), need to seek to pull it in. This is costly
@ -1754,7 +1755,7 @@ public class HFileBlock implements Cacheable {
if (verifyChecksum && !validateChecksum(offset, curBlock, hdrSize)) {
return null;
}
long duration = System.currentTimeMillis() - startTime;
long duration = EnvironmentEdgeManager.currentTime() - startTime;
if (updateMetrics) {
HFile.updateReadLatency(duration, pread);
}


@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.Writable;
import org.apache.yetus.audience.InterfaceAudience;
@ -212,9 +213,9 @@ public class HFileWriterImpl implements HFile.Writer {
throws IOException {
trailer.setFileInfoOffset(outputStream.getPos());
finishFileInfo();
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
fileInfo.write(out);
HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);
}
/**
@ -837,9 +838,9 @@ public class HFileWriterImpl implements HFile.Writer {
trailer.setEntryCount(entryCount);
trailer.setCompressionCodec(hFileContext.getCompression());
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
trailer.serialize(outputStream);
- HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
+ HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);
if (closeOutputStream) {
outputStream.close();

@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.ClassSize;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -1074,7 +1075,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache {
long freedSumMb = 0;
int heavyEvictionCount = 0;
int freedDataOverheadPercent = 0;
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTime();
while (this.go) {
synchronized (this) {
try {
@@ -1107,7 +1108,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache {
// This is should be almost the same time (+/- 10s)
// because we get comparable volumes of freed bytes each time.
// 10s because this is default period to run evict() (see above this.wait)
- long stopTime = System.currentTimeMillis();
+ long stopTime = EnvironmentEdgeManager.currentTime();
if ((stopTime - startTime) > 1000 * 10 - 1) {
// Here we have to calc what situation we have got.
// We have the limit "hbase.lru.cache.heavy.eviction.bytes.size.limit"

@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -62,7 +63,7 @@ public final class PrefetchExecutor {
new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
- String name = "hfile-prefetch-" + System.currentTimeMillis();
+ String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime();
Thread t = new Thread(r, name);
t.setDaemon(true);
return t;

@@ -25,15 +25,16 @@ import org.apache.hadoop.hbase.CallDroppedException;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.trace.TraceUtil;
- import org.apache.yetus.audience.InterfaceAudience;
- import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.security.User;
- import org.apache.hbase.thirdparty.com.google.protobuf.Message;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.util.StringUtils;
import org.apache.htrace.core.TraceScope;
+ import org.apache.yetus.audience.InterfaceAudience;
+ import org.apache.yetus.audience.InterfaceStability;
+ import org.apache.hbase.thirdparty.com.google.protobuf.Message;
/**
* The request processing logic, which is usually executed in thread pools provided by an
@@ -101,7 +102,7 @@ public class CallRunner {
}
return;
}
- call.setStartTime(System.currentTimeMillis());
+ call.setStartTime(EnvironmentEdgeManager.currentTime());
if (call.getStartTime() > call.getDeadline()) {
RpcServer.LOG.warn("Dropping timed out call: " + call);
return;

@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.security.HBasePolicyProvider;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
@@ -192,7 +193,7 @@ public class NettyRpcServer extends RpcServer {
MethodDescriptor md, Message param, CellScanner cellScanner,
long receiveTime, MonitoredRPCHandler status) throws IOException {
return call(service, md, param, cellScanner, receiveTime, status,
- System.currentTimeMillis(), 0);
+ EnvironmentEdgeManager.currentTime(), 0);
}
@Override

@@ -30,6 +30,7 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.Message;
@@ -116,7 +117,7 @@ class NettyServerRpcConnection extends ServerRpcConnection {
long size, final InetAddress remoteAddress, int timeout,
CallCleanup reqCleanup) {
return new NettyServerCall(id, service, md, header, param, cellScanner, this, size,
- remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.bbAllocator,
+ remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator,
this.rpcServer.cellBlockBuilder, reqCleanup);
}

@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.GsonUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.security.UserGroupInformation;
@@ -395,7 +396,7 @@ public abstract class RpcServer implements RpcServerInterface,
Message result = call.getService().callBlockingMethod(md, controller, param);
long receiveTime = call.getReceiveTime();
long startTime = call.getStartTime();
- long endTime = System.currentTimeMillis();
+ long endTime = EnvironmentEdgeManager.currentTime();
int processingTime = (int) (endTime - startTime);
int qTime = (int) (startTime - receiveTime);
int totalTime = (int) (endTime - receiveTime);
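The three durations above are plain differences of the three timestamps recorded for a call. A small worked sketch of that arithmetic, using invented timestamps rather than numbers from any real workload:

  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public final class RpcTimingSketch {
    public static void main(String[] args) {
      // Invented timestamps standing in for the three clock reads made on a server call.
      long receiveTime = EnvironmentEdgeManager.currentTime(); // request landed on the server
      long startTime = receiveTime + 40;                       // a handler picked it up 40 ms later
      long endTime = startTime + 150;                          // the service method returned 150 ms later

      int processingTime = (int) (endTime - startTime);   // 150 ms in the handler
      int qTime = (int) (startTime - receiveTime);         //  40 ms queued before a handler was free
      int totalTime = (int) (endTime - receiveTime);       // 190 ms end to end
      System.out.println(processingTime + " / " + qTime + " / " + totalTime);
    }
  }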

@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
@@ -422,7 +423,7 @@ public abstract class ServerCall<T extends ServerRpcConnection> implements RpcCa
@Override
public long disconnectSince() {
if (!this.connection.isConnectionOpen()) {
- return System.currentTimeMillis() - receiveTime;
+ return EnvironmentEdgeManager.currentTime() - receiveTime;
} else {
return -1L;
}

@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.security.HBasePolicyProvider;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.io.IOUtils;
@@ -321,7 +322,7 @@ public class SimpleRpcServer extends RpcServer {
if (c == null) {
return;
}
- c.setLastContact(System.currentTimeMillis());
+ c.setLastContact(EnvironmentEdgeManager.currentTime());
try {
count = c.readAndProcess();
} catch (InterruptedException ieo) {
@@ -337,7 +338,7 @@ public class SimpleRpcServer extends RpcServer {
closeConnection(c);
c = null;
} else {
- c.setLastContact(System.currentTimeMillis());
+ c.setLastContact(EnvironmentEdgeManager.currentTime());
}
}
@@ -483,8 +484,8 @@ public class SimpleRpcServer extends RpcServer {
public Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md,
Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status)
throws IOException {
- return call(service, md, param, cellScanner, receiveTime, status, System.currentTimeMillis(),
- 0);
+ return call(service, md, param, cellScanner, receiveTime, status,
+ EnvironmentEdgeManager.currentTime(), 0);
}
@Override
@@ -609,7 +610,8 @@ public class SimpleRpcServer extends RpcServer {
}
SimpleServerRpcConnection register(SocketChannel channel) {
- SimpleServerRpcConnection connection = getConnection(channel, System.currentTimeMillis());
+ SimpleServerRpcConnection connection = getConnection(channel,
+ EnvironmentEdgeManager.currentTime());
add(connection);
if (LOG.isTraceEnabled()) {
LOG.trace("Connection from " + connection +
@@ -640,7 +642,7 @@ public class SimpleRpcServer extends RpcServer {
// synch'ed to avoid explicit invocation upon OOM from colliding with
// timer task firing
synchronized void closeIdle(boolean scanAll) {
- long minLastContact = System.currentTimeMillis() - maxIdleTime;
+ long minLastContact = EnvironmentEdgeManager.currentTime() - maxIdleTime;
// concurrent iterator might miss new connections added
// during the iteration, but that's ok because they won't
// be idle yet anyway and will be caught on next scan

@@ -29,9 +29,10 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hbase.HBaseIOException;
- import org.apache.yetus.audience.InterfaceAudience;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.util.StringUtils;
+ import org.apache.yetus.audience.InterfaceAudience;
/**
* Sends responses of RPC back to clients.
@@ -162,7 +163,7 @@ class SimpleRpcServerResponder extends Thread {
* @return the time of the purge.
*/
private long purge(long lastPurgeTime) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTime();
if (now < lastPurgeTime + this.simpleRpcServer.purgeTimeout) {
return lastPurgeTime;
}
@@ -247,7 +248,7 @@ class SimpleRpcServerResponder extends Thread {
return true;
} else {
// set the serve time when the response has to be sent later
- conn.lastSentTime = System.currentTimeMillis();
+ conn.lastSentTime = EnvironmentEdgeManager.currentTime();
return false; // Socket can't take more, we will have to come back.
}
}

@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
import org.apache.hadoop.hbase.nio.ByteBuff;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
@@ -209,7 +210,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
// Notify the client about the offending request
SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null,
- null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0,
+ null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0,
this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder);
this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION);
// Make sure the client recognizes the underlying exception
@@ -327,7 +328,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
RequestHeader header, Message param, CellScanner cellScanner, long size,
InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size,
- remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.bbAllocator,
+ remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator,
this.rpcServer.cellBlockBuilder, reqCleanup, this.responder);
}

@@ -193,6 +193,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.IdLock;
@@ -787,7 +788,7 @@ public class HMaster extends HRegionServer implements MasterServices {
*/
status.setStatus("Initializing Master file system");
- this.masterActiveTime = System.currentTimeMillis();
+ this.masterActiveTime = EnvironmentEdgeManager.currentTime();
// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
// always initialize the MemStoreLAB as we use a region to store data in master now, see
@@ -877,7 +878,7 @@ public class HMaster extends HRegionServer implements MasterServices {
// Start the Zombie master detector after setting master as active, see HBASE-21535
Thread zombieDetector = new Thread(new MasterInitializationMonitor(this),
- "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());
+ "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
zombieDetector.setDaemon(true);
zombieDetector.start();
@@ -1039,8 +1040,8 @@ public class HMaster extends HRegionServer implements MasterServices {
status.markComplete("Initialization successful");
LOG.info(String.format("Master has completed initialization %.3fsec",
- (System.currentTimeMillis() - masterActiveTime) / 1000.0f));
- this.masterFinishedInitializationTime = System.currentTimeMillis();
+ (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f));
+ this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
configurationManager.registerObserver(this.balancer);
configurationManager.registerObserver(this.cleanerPool);
configurationManager.registerObserver(this.hfileCleaner);
@@ -1100,11 +1101,11 @@ public class HMaster extends HRegionServer implements MasterServices {
* After master has started up, lets do balancer post startup initialization. Since this runs
* in activeMasterManager thread, it should be fine.
*/
- long start = System.currentTimeMillis();
+ long start = EnvironmentEdgeManager.currentTime();
this.balancer.postMasterStartupInitialize();
if (LOG.isDebugEnabled()) {
LOG.debug("Balancer post startup initialization complete, took " + (
- (System.currentTimeMillis() - start) / 1000) + " seconds");
+ (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
}
}
@@ -1612,7 +1613,7 @@ public class HMaster extends HRegionServer implements MasterServices {
// Sleep to next balance plan start time
// But if there are zero regions in transition, it can skip sleep to speed up.
- while (!interrupted && System.currentTimeMillis() < nextBalanceStartTime
+ while (!interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime
&& this.assignmentManager.getRegionStates().hasRegionsInTransition()) {
try {
Thread.sleep(100);
@@ -1625,7 +1626,7 @@ public class HMaster extends HRegionServer implements MasterServices {
while (!interrupted
&& maxRegionsInTransition > 0
&& this.assignmentManager.getRegionStates().getRegionsInTransitionCount()
- >= maxRegionsInTransition && System.currentTimeMillis() <= cutoffTime) {
+ >= maxRegionsInTransition && EnvironmentEdgeManager.currentTime() <= cutoffTime) {
try {
// sleep if the number of regions in transition exceeds the limit
Thread.sleep(100);
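The wait loops above and the cutoff checks in the hunks that follow share one pattern: compute a deadline from EnvironmentEdgeManager.currentTime() and re-read the (possibly injected) clock on each iteration. A stripped-down sketch of that pattern; the helper name, condition, and sleep interval are placeholders rather than HMaster's actual fields:

  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public final class CutoffLoopSketch {
    /** Polls a condition every 100 ms until it holds or the cutoff passes. */
    static boolean waitUntil(java.util.function.BooleanSupplier condition, long maxWaitMs)
        throws InterruptedException {
      long cutoffTime = EnvironmentEdgeManager.currentTime() + maxWaitMs;
      while (!condition.getAsBoolean() && EnvironmentEdgeManager.currentTime() <= cutoffTime) {
        Thread.sleep(100);
      }
      return condition.getAsBoolean();
    }

    public static void main(String[] args) throws InterruptedException {
      long deadline = EnvironmentEdgeManager.currentTime() + 300;
      // Illustrative condition: true once the (possibly injected) clock passes the deadline.
      boolean met = waitUntil(() -> EnvironmentEdgeManager.currentTime() >= deadline, 1_000);
      System.out.println("condition met before cutoff: " + met);
    }
  }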
@@ -1752,7 +1753,7 @@ public class HMaster extends HRegionServer implements MasterServices {
public List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans) {
List<RegionPlan> successRegionPlans = new ArrayList<>();
int maxRegionsInTransition = getMaxRegionsInTransition();
- long balanceStartTime = System.currentTimeMillis();
+ long balanceStartTime = EnvironmentEdgeManager.currentTime();
long cutoffTime = balanceStartTime + this.maxBalancingTime;
int rpCount = 0; // number of RegionPlans balanced so far
if (plans != null && !plans.isEmpty()) {
@@ -1782,7 +1783,7 @@ public class HMaster extends HRegionServer implements MasterServices {
// if performing next balance exceeds cutoff time, exit the loop
if (this.maxBalancingTime > 0 && rpCount < plans.size()
- && System.currentTimeMillis() > cutoffTime) {
+ && EnvironmentEdgeManager.currentTime() > cutoffTime) {
// TODO: After balance, there should not be a cutoff time (keeping it as
// a security net for now)
LOG.debug("No more balancing till next balance run; maxBalanceTime="

Some files were not shown because too many files have changed in this diff.