HBASE-25911 Replace calls to System.currentTimeMillis with EnvironmentEdgeManager.currentTime (#3302)
We introduced EnvironmentEdgeManager as a way to inject alternate clocks for unit tests. In order for this to be effective, all callers that would otherwise use System.currentTimeMillis() must call EnvironmentEdgeManager.currentTime() instead, except the implementers of EnvironmentEdge.

Signed-off-by: Bharath Vissapragada <bharathv@apache.org>
Signed-off-by: Duo Zhang <zhangduo@apache.org>
Signed-off-by: Viraj Jasani <vjasani@apache.org>
This commit is contained in:
parent e9f595307a
commit 335305e0cf
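As context for this change, here is a minimal sketch of the clock-injection pattern it enables. It assumes the existing HBase test utilities ManualEnvironmentEdge, EnvironmentEdgeManager.injectEdge() and EnvironmentEdgeManager.reset(); the class name and the values below are illustrative only and are not part of this commit.

// Sketch only: assumes org.apache.hadoop.hbase.util.ManualEnvironmentEdge and
// EnvironmentEdgeManager.injectEdge()/reset() behave as in current HBase.
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class ClockInjectionSketch {
  public static void main(String[] args) {
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    clock.setValue(1_000L);                     // freeze "now" at a known value
    EnvironmentEdgeManager.injectEdge(clock);   // all EnvironmentEdgeManager.currentTime() callers now see it
    try {
      long start = EnvironmentEdgeManager.currentTime();   // 1000, not wall-clock time
      clock.incValue(60_000L);                              // advance the fake clock by 60s
      long elapsed = EnvironmentEdgeManager.currentTime() - start; // 60000, deterministic
      System.out.println("elapsed=" + elapsed);
      // Code that calls System.currentTimeMillis() directly bypasses the injected
      // clock, which is why the callers in the diff below are converted.
    } finally {
      EnvironmentEdgeManager.reset();           // restore the default wall-clock edge
    }
  }
}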
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.procedure.ProcedureManagerHost;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;

@@ -382,11 +383,11 @@ public class BackupManager implements Closeable {
 * @throws IOException if active session already exists
 */
 public void startBackupSession() throws IOException {
-long startTime = System.currentTimeMillis();
+long startTime = EnvironmentEdgeManager.currentTime();
 long timeout = conf.getInt(BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY,
 DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT) * 1000L;
 long lastWarningOutputTime = 0;
-while (System.currentTimeMillis() - startTime < timeout) {
+while (EnvironmentEdgeManager.currentTime() - startTime < timeout) {
 try {
 systemTable.startBackupExclusiveOperation();
 return;

@@ -400,8 +401,8 @@ public class BackupManager implements Closeable {
 Thread.currentThread().interrupt();
 }
 if (lastWarningOutputTime == 0
-|| (System.currentTimeMillis() - lastWarningOutputTime) > 60000) {
-lastWarningOutputTime = System.currentTimeMillis();
+|| (EnvironmentEdgeManager.currentTime() - lastWarningOutputTime) > 60000) {
+lastWarningOutputTime = EnvironmentEdgeManager.currentTime();
 LOG.warn("Waiting to acquire backup exclusive lock for {}s",
 +(lastWarningOutputTime - startTime) / 1000);
 }

@@ -425,7 +425,7 @@ public class TestBackupBase {
 }

 protected static void createTables() throws Exception {
-long tid = System.currentTimeMillis();
+long tid = EnvironmentEdgeManager.currentTime();
 table1 = TableName.valueOf("test-" + tid);
 Admin ha = TEST_UTIL.getAdmin();
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;

@@ -96,17 +97,17 @@ public class TestBackupManager {
 public void run() {
 try {
 backupManager.startBackupSession();
-boolean result = startTimes.compareAndSet(0, 0, System.currentTimeMillis());
+boolean result = startTimes.compareAndSet(0, 0, EnvironmentEdgeManager.currentTime());
 if (!result) {
-result = startTimes.compareAndSet(1, 0, System.currentTimeMillis());
+result = startTimes.compareAndSet(1, 0, EnvironmentEdgeManager.currentTime());
 if (!result) {
 throw new IOException("PANIC! Unreachable code");
 }
 }
 Thread.sleep(sleepTime);
-result = stopTimes.compareAndSet(0, 0, System.currentTimeMillis());
+result = stopTimes.compareAndSet(0, 0, EnvironmentEdgeManager.currentTime());
 if (!result) {
-result = stopTimes.compareAndSet(1, 0, System.currentTimeMillis());
+result = stopTimes.compareAndSet(1, 0, EnvironmentEdgeManager.currentTime());
 if (!result) {
 throw new IOException("PANIC! Unreachable code");
 }

@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;

@@ -520,8 +521,8 @@ public class TestBackupSystemTable {
 new BackupInfo("backup_" + System.nanoTime(), BackupType.FULL, new TableName[] {
 TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") },
 "/hbase/backup");
-ctxt.setStartTs(System.currentTimeMillis());
-ctxt.setCompleteTs(System.currentTimeMillis() + 1);
+ctxt.setStartTs(EnvironmentEdgeManager.currentTime());
+ctxt.setCompleteTs(EnvironmentEdgeManager.currentTime() + 1);
 return ctxt;
 }
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;

@@ -298,7 +299,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
 @Override
 protected List<RegionPlan> balanceTable(TableName tableName,
 Map<ServerName, List<RegionInfo>> loadOfOneTable) {
-long startTime = System.currentTimeMillis();
+long startTime = EnvironmentEdgeManager.currentTime();

 // construct a Cluster object with clusterMap and rest of the
 // argument as defaults

@@ -482,7 +483,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
 balanceOverall(regionsToReturn, serverBalanceInfo, fetchFromTail, regionsToMove, max, min);
 }

-long endTime = System.currentTimeMillis();
+long endTime = EnvironmentEdgeManager.currentTime();

 if (!regionsToMove.isEmpty() || neededRegions != 0) {
 // Emit data so can diagnose how balancer went astray.

@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Triple;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;

@@ -98,7 +99,8 @@ public class TestFavoredNodeAssignmentHelper {
 }
 });
 for (int i = 0; i < 40; i++) {
-ServerName server = ServerName.valueOf("foo" + i, 1234, System.currentTimeMillis());
+ServerName server = ServerName.valueOf("foo" + i, 1234,
+EnvironmentEdgeManager.currentTime());
 String rack = getRack(i);
 if (!rack.equals(RackManager.UNKNOWN_RACK)) {
 rackToServers.computeIfAbsent(rack, k -> new ArrayList<>()).add(server);
@@ -32,6 +32,7 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Strings;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -136,7 +137,7 @@ public final class ServerMetricsBuilder {
 private final Map<byte[], RegionMetrics> regionStatus = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 private final Map<byte[], UserMetrics> userMetrics = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 private final Set<String> coprocessorNames = new TreeSet<>();
-private long reportTimestamp = System.currentTimeMillis();
+private long reportTimestamp = EnvironmentEdgeManager.currentTime();
 private long lastReportTimestamp = 0;
 private ServerMetricsBuilder(ServerName serverName) {
 this.serverName = serverName;

@@ -26,6 +26,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

 /**
 * The class that is able to determine some unique strings for the client,

@@ -45,7 +46,7 @@ final class ClientIdGenerator {
 byte[] selfBytes = getIpAddressBytes();
 Long pid = getPid();
 long tid = Thread.currentThread().getId();
-long ts = System.currentTimeMillis();
+long ts = EnvironmentEdgeManager.currentTime();

 byte[] id = new byte[selfBytes.length + ((pid != null ? 1 : 0) + 2) * Bytes.SIZEOF_LONG];
 int offset = Bytes.putBytes(id, 0, selfBytes, 0, selfBytes.length);
@@ -55,7 +55,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 * Specifying timestamps, deleteFamily and deleteColumns will delete all
 * versions with a timestamp less than or equal to that passed. If no
 * timestamp is specified, an entry is added with a timestamp of 'now'
-* where 'now' is the servers's System.currentTimeMillis().
+* where 'now' is the servers's EnvironmentEdgeManager.currentTime().
 * Specifying a timestamp to the deleteColumn method will
 * delete versions only with a timestamp equal to that specified.
 * If no timestamp is passed to deleteColumn, internally, it figures the

@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;

 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;

 @InterfaceAudience.Private

@@ -48,7 +49,7 @@ public class RegionInfoBuilder {
 private final TableName tableName;
 private byte[] startKey = HConstants.EMPTY_START_ROW;
 private byte[] endKey = HConstants.EMPTY_END_ROW;
-private long regionId = System.currentTimeMillis();
+private long regionId = EnvironmentEdgeManager.currentTime();
 private int replicaId = RegionInfo.DEFAULT_REPLICA_ID;
 private boolean offLine = false;
 private boolean split = false;

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master;
 import java.util.Date;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;

@@ -188,11 +189,11 @@ public class RegionState {
 private long ritDuration;

 public static RegionState createForTesting(RegionInfo region, State state) {
-return new RegionState(region, state, System.currentTimeMillis(), null);
+return new RegionState(region, state, EnvironmentEdgeManager.currentTime(), null);
 }

 public RegionState(RegionInfo region, State state, ServerName serverName) {
-this(region, state, System.currentTimeMillis(), serverName);
+this(region, state, EnvironmentEdgeManager.currentTime(), serverName);
 }

 public RegionState(RegionInfo region,
@@ -390,7 +391,7 @@ public class RegionState {
 * A slower (but more easy-to-read) stringification
 */
 public String toDescriptiveString() {
-long relTime = System.currentTimeMillis() - stamp;
+long relTime = EnvironmentEdgeManager.currentTime() - stamp;
 return hri.getRegionNameAsString()
 + " state=" + state
 + ", ts=" + new Date(stamp) + " (" + (relTime/1000) + "s ago)"

@@ -127,9 +127,9 @@ public class SlowLogTableAccessor {
 }

 /**
-* Create rowKey: currentTimeMillis APPEND slowLogPayload.hashcode
+* Create rowKey: currentTime APPEND slowLogPayload.hashcode
 * Scan on slowlog table should keep records with sorted order of time, however records
-* added at the very same time (currentTimeMillis) could be in random order.
+* added at the very same time could be in random order.
 *
 * @param slowLogPayload SlowLogPayload to process
 * @return rowKey byte[]

@@ -141,8 +141,8 @@ public class SlowLogTableAccessor {
 if (lastFiveDig.startsWith("-")) {
 lastFiveDig = String.valueOf(RANDOM.nextInt(99999));
 }
-final long currentTimeMillis = EnvironmentEdgeManager.currentTime();
-final String timeAndHashcode = currentTimeMillis + lastFiveDig;
+final long currentTime = EnvironmentEdgeManager.currentTime();
+final String timeAndHashcode = currentTime + lastFiveDig;
 final long rowKeyLong = Long.parseLong(timeAndHashcode);
 return Bytes.toBytes(rowKeyLong);
 }
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;

@@ -53,7 +54,7 @@ public class TestAsyncConnectionTracing {
 private static Configuration CONF = HBaseConfiguration.create();

 private ServerName masterServer =
-ServerName.valueOf("localhost", 12345, System.currentTimeMillis());
+ServerName.valueOf("localhost", 12345, EnvironmentEdgeManager.currentTime());

 private AsyncConnection conn;

@@ -87,7 +88,8 @@ public class TestAsyncConnectionTracing {
 .filter(s -> s.getName().equals("AsyncConnection." + methodName)).findFirst().get();
 assertEquals(StatusCode.OK, data.getStatus().getStatusCode());
 if (serverName != null) {
-assertEquals(serverName.getServerName(), data.getAttributes().get(TraceUtil.SERVER_NAME_KEY));
+assertEquals(serverName.getServerName(),
+data.getAttributes().get(TraceUtil.SERVER_NAME_KEY));
 }
 }

@@ -99,7 +101,8 @@ public class TestAsyncConnectionTracing {

 @Test
 public void testHbckWithServerName() throws IOException {
-ServerName serverName = ServerName.valueOf("localhost", 23456, System.currentTimeMillis());
+ServerName serverName = ServerName.valueOf("localhost", 23456,
+EnvironmentEdgeManager.currentTime());
 conn.getHbck(serverName);
 assertTrace("getHbck", serverName);
 }

@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;

@@ -69,11 +70,11 @@ public class TestAsyncRegionLocatorTracing {
 RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
 locs = new RegionLocations(
 new HRegionLocation(metaRegionInfo,
-ServerName.valueOf("127.0.0.1", 12345, System.currentTimeMillis())),
+ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())),
 new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 1),
-ServerName.valueOf("127.0.0.2", 12345, System.currentTimeMillis())),
+ServerName.valueOf("127.0.0.2", 12345, EnvironmentEdgeManager.currentTime())),
 new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 2),
-ServerName.valueOf("127.0.0.3", 12345, System.currentTimeMillis())));
+ServerName.valueOf("127.0.0.3", 12345, EnvironmentEdgeManager.currentTime())));
 conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF) {

 @Override

@@ -103,7 +104,8 @@ public class TestAsyncRegionLocatorTracing {

 @Test
 public void testClearCacheServerName() {
-ServerName sn = ServerName.valueOf("127.0.0.1", 12345, System.currentTimeMillis());
+ServerName sn = ServerName.valueOf("127.0.0.1", 12345,
+EnvironmentEdgeManager.currentTime());
 conn.getLocator().clearCache(sn);
 SpanData span = waitSpan("AsyncRegionLocator.clearCache");
 assertEquals(StatusCode.OK, span.getStatus().getStatusCode());
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.ClassRule;
 import org.junit.Rule;

@@ -198,7 +199,7 @@ public class TestRegionInfoBuilder {
 public void testParseName() throws IOException {
 final TableName tableName = name.getTableName();
 byte[] startKey = Bytes.toBytes("startKey");
-long regionId = System.currentTimeMillis();
+long regionId = EnvironmentEdgeManager.currentTime();
 int replicaId = 42;

 // test without replicaId

@@ -228,7 +229,7 @@ public class TestRegionInfoBuilder {
 byte[] startKey = Bytes.toBytes("startKey");
 byte[] endKey = Bytes.toBytes("endKey");
 boolean split = false;
-long regionId = System.currentTimeMillis();
+long regionId = EnvironmentEdgeManager.currentTime();
 int replicaId = 42;

 RegionInfo ri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey)

@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Rule;

@@ -56,7 +57,7 @@ public class TestRegionInfoDisplay {
 .setStartKey(startKey)
 .setEndKey(endKey)
 .setSplit(false)
-.setRegionId(System.currentTimeMillis())
+.setRegionId(EnvironmentEdgeManager.currentTime())
 .setReplicaId(1).build();
 checkEquality(ri, conf);
 Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY,
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase;
 import com.google.errorprone.annotations.RestrictedApi;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -172,7 +174,7 @@ public abstract class ScheduledChore implements Runnable {
 */
 private synchronized void updateTimeTrackingBeforeRun() {
 timeOfLastRun = timeOfThisRun;
-timeOfThisRun = System.currentTimeMillis();
+timeOfThisRun = EnvironmentEdgeManager.currentTime();
 }

 /**

@@ -215,7 +217,7 @@ public abstract class ScheduledChore implements Runnable {
 * @return true if time is earlier or equal to current milli time
 */
 private synchronized boolean isValidTime(final long time) {
-return time > 0 && time <= System.currentTimeMillis();
+return time > 0 && time <= EnvironmentEdgeManager.currentTime();
 }

 /**
@@ -42,7 +42,7 @@ public class ThrottledInputStream extends InputStream {

 private final InputStream rawStream;
 private final long maxBytesPerSec;
-private final long startTime = System.currentTimeMillis();
+private final long startTime = EnvironmentEdgeManager.currentTime();

 private long bytesRead = 0;
 private long totalSleepTime = 0;

@@ -164,7 +164,7 @@ public class ThrottledInputStream extends InputStream {
 * @return Read rate, in bytes/sec.
 */
 public long getBytesPerSec() {
-long elapsed = (System.currentTimeMillis() - startTime) / 1000;
+long elapsed = (EnvironmentEdgeManager.currentTime() - startTime) / 1000;
 if (elapsed == 0) {
 return bytesRead;
 } else {

@@ -174,7 +174,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
 for (Path path : FileUtil.stat2Paths(fileStatuses)) { // for each file that match the pattern
 if (fs.isFile(path)) { // only process files, skip for directories
 File dst = new File(parentDirStr, "." + pathPrefix + "."
-+ path.getName() + "." + System.currentTimeMillis() + ".jar");
++ path.getName() + "." + EnvironmentEdgeManager.currentTime() + ".jar");
 fs.copyToLocalFile(path, new Path(dst.toString()));
 dst.deleteOnExit();

@@ -188,7 +188,7 @@ public class CoprocessorClassLoader extends ClassLoaderBase {
 Matcher m = libJarPattern.matcher(entry.getName());
 if (m.matches()) {
 File file = new File(parentDirStr, "." + pathPrefix + "."
-+ path.getName() + "." + System.currentTimeMillis() + "." + m.group(1));
++ path.getName() + "." + EnvironmentEdgeManager.currentTime() + "." + m.group(1));
 try (FileOutputStream outStream = new FileOutputStream(file)) {
 IOUtils.copyBytes(jarFile.getInputStream(entry),
 outStream, conf, true);
@@ -129,7 +129,7 @@ public class IdLock {
 Thread currentThread = Thread.currentThread();
 Entry entry = new Entry(id, currentThread);
 Entry existing;
-long waitUtilTS = System.currentTimeMillis() + time;
+long waitUtilTS = EnvironmentEdgeManager.currentTime() + time;
 long remaining = time;
 while ((existing = map.putIfAbsent(entry.id, entry)) != null) {
 synchronized (existing) {

@@ -139,7 +139,7 @@ public class IdLock {
 while (existing.locked) {
 existing.wait(remaining);
 if (existing.locked) {
-long currentTS = System.currentTimeMillis();
+long currentTS = EnvironmentEdgeManager.currentTime();
 if (currentTS >= waitUtilTS) {
 // time is up
 return null;

@@ -108,7 +108,7 @@ public class Random64 {
 final int precision = 100000;
 final long totalTestCnt = defaultTotalTestCnt + precision;
 final int reportPeriod = 100 * precision;
-final long startTime = System.currentTimeMillis();
+final long startTime = EnvironmentEdgeManager.currentTime();

 System.out.println("Do collision test, totalTestCnt=" + totalTestCnt);

@@ -130,7 +130,7 @@ public class Random64 {
 }

 if (cnt % reportPeriod == 0) {
-long cost = System.currentTimeMillis() - startTime;
+long cost = EnvironmentEdgeManager.currentTime() - startTime;
 long remainingMs = (long) (1.0 * (totalTestCnt - cnt) * cost / cnt);
 System.out.println(
 String.format(
@@ -122,7 +122,7 @@ public class ReflectionUtils {
 boolean dumpStack = false;
 if (log.isInfoEnabled()) {
 synchronized (ReflectionUtils.class) {
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 if (now - previousLogTime >= minInterval * 1000) {
 previousLogTime = now;
 dumpStack = true;

@@ -71,7 +71,7 @@ public class Sleeper {
 if (this.stopper.isStopped()) {
 return;
 }
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 long currentSleepTime = sleepTime;
 while (currentSleepTime > 0) {
 long woke = -1;

@@ -83,7 +83,7 @@ public class Sleeper {

 sleepLock.wait(currentSleepTime);
 }
-woke = System.currentTimeMillis();
+woke = EnvironmentEdgeManager.currentTime();
 long slept = woke - now;
 if (slept - this.period > MINIMAL_DELTA_FOR_LOGGING) {
 LOG.warn("We slept {}ms instead of {}ms, this is likely due to a long " +

@@ -98,7 +98,7 @@ public class Sleeper {
 }
 }
 // Recalculate waitTime.
-woke = (woke == -1)? System.currentTimeMillis(): woke;
+woke = (woke == -1)? EnvironmentEdgeManager.currentTime() : woke;
 currentSleepTime = this.period - (woke - now);
 }
 synchronized(sleepLock) {
@@ -150,7 +150,7 @@ public class Threads {
 * @param msToWait the amount of time to sleep in milliseconds
 */
 public static void sleepWithoutInterrupt(final long msToWait) {
-long timeMillis = System.currentTimeMillis();
+long timeMillis = EnvironmentEdgeManager.currentTime();
 long endTime = timeMillis + msToWait;
 boolean interrupted = false;
 while (timeMillis < endTime) {

@@ -159,7 +159,7 @@ public class Threads {
 } catch (InterruptedException ex) {
 interrupted = true;
 }
-timeMillis = System.currentTimeMillis();
+timeMillis = EnvironmentEdgeManager.currentTime();
 }

 if (interrupted) {
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

@@ -139,7 +140,7 @@ public class TestCellComparator {
 */
 @Test
 public void testMetaComparisons() throws Exception {
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();

 // Meta compares
 Cell aaa = createByteBufferKeyValueFromKeyValue(new KeyValue(

@@ -176,7 +177,7 @@ public class TestCellComparator {
 */
 @Test
 public void testMetaComparisons2() {
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 CellComparator c = MetaCellComparator.META_COMPARATOR;
 assertTrue(c.compare(createByteBufferKeyValueFromKeyValue(new KeyValue(
 Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)),
@@ -39,6 +39,7 @@ import java.util.TreeSet;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

@@ -144,7 +145,7 @@ public class TestKeyValue {

 @Test
 public void testMoreComparisons() {
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();

 // Meta compares
 KeyValue aaa = new KeyValue(

@@ -174,7 +175,7 @@ public class TestKeyValue {
 @Test
 public void testMetaComparatorTableKeysWithCommaOk() {
 CellComparator c = MetaCellComparator.META_COMPARATOR;
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 // meta keys values are not quite right. A users can enter illegal values
 // from shell when scanning meta.
 KeyValue a = new KeyValue(Bytes.toBytes("table,key,with,commas1,1234"), now);

@@ -204,7 +205,7 @@ public class TestKeyValue {
 }

 private void metacomparisons(final CellComparatorImpl c) {
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 assertTrue(c.compare(new KeyValue(
 Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now),
 new KeyValue(

@@ -221,7 +222,7 @@ public class TestKeyValue {
 }

 private void comparisons(final CellComparatorImpl c) {
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 assertTrue(c.compare(new KeyValue(
 Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now),
 new KeyValue(

@@ -520,7 +521,7 @@ public class TestKeyValue {
 @Test
 public void testMetaKeyComparator() {
 CellComparator c = MetaCellComparator.META_COMPARATOR;
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();

 KeyValue a = new KeyValue(Bytes.toBytes("table1"), now);
 KeyValue b = new KeyValue(Bytes.toBytes("table2"), now);

@@ -589,12 +590,12 @@ public class TestKeyValue {
 new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
 Bytes.toBytes("2")),
 new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
-System.currentTimeMillis(), Bytes.toBytes("2"),
+EnvironmentEdgeManager.currentTime(), Bytes.toBytes("2"),
 new Tag[] {
 new ArrayBackedTag((byte) 120, "tagA"),
 new ArrayBackedTag((byte) 121, Bytes.toBytes("tagB")) }),
 new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"),
-System.currentTimeMillis(), Bytes.toBytes("2"),
+EnvironmentEdgeManager.currentTime(), Bytes.toBytes("2"),
 new Tag[] { new ArrayBackedTag((byte) 0, "tagA") }),
 new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes(""),
 Bytes.toBytes("1")) };
@@ -23,6 +23,7 @@ import static org.junit.Assert.fail;
 import java.text.MessageFormat;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -171,7 +172,7 @@ public final class Waiter {
 */
 public static <E extends Exception> long waitFor(Configuration conf, long timeout, long interval,
 boolean failIfTimeout, Predicate<E> predicate) {
-long started = System.currentTimeMillis();
+long started = EnvironmentEdgeManager.currentTime();
 long adjustedTimeout = (long) (getWaitForRatio(conf) * timeout);
 long mustEnd = started + adjustedTimeout;
 long remainderWait;

@@ -183,7 +184,7 @@ public final class Waiter {
 LOG.info(MessageFormat.format("Waiting up to [{0}] milli-secs(wait.for.ratio=[{1}])",
 adjustedTimeout, getWaitForRatio(conf)));
 while (!(eval = predicate.evaluate())
-&& (remainderWait = mustEnd - System.currentTimeMillis()) > 0) {
+&& (remainderWait = mustEnd - EnvironmentEdgeManager.currentTime()) > 0) {
 try {
 // handle tail case when remainder wait is less than one interval
 sleepInterval = Math.min(remainderWait, interval);

@@ -197,7 +198,7 @@ public final class Waiter {
 if (!eval) {
 if (interrupted) {
 LOG.warn(MessageFormat.format("Waiting interrupted after [{0}] msec",
-System.currentTimeMillis() - started));
+EnvironmentEdgeManager.currentTime() - started));
 } else if (failIfTimeout) {
 String msg = getExplanation(predicate);
 fail(MessageFormat

@@ -208,7 +209,7 @@ public final class Waiter {
 MessageFormat.format("Waiting timed out after [{0}] msec", adjustedTimeout) + msg);
 }
 }
-return (eval || interrupted) ? (System.currentTimeMillis() - started) : -1;
+return (eval || interrupted) ? (EnvironmentEdgeManager.currentTime() - started) : -1;
 } catch (Exception ex) {
 throw new RuntimeException(ex);
 }
@@ -39,7 +39,7 @@ import org.junit.experimental.categories.Category;
 @Category({ MiscTests.class, SmallTests.class })
 public class TestByteBufferArray {

-private static final Random RANDOM = new Random(System.currentTimeMillis());
+private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTime());

 @ClassRule
 public static final HBaseClassTestRule CLASS_RULE =

@@ -359,7 +359,7 @@ public class TestBytes extends TestCase {

 public void testToStringBytesBinaryReversible() {
 // let's run test with 1000 randomly generated byte arrays
-Random rand = new Random(System.currentTimeMillis());
+Random rand = new Random(EnvironmentEdgeManager.currentTime());
 byte[] randomBytes = new byte[1000];
 for (int i = 0; i < 1000; i++) {
 rand.nextBytes(randomBytes);

@@ -56,7 +56,7 @@ public class TestThreads {
 });
 LOG.debug("Starting sleeper thread (" + SLEEP_TIME_MS + " ms)");
 sleeper.start();
-long startTime = System.currentTimeMillis();
+long startTime = EnvironmentEdgeManager.currentTime();
 LOG.debug("Main thread: sleeping for 200 ms");
 Threads.sleep(200);

@@ -75,7 +75,7 @@ public class TestThreads {
 assertTrue("sleepWithoutInterrupt did not preserve the thread's " +
 "interrupted status", wasInterrupted.get());

-long timeElapsed = System.currentTimeMillis() - startTime;
+long timeElapsed = EnvironmentEdgeManager.currentTime() - startTime;
 // We expect to wait at least SLEEP_TIME_MS, but we can wait more if there is a GC.
 assertTrue("Elapsed time " + timeElapsed + " ms is out of the expected " +
 " sleep time of " + SLEEP_TIME_MS, SLEEP_TIME_MS - timeElapsed < TOLERANCE_MS);
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
 import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -105,7 +106,7 @@ public class TestSecureExport {
 private static final byte[] ROW3 = Bytes.toBytes("row3");
 private static final byte[] QUAL = Bytes.toBytes("qual");
 private static final String LOCALHOST = "localhost";
-private static final long NOW = System.currentTimeMillis();
+private static final long NOW = EnvironmentEdgeManager.currentTime();
 // user granted with all global permission
 private static final String USER_ADMIN = "admin";
 // user is table owner. will have all permissions on table
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.metrics.Counter;
 import org.apache.hadoop.hbase.metrics.Gauge;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
 import org.apache.hadoop.hbase.metrics.Timer;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -77,14 +78,14 @@ public class ExampleMasterObserverWithMetrics implements MasterCoprocessor, Mast
 TableDescriptor desc, RegionInfo[] regions) throws IOException {
 // we rely on the fact that there is only 1 instance of our MasterObserver. We keep track of
 // when the operation starts before the operation is executing.
-this.createTableStartTime = System.currentTimeMillis();
+this.createTableStartTime = EnvironmentEdgeManager.currentTime();
 }

 @Override
 public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
 TableDescriptor desc, RegionInfo[] regions) throws IOException {
 if (this.createTableStartTime > 0) {
-long time = System.currentTimeMillis() - this.createTableStartTime;
+long time = EnvironmentEdgeManager.currentTime() - this.createTableStartTime;
 LOG.info("Create table took: " + time);

 // Update the timer metric for the create table operation duration.
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;

 import org.apache.hbase.thirdparty.com.google.common.math.IntMath;

@@ -225,7 +226,7 @@ public class WriteHeavyIncrementObserver implements RegionCoprocessor, RegionObs
 private long getUniqueTimestamp(byte[] row) {
 int slot = Bytes.hashCode(row) & mask;
 MutableLong lastTimestamp = lastTimestamps[slot];
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 synchronized (lastTimestamp) {
 long pt = lastTimestamp.longValue() >> 10;
 if (now > pt) {
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooDefs;

@@ -112,7 +113,7 @@ public class TestZooKeeperScanPolicyObserver {

 @Test
 public void test() throws IOException, KeeperException, InterruptedException {
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 put(0, 100, now - 10000);
 assertValueEquals(0, 100);
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress;
 import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
 import org.apache.hadoop.hbase.hbtop.terminal.impl.TerminalImpl;
 import org.apache.hadoop.hbase.hbtop.terminal.impl.batch.BatchTerminal;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -98,7 +99,7 @@ public class Screen implements Closeable {
 nextScreenView = currentScreenView.handleKeyPress(keyPress);
 } else {
 if (timerTimestamp != null) {
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 if (timerTimestamp <= now) {
 // Dispatch the timer to the current screen
 timerTimestamp = null;

@@ -131,7 +132,7 @@ public class Screen implements Closeable {
 }

 public void setTimer(long delay) {
-timerTimestamp = System.currentTimeMillis() + delay;
+timerTimestamp = EnvironmentEdgeManager.currentTime() + delay;
 }

 public void cancelTimer() {
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo;
 import org.apache.hadoop.hbase.hbtop.field.FieldValue;
 import org.apache.hadoop.hbase.hbtop.mode.DrillDownInfo;
 import org.apache.hadoop.hbase.hbtop.mode.Mode;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -146,7 +147,7 @@ public class TopScreenModel {

 private void refreshSummary(ClusterMetrics clusterMetrics) {
 String currentTime = ISO_8601_EXTENDED_TIME_FORMAT
-.format(System.currentTimeMillis());
+.format(EnvironmentEdgeManager.currentTime());
 String version = clusterMetrics.getHBaseVersion();
 String clusterId = clusterMetrics.getClusterId();
 int liveServers = clusterMetrics.getLiveServerMetrics().size();
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.hbtop.screen.help.HelpScreenView;
 import org.apache.hadoop.hbase.hbtop.screen.mode.ModeScreenView;
 import org.apache.hadoop.hbase.hbtop.terminal.Terminal;
 import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -92,7 +93,7 @@ public class TopScreenPresenter {

 public long refresh(boolean force) {
 if (!force) {
-long delay = System.currentTimeMillis() - lastRefreshTimestamp;
+long delay = EnvironmentEdgeManager.currentTime() - lastRefreshTimestamp;
 if (delay < refreshDelay.get()) {
 return refreshDelay.get() - delay;
 }

@@ -114,7 +115,7 @@ public class TopScreenPresenter {

 topScreenView.refreshTerminal();

-lastRefreshTimestamp = System.currentTimeMillis();
+lastRefreshTimestamp = EnvironmentEdgeManager.currentTime();
 iterations++;
 return refreshDelay.get();
 }
@@ -28,7 +28,7 @@ import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletResponse;

 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;

 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)

@@ -43,7 +43,7 @@ public class NoCacheFilter implements Filter {
 throws IOException, ServletException {
 HttpServletResponse httpRes = (HttpServletResponse) res;
 httpRes.setHeader("Cache-Control", "no-cache");
-long now = System.currentTimeMillis();
+long now = EnvironmentEdgeManager.currentTime();
 httpRes.addDateHeader("Expires", now);
 httpRes.addDateHeader("Date", now);
 httpRes.addHeader("Pragma", "no-cache");
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;

 import java.io.IOException;

+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.AsyncCallback;

@@ -118,9 +119,9 @@ public class ChaosZKClient {
 CreateMode.EPHEMERAL_SEQUENTIAL,
 submitTaskCallback,
 taskObject);
-long start = System.currentTimeMillis();
+long start = EnvironmentEdgeManager.currentTime();

-while ((System.currentTimeMillis() - start) < TASK_EXECUTION_TIMEOUT) {
+while ((EnvironmentEdgeManager.currentTime() - start) < TASK_EXECUTION_TIMEOUT) {
 if(taskStatus != null) {
 return taskStatus;
 }
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -235,9 +236,9 @@ public class DistributedHBaseCluster extends HBaseCluster {
 private void waitForServiceToStop(ServiceType service, ServerName serverName, long timeout)
 throws IOException {
 LOG.info("Waiting for service: {} to stop: {}", service, serverName.getServerName());
-long start = System.currentTimeMillis();
+long start = EnvironmentEdgeManager.currentTime();

-while ((System.currentTimeMillis() - start) < timeout) {
+while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
 if (!clusterManager.isRunning(service, serverName.getHostname(), serverName.getPort())) {
 return;
 }

@@ -249,9 +250,9 @@ public class DistributedHBaseCluster extends HBaseCluster {
 private void waitForServiceToStart(ServiceType service, ServerName serverName, long timeout)
 throws IOException {
 LOG.info("Waiting for service: {} to start: ", service, serverName.getServerName());
-long start = System.currentTimeMillis();
+long start = EnvironmentEdgeManager.currentTime();

-while ((System.currentTimeMillis() - start) < timeout) {
+while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
 if (clusterManager.isRunning(service, serverName.getHostname(), serverName.getPort())) {
 return;
 }

@@ -285,8 +286,8 @@ public class DistributedHBaseCluster extends HBaseCluster {

 @Override
 public boolean waitForActiveAndReadyMaster(long timeout) throws IOException {
-long start = System.currentTimeMillis();
-while (System.currentTimeMillis() - start < timeout) {
+long start = EnvironmentEdgeManager.currentTime();
+while (EnvironmentEdgeManager.currentTime() - start < timeout) {
 try {
 connection.getAdmin().getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION));
 return true;
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

@@ -215,11 +216,11 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase {
 ColumnFamilyDescriptor[] columns = new ColumnFamilyDescriptor[] { cbuilder.build() };
 LOG.info("Creating table {} with {} splits.", tableName,
 regionsCountPerServer * regionServerCount);
-startTime = System.currentTimeMillis();
+startTime = EnvironmentEdgeManager.currentTime();
 HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), desc, columns,
 regionsCountPerServer);
 util.waitTableAvailable(tableName);
-endTime = System.currentTimeMillis();
+endTime = EnvironmentEdgeManager.currentTime();
 LOG.info("Pre-split table created successfully in {}ms.", (endTime - startTime));
 }
@@ -25,6 +25,7 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.apache.hadoop.hbase.util.LoadTestTool;
 import org.apache.hadoop.hbase.util.Threads;

@@ -162,15 +163,15 @@ public class IntegrationTestIngest extends IntegrationTestBase {
 LOG.info("Cluster size:" + util.getHBaseClusterInterface()
 .getClusterMetrics().getLiveServerMetrics().size());

-long start = System.currentTimeMillis();
+long start = EnvironmentEdgeManager.currentTime();
 String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
 long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
 long startKey = 0;

 long numKeys = getNumKeys(keysPerServerPerIter);
-while (System.currentTimeMillis() - start < 0.9 * runtime) {
+while (EnvironmentEdgeManager.currentTime() - start < 0.9 * runtime) {
 LOG.info("Intended run time: " + (runtime/60000) + " min, left:" +
-((runtime - (System.currentTimeMillis() - start))/60000) + " min");
+((runtime - (EnvironmentEdgeManager.currentTime() - start))/60000) + " min");

 int ret = -1;
 ret = loadTool.run(getArgsForLoadTestTool("-write",
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
 import org.junit.After;

@@ -126,11 +127,11 @@ public class IntegrationTestManyRegions {
 byte[][] splits = algo.split(REGION_COUNT);

 LOG.info(String.format("Creating table %s with %d splits.", TABLE_NAME, REGION_COUNT));
-long startTime = System.currentTimeMillis();
+long startTime = EnvironmentEdgeManager.currentTime();
 try {
 admin.createTable(tableDescriptor, splits);
 LOG.info(String.format("Pre-split table created successfully in %dms.",
-(System.currentTimeMillis() - startTime)));
+(EnvironmentEdgeManager.currentTime() - startTime)));
 } catch (IOException e) {
 LOG.error("Failed to create table", e);
 }
@@ -25,6 +25,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.ConstantDelayQueue;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LoadTestTool;
 import org.apache.hadoop.hbase.util.MultiThreadedUpdater;
 import org.apache.hadoop.hbase.util.MultiThreadedWriter;

@@ -163,15 +164,15 @@ public class IntegrationTestRegionReplicaReplication extends IntegrationTestInge
 getConf().getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs",
 5000) + 1000);

-long start = System.currentTimeMillis();
+long start = EnvironmentEdgeManager.currentTime();
 String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
 long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
 long startKey = 0;

 long numKeys = getNumKeys(keysPerServerPerIter);
-while (System.currentTimeMillis() - start < 0.9 * runtime) {
+while (EnvironmentEdgeManager.currentTime() - start < 0.9 * runtime) {
 LOG.info("Intended run time: " + (runtime/60000) + " min, left:" +
-((runtime - (System.currentTimeMillis() - start))/60000) + " min");
+((runtime - (EnvironmentEdgeManager.currentTime() - start))/60000) + " min");

 int verifyPercent = 100;
 int updatePercent = 20;
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.StripeStoreConfig;
 import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.LoadTestKVGenerator;
 import org.apache.hadoop.hbase.util.MultiThreadedAction;
 import org.apache.hadoop.hbase.util.MultiThreadedReader;

@@ -206,14 +207,14 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {

 if (preloadKeys > 0) {
 MultiThreadedWriter preloader = new MultiThreadedWriter(dataGen, conf, TABLE_NAME);
-long time = System.currentTimeMillis();
+long time = EnvironmentEdgeManager.currentTime();
 preloader.start(0, startKey, writeThreads);
 preloader.waitForFinish();
 if (preloader.getNumWriteFailures() > 0) {
 throw new IOException("Preload failed");
 }
 int waitTime = (int)Math.min(preloadKeys / 100, 30000); // arbitrary
-status(description + " preload took " + (System.currentTimeMillis()-time)/1000
+status(description + " preload took " + (EnvironmentEdgeManager.currentTime()-time)/1000
 + "sec; sleeping for " + waitTime/1000 + "sec for store to stabilize");
 Thread.sleep(waitTime);
 }

@@ -223,7 +224,7 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
 // reader.getMetrics().enable();
 reader.linkToWriter(writer);

-long testStartTime = System.currentTimeMillis();
+long testStartTime = EnvironmentEdgeManager.currentTime();
 writer.start(startKey, endKey, writeThreads);
 reader.start(startKey, endKey, readThreads);
 writer.waitForFinish();

@@ -257,7 +258,8 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
 }
 }
 LOG.info("Performance data dump for " + description + " test: \n" + perfDump.toString());*/
-status(description + " test took " + (System.currentTimeMillis()-testStartTime)/1000 + "sec");
+status(description + " test took " +
+(EnvironmentEdgeManager.currentTime() - testStartTime) / 1000 + "sec");
 Assert.assertTrue(success);
 }
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -67,7 +68,7 @@ public class MoveRegionsOfTableAction extends Action {

 Collections.shuffle(regions);

-long start = System.currentTimeMillis();
+long start = EnvironmentEdgeManager.currentTime();
 for (RegionInfo regionInfo : regions) {
 // Don't try the move if we're stopping
 if (context.isStopping()) {

@@ -81,7 +82,7 @@ public class MoveRegionsOfTableAction extends Action {

 // put a limit on max num regions. Otherwise, this won't finish
 // with a sleep time of 10sec, 100 regions will finish in 16min
-if (System.currentTimeMillis() - start > maxTime) {
+if (EnvironmentEdgeManager.currentTime() - start > maxTime) {
 break;
 }
 }
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.chaos.actions;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -48,7 +49,7 @@ public class SnapshotTableAction extends Action {
 @Override
 public void perform() throws Exception {
 HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-String snapshotName = tableName + "-it-" + System.currentTimeMillis();
+String snapshotName = tableName + "-it-" + EnvironmentEdgeManager.currentTime();
 Admin admin = util.getAdmin();

 // Don't try the snapshot if we're stopping
|
|||
package org.apache.hadoop.hbase.chaos.policies;
|
||||
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
|
||||
import org.apache.hadoop.hbase.util.Threads;
|
||||
|
||||
/** A policy which does stuff every time interval. */
|
||||
|
@ -37,11 +38,11 @@ public abstract class PeriodicPolicy extends Policy {
|
|||
Threads.sleep(jitter);
|
||||
|
||||
while (!isStopped()) {
|
||||
long start = System.currentTimeMillis();
|
||||
long start = EnvironmentEdgeManager.currentTime();
|
||||
runOneIteration();
|
||||
|
||||
if (isStopped()) return;
|
||||
long sleepTime = periodMs - (System.currentTimeMillis() - start);
|
||||
long sleepTime = periodMs - (EnvironmentEdgeManager.currentTime() - start);
|
||||
if (sleepTime > 0) {
|
||||
LOG.info("Sleeping for {} ms", sleepTime);
|
||||
Threads.sleep(sleepTime);
|
||||
|
|
|
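
The PeriodicPolicy change illustrates what the substitution buys: once the sleep-time arithmetic reads the injectable clock, a test can drive it with a manual edge instead of real sleeps. A hedged sketch, assuming the ManualEnvironmentEdge helper that ships in hbase-common (the 1000 ms period is a made-up value):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class ManualClockSketch {
  public static void main(String[] args) {
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    clock.setValue(1000L);                        // pin "now" to a known value
    EnvironmentEdgeManager.injectEdge(clock);
    try {
      long start = EnvironmentEdgeManager.currentTime();
      clock.incValue(250L);                       // advance time without sleeping
      long sleepTime = 1000L - (EnvironmentEdgeManager.currentTime() - start);
      System.out.println("sleepTime=" + sleepTime);  // prints 750, deterministically
    } finally {
      EnvironmentEdgeManager.reset();             // hand the wall clock back to other code
    }
  }
}
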
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.Before;

@@ -127,7 +128,7 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
Configuration conf = getConf();
TableName tableName = TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME));
String snapshotName = conf.get(SNAPSHOT_NAME_KEY, tableName.getQualifierAsString()
+ "_snapshot_" + System.currentTimeMillis());
+ "_snapshot_" + EnvironmentEdgeManager.currentTime());
int numRegions = conf.getInt(NUM_REGIONS_KEY, DEFAULT_NUM_REGIONS);
String tableDirStr = conf.get(TABLE_DIR_KEY);
Path tableDir;

@@ -78,6 +78,7 @@ import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Random64;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.hbase.wal.WALEdit;

@@ -711,9 +712,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
while (numQueries < maxQueries) {
numQueries++;
byte[] prev = node.prev;
long t1 = System.currentTimeMillis();
long t1 = EnvironmentEdgeManager.currentTime();
node = getNode(prev, table, node);
long t2 = System.currentTimeMillis();
long t2 = EnvironmentEdgeManager.currentTime();
if (node == null) {
LOG.error("ConcurrentWalker found UNDEFINED NODE: " + Bytes.toStringBinary(prev));
context.getCounter(Counts.UNDEFINED).increment(1l);

@@ -1714,10 +1715,10 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
scan.setBatch(1);
scan.addColumn(FAMILY_NAME, COLUMN_PREV);

long t1 = System.currentTimeMillis();
long t1 = EnvironmentEdgeManager.currentTime();
ResultScanner scanner = table.getScanner(scan);
Result result = scanner.next();
long t2 = System.currentTimeMillis();
long t2 = EnvironmentEdgeManager.currentTime();
scanner.close();

if ( result != null) {

@@ -1797,9 +1798,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
while (node != null && node.prev.length != NO_KEY.length &&
numQueries < maxQueries) {
byte[] prev = node.prev;
long t1 = System.currentTimeMillis();
long t1 = EnvironmentEdgeManager.currentTime();
node = getNode(prev, table, node);
long t2 = System.currentTimeMillis();
long t2 = EnvironmentEdgeManager.currentTime();
if (logEvery > 0 && numQueries % logEvery == 0) {
System.out.printf("CQ %d: %d %s \n", numQueries, t2 - t1, Bytes.toStringBinary(prev));
}

@@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.test.util.warc.WARCInputFormat;
import org.apache.hadoop.hbase.test.util.warc.WARCRecord;
import org.apache.hadoop.hbase.test.util.warc.WARCWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;

@@ -582,7 +583,7 @@ public class IntegrationTestLoadCommonCrawl extends IntegrationTestBase {
String contentType = warcHeader.getField("WARC-Identified-Payload-Type");
if (contentType != null) {
LOG.debug("Processing record id=" + recordID + ", targetURI=\"" + targetURI + "\"");
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();

// Make row key

@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.util.MultiThreadedReader;
import org.apache.hadoop.hbase.util.Threads;

@@ -143,7 +144,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
LOG.info("Cluster size:"+
util.getHBaseClusterInterface().getClusterMetrics().getLiveServerMetrics().size());

long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
long startKey = 0;

@@ -197,7 +198,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
// set the intended run time for the reader. The reader will do read requests
// to random keys for this amount of time.
long remainingTime = runtime - (System.currentTimeMillis() - start);
long remainingTime = runtime - (EnvironmentEdgeManager.currentTime() - start);
if (remainingTime <= 0) {
LOG.error("The amount of time left for the test to perform random reads is "
+ "non-positive. Increase the test execution time via "

@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;

@@ -89,7 +90,7 @@ public class TableRecordReaderImpl {
}
if (logScannerActivity) {
LOG.info("Current scan=" + currentScan.toString());
timestamp = System.currentTimeMillis();
timestamp = EnvironmentEdgeManager.currentTime();
rowcount = 0;
}
}

@@ -197,7 +198,7 @@ public class TableRecordReaderImpl {
if (logScannerActivity) {
rowcount ++;
if (rowcount >= logPerRowCount) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
timestamp = now;

@@ -236,7 +237,7 @@ public class TableRecordReaderImpl {
return false;
} catch (IOException ioe) {
if (logScannerActivity) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took " + (now-timestamp)
+ "ms to process " + rowcount + " rows");
LOG.info(ioe.toString(), ioe);

@@ -419,8 +419,10 @@ public class HFileOutputFormat2
private void close(final StoreFileWriter w) throws IOException {
if (w != null) {
w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString()));
w.appendFileInfo(BULKLOAD_TIME_KEY,
Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
w.appendFileInfo(BULKLOAD_TASK_KEY,
Bytes.toBytes(context.getTaskAttemptID().toString()));
w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
w.appendTrackedTimestampsToMetadata();

@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

@@ -764,7 +765,7 @@ public class ImportTsv extends Configured implements Tool {
}

// If timestamp option is not specified, use current system time.
long timstamp = getConf().getLong(TIMESTAMP_CONF_KEY, System.currentTimeMillis());
long timstamp = getConf().getLong(TIMESTAMP_CONF_KEY, EnvironmentEdgeManager.currentTime());

// Set it back to replace invalid timestamp (non-numeric) with current
// system time

@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

@@ -493,7 +494,7 @@ public class SyncTable extends Configured implements Tool {
sourceCell.getFamilyOffset(), sourceCell.getFamilyLength())
.setQualifier(sourceCell.getQualifierArray(),
sourceCell.getQualifierOffset(), sourceCell.getQualifierLength())
.setTimestamp(System.currentTimeMillis())
.setTimestamp(EnvironmentEdgeManager.currentTime())
.setValue(sourceCell.getValueArray(),
sourceCell.getValueOffset(), sourceCell.getValueLength()).build();
}

@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

@@ -91,7 +92,7 @@ public class TableRecordReaderImpl {
this.scanner = this.htable.getScanner(currentScan);
if (logScannerActivity) {
LOG.info("Current scan=" + currentScan.toString());
timestamp = System.currentTimeMillis();
timestamp = EnvironmentEdgeManager.currentTime();
rowcount = 0;
}
}

@@ -211,7 +212,7 @@ public class TableRecordReaderImpl {
if (logScannerActivity) {
rowcount ++;
if (rowcount >= logPerRowCount) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount);
timestamp = now;
rowcount = 0;

@@ -263,7 +264,7 @@ public class TableRecordReaderImpl {
} catch (IOException ioe) {
updateCounters();
if (logScannerActivity) {
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount);
LOG.info(ioe.toString(), ioe);
String lastRow = lastSuccessfulRow == null ?

@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.TableInfo;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;

@@ -300,8 +301,8 @@ public class WALPlayer extends Configured implements Tool {
conf.setStrings(TABLES_KEY, tables);
conf.setStrings(TABLE_MAP_KEY, tableMap);
conf.set(FileInputFormat.INPUT_DIR, inputDirs);
Job job =
Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis()));
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" +
EnvironmentEdgeManager.currentTime()));
job.setJarByClass(WALPlayer.class);

job.setInputFormatClass(WALInputFormat.class);

@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.Pair;

@@ -416,7 +417,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
int reportBytes = 0;
int bytesRead;

long stime = System.currentTimeMillis();
long stime = EnvironmentEdgeManager.currentTime();
while ((bytesRead = in.read(buffer)) > 0) {
out.write(buffer, 0, bytesRead);
totalBytesWritten += bytesRead;

@@ -431,7 +432,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
reportBytes = 0;
}
}
long etime = System.currentTimeMillis();
long etime = EnvironmentEdgeManager.currentTime();

context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
context.setStatus(String.format(statusMessage,

@@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.ByteArrayHashKey;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.GsonUtil;
import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.MurmurHash;

@@ -1145,7 +1146,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
static abstract class TestBase {
// Below is make it so when Tests are all running in the one
// jvm, that they each have a differently seeded Random.
private static final Random randomSeed = new Random(System.currentTimeMillis());
private static final Random randomSeed = new Random(EnvironmentEdgeManager.currentTime());

private static long nextRandomSeed() {
return randomSeed.nextLong();

@@ -2391,7 +2392,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
// write the serverName columns
MetaTableAccessor.updateRegionLocation(connection,
regionInfo, ServerName.valueOf("localhost", 60010, rand.nextLong()), i,
System.currentTimeMillis());
EnvironmentEdgeManager.currentTime());
return true;
}
}

@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;

@@ -67,7 +68,7 @@ public class TestCellCounter {
private static Path FQ_OUTPUT_DIR;
private static final String OUTPUT_DIR = "target" + File.separator + "test-data" + File.separator
+ "output";
private static long now = System.currentTimeMillis();
private static long now = EnvironmentEdgeManager.currentTime();

@Rule
public TestName name = new TestName();

@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.mob.MobTestUtil;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.util.ToolRunner;

@@ -290,7 +291,7 @@ public class TestCopyTable {
p.addColumn(FAMILY_B, QUALIFIER, Bytes.toBytes("Data23"));
t.put(p);

long currentTime = System.currentTimeMillis();
long currentTime = EnvironmentEdgeManager.currentTime();
String[] args = new String[] { "--new.name=" + targetTable, "--families=a:b", "--all.cells",
"--starttime=" + (currentTime - 100000), "--endtime=" + (currentTime + 100000),
"--versions=1", sourceTable.getNameAsString() };

@@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.hbase.util.MapReduceExtendedCell;
import org.apache.hadoop.hbase.wal.WAL;

@@ -130,7 +131,7 @@ public class TestImportExport {
private static String FQ_OUTPUT_DIR;
private static final String EXPORT_BATCH_SIZE = "100";

private static final long now = System.currentTimeMillis();
private static final long now = EnvironmentEdgeManager.currentTime();
private final TableName EXPORT_TABLE = TableName.valueOf("export_table");
private final TableName IMPORT_TABLE = TableName.valueOf("import_table");
public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1);

@@ -605,7 +606,7 @@ public class TestImportExport {
@Test
public void testExportScan() throws Exception {
int version = 100;
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
long endTime = startTime + 1;
String prefix = "row";
String label_0 = "label_0";

@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.LauncherSecurityManager;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;

@@ -249,12 +250,12 @@ public class TestRowCounter {
// clean up content of TABLE_NAME
Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM));

ts = System.currentTimeMillis();
ts = EnvironmentEdgeManager.currentTime();
put1.addColumn(family, col1, ts, Bytes.toBytes("val1"));
table.put(put1);
Thread.sleep(100);

ts = System.currentTimeMillis();
ts = EnvironmentEdgeManager.currentTime();
put2.addColumn(family, col1, ts, Bytes.toBytes("val2"));
put3.addColumn(family, col1, ts, Bytes.toBytes("val3"));
table.put(put2);

@@ -302,9 +303,9 @@ public class TestRowCounter {
rowCounter.setConf(TEST_UTIL.getConfiguration());
args = Arrays.copyOf(args, args.length+1);
args[args.length-1]="--expectedCount=" + expectedCount;
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
int result = rowCounter.run(args);
long duration = System.currentTimeMillis() - start;
long duration = EnvironmentEdgeManager.currentTime() - start;
LOG.debug("row count duration (ms): " + duration);
assertTrue(result==0);
}

@@ -318,9 +319,9 @@ public class TestRowCounter {
*/
private void runCreateSubmittableJobWithArgs(String[] args, int expectedCount) throws Exception {
Job job = RowCounter.createSubmittableJob(TEST_UTIL.getConfiguration(), args);
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
job.waitForCompletion(true);
long duration = System.currentTimeMillis() - start;
long duration = EnvironmentEdgeManager.currentTime() - start;
LOG.debug("row count duration (ms): " + duration);
assertTrue(job.isSuccessful());
Counter counter = job.getCounters().findCounter(RowCounter.RowCounterMapper.Counters.ROWS);

@@ -486,12 +487,12 @@ public class TestRowCounter {
// clean up content of TABLE_NAME
Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM));

ts = System.currentTimeMillis();
ts = EnvironmentEdgeManager.currentTime();
put1.addColumn(family, col1, ts, Bytes.toBytes("val1"));
table.put(put1);
Thread.sleep(100);

ts = System.currentTimeMillis();
ts = EnvironmentEdgeManager.currentTime();
put2.addColumn(family, col1, ts, Bytes.toBytes("val2"));
put3.addColumn(family, col1, ts, Bytes.toBytes("val3"));
table.put(put2);

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.Counters;
import org.junit.AfterClass;
import org.junit.Assert;

@@ -159,7 +160,7 @@ public class TestSyncTable {
final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source");
final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target");
Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTableIgnoreTimestampsTrue");
long current = System.currentTimeMillis();
long current = EnvironmentEdgeManager.currentTime();
writeTestData(sourceTableName, targetTableName, current - 1000, current);
hashSourceTable(sourceTableName, testDir, "--ignoreTimestamps=true");
Counters syncCounters = syncTables(sourceTableName, targetTableName,

@@ -490,7 +491,7 @@ public class TestSyncTable {
int sourceRegions = 10;
int targetRegions = 6;
if (ArrayUtils.isEmpty(timestamps)) {
long current = System.currentTimeMillis();
long current = EnvironmentEdgeManager.currentTime();
timestamps = new long[]{current,current};
}

@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

@@ -44,7 +45,7 @@ public class TestWALInputFormat {
public void testAddFile() {
List<FileStatus> lfss = new ArrayList<>();
LocatedFileStatus lfs = Mockito.mock(LocatedFileStatus.class);
long now = System.currentTimeMillis();
long now = EnvironmentEdgeManager.currentTime();
Mockito.when(lfs.getPath()).thenReturn(new Path("/name." + now));
WALInputFormat.addFile(lfss, lfs, now, now);
assertEquals(1, lfss.size());

@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.WAL;

@@ -131,7 +132,7 @@ public class TestWALRecordReader {
WAL log = walfactory.getWAL(info);
// This test depends on timestamp being millisecond based and the filename of the WAL also
// being millisecond based.
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTime();
WALEdit edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value));
log.appendData(info, getWalKeyImpl(ts, scopes), edit);

@@ -145,7 +146,7 @@ public class TestWALRecordReader {
LOG.info("Past 1st WAL roll " + log.toString());

Thread.sleep(1);
long ts1 = System.currentTimeMillis();
long ts1 = EnvironmentEdgeManager.currentTime();

edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1+1, value));

@@ -195,21 +196,24 @@ public class TestWALRecordReader {
byte [] value = Bytes.toBytes("value");
WALEdit edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
System.currentTimeMillis(), value));
long txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
EnvironmentEdgeManager.currentTime(), value));
long txid = log.appendData(info,
getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
log.sync(txid);

Thread.sleep(1); // make sure 2nd log gets a later timestamp
long secondTs = System.currentTimeMillis();
long secondTs = EnvironmentEdgeManager.currentTime();
log.rollWriter();

edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), System.currentTimeMillis(), value));
txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"),
EnvironmentEdgeManager.currentTime(), value));
txid = log.appendData(info,
getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
log.sync(txid);
log.shutdown();
walfactory.shutdown();
long thirdTs = System.currentTimeMillis();
long thirdTs = EnvironmentEdgeManager.currentTime();

// should have 2 log files now
WALInputFormat input = new WALInputFormat();

@@ -251,15 +255,19 @@ public class TestWALRecordReader {
WAL log = walfactory.getWAL(info);
byte [] value = Bytes.toBytes("value");
WALEdit edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), System.currentTimeMillis(), value));
long txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
EnvironmentEdgeManager.currentTime(), value));
long txid = log.appendData(info,
getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
log.sync(txid);

Thread.sleep(10); // make sure 2nd edit gets a later timestamp

edit = new WALEdit();
edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), System.currentTimeMillis(), value));
txid = log.appendData(info, getWalKeyImpl(System.currentTimeMillis(), scopes), edit);
edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"),
EnvironmentEdgeManager.currentTime(), value));
txid = log.appendData(info,
getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
log.sync(txid);
log.shutdown();

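
These WAL tests, including the comment about millisecond-based WAL filenames, keep working unchanged because, unless a test injects its own edge, the manager's default edge simply reads the system clock. A small hedged sketch of that equivalence (the 100 ms tolerance is an arbitrary choice for illustration):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class DefaultEdgeSketch {
  public static void main(String[] args) {
    // With no edge injected, both reads observe the same wall clock.
    long viaEdge = EnvironmentEdgeManager.currentTime();
    long viaSystem = System.currentTimeMillis();
    if (Math.abs(viaSystem - viaEdge) > 100L) {
      throw new AssertionError("unexpected drift between the two reads");
    }
  }
}
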
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.mapreduce.Job;
import org.junit.AfterClass;
import org.junit.Before;

@@ -283,20 +284,20 @@ public class TestVerifyReplication extends TestReplicationBase {
// Take source and target tables snapshot
Path rootDir = CommonFSUtils.getRootDir(CONF1);
FileSystem fs = rootDir.getFileSystem(CONF1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);

// Take target snapshot
Path peerRootDir = CommonFSUtils.getRootDir(CONF2);
FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);

String peerFSAddress = peerFs.getUri().toString();
String tmpPath1 = UTIL1.getRandomDir().toString();
String tmpPath2 = "/tmp" + System.currentTimeMillis();
String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime();

String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
"--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName,

@@ -320,11 +321,11 @@ public class TestVerifyReplication extends TestReplicationBase {
Delete delete = new Delete(put.getRow());
htable2.delete(delete);

sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);

peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);

@@ -388,20 +389,20 @@ public class TestVerifyReplication extends TestReplicationBase {
// Take source and target tables snapshot
Path rootDir = CommonFSUtils.getRootDir(CONF1);
FileSystem fs = rootDir.getFileSystem(CONF1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true);

// Take target snapshot
Path peerRootDir = CommonFSUtils.getRootDir(CONF2);
FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName,
Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true);

String peerFSAddress = peerFs.getUri().toString();
String tmpPath1 = UTIL1.getRandomDir().toString();
String tmpPath2 = "/tmp" + System.currentTimeMillis();
String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime();

String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(),
"--sourceSnapshotName=" + sourceSnapshotName,

@@ -426,11 +427,11 @@ public class TestVerifyReplication extends TestReplicationBase {
Delete delete = new Delete(put.getRow());
htable3.delete(delete);

sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true);

peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName,
Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true);

@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;

@@ -105,7 +106,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
// normal Batch tests
byte[] qualifierName = Bytes.toBytes("f1");
Put put = new Put(Bytes.toBytes("r1"));
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTime();
put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1002"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v1001"));

@@ -169,7 +170,7 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
// normal Batch tests
byte[] qualifierName = Bytes.toBytes("f1");
Put put = new Put(Bytes.toBytes("r1"));
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTime();
put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v2"));

@@ -286,20 +287,20 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
// Take source and target tables snapshot
Path rootDir = CommonFSUtils.getRootDir(CONF1);
FileSystem fs = rootDir.getFileSystem(CONF1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);

// Take target snapshot
Path peerRootDir = CommonFSUtils.getRootDir(CONF2);
FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);

String peerFSAddress = peerFs.getUri().toString();
String temPath1 = UTIL1.getRandomDir().toString();
String temPath2 = "/tmp" + System.currentTimeMillis();
String temPath2 = "/tmp" + EnvironmentEdgeManager.currentTime();

String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
"--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName,

@@ -323,11 +324,11 @@ public class TestVerifyReplicationAdjunct extends TestReplicationBase {
Delete delete = new Delete(put.getRow());
htable2.delete(delete);

sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);

peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);

@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.mapreduce.Job;
import org.junit.AfterClass;

@@ -165,14 +166,14 @@ public class TestVerifyReplicationCrossDiffHdfs {
public void testVerifyRepBySnapshot() throws Exception {
Path rootDir = CommonFSUtils.getRootDir(conf1);
FileSystem fs = rootDir.getFileSystem(conf1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(util1.getAdmin(), TABLE_NAME,
Bytes.toString(FAMILY), sourceSnapshotName, rootDir, fs, true);

// Take target snapshot
Path peerRootDir = CommonFSUtils.getRootDir(conf2);
FileSystem peerFs = peerRootDir.getFileSystem(conf2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime();
SnapshotTestingUtils.createSnapshotAndValidate(util2.getAdmin(), TABLE_NAME,
Bytes.toString(FAMILY), peerSnapshotName, peerRootDir, peerFs, true);

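
Many of these replication tests use the current time only to make snapshot and temp-path names unique. That still works under the default edge, but a test that pins a ManualEnvironmentEdge would mint the same name twice unless it advances the clock; a hedged sketch of one way to keep derived names distinct under an injected clock (the prefix and values are arbitrary):

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class UniqueNameSketch {
  public static void main(String[] args) {
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    clock.setValue(42L);
    EnvironmentEdgeManager.injectEdge(clock);
    try {
      String first = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
      clock.incValue(1L);   // bump the clock so the next derived name differs
      String second = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime();
      System.out.println(first + " vs " + second);  // "...-42" vs "...-43"
    } finally {
      EnvironmentEdgeManager.reset();
    }
  }
}
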
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;

@@ -327,13 +328,14 @@ public class TestExportSnapshot {
private Path getHdfsDestinationDir() {
Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
Path path = new Path(new Path(rootDir, "export-test"), "export-" + System.currentTimeMillis());
Path path = new Path(new Path(rootDir, "export-test"), "export-" +
EnvironmentEdgeManager.currentTime());
LOG.info("HDFS export destination path: " + path);
return path;
}

static Path getLocalDestinationDir(HBaseTestingUtility htu) {
Path path = htu.getDataTestDir("local-export-" + System.currentTimeMillis());
Path path = htu.getDataTestDir("local-export-" + EnvironmentEdgeManager.currentTime());
try {
FileSystem fs = FileSystem.getLocal(htu.getConfiguration());
LOG.info("Local export destination path: " + path);

@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;

@@ -114,7 +115,7 @@ public class TestExportSnapshotV1NoCluster {
static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtility hctu, Path testDir)
throws IOException {
Path path = new Path(new Path(testDir, "export-test"),
"export-" + System.currentTimeMillis()).makeQualified(fs.getUri(),
"export-" + EnvironmentEdgeManager.currentTime()).makeQualified(fs.getUri(),
fs.getWorkingDirectory());
LOG.info("Export destination={}, fs={}, fsurl={}, fswd={}, testDir={}", path, fs, fs.getUri(),
fs.getWorkingDirectory(), testDir);

@@ -284,7 +284,7 @@ public class ProcedureExecutor<TEnvironment> {
}
long evictTtl = conf.getInt(EVICT_TTL_CONF_KEY, DEFAULT_EVICT_TTL);
long evictAckTtl = conf.getInt(EVICT_ACKED_TTL_CONF_KEY, DEFAULT_ACKED_EVICT_TTL);
if (retainer.isExpired(System.currentTimeMillis(), evictTtl, evictAckTtl)) {
if (retainer.isExpired(EnvironmentEdgeManager.currentTime(), evictTtl, evictAckTtl)) {
LOG.debug("Procedure {} has already been finished and expired, skip force updating",
procId);
return;

@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase;
import org.apache.hadoop.hbase.procedure2.util.ByteSlot;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.yetus.audience.InterfaceAudience;

@@ -829,12 +830,12 @@ public class WALProcedureStore extends ProcedureStoreBase {
// Wait SYNC_WAIT_MSEC or the signal of "slots full" before flushing
syncMaxSlot = runningProcCount;
assert syncMaxSlot > 0 : "unexpected syncMaxSlot=" + syncMaxSlot;
final long syncWaitSt = System.currentTimeMillis();
final long syncWaitSt = EnvironmentEdgeManager.currentTime();
if (slotIndex != syncMaxSlot) {
slotCond.await(syncWaitMsec, TimeUnit.MILLISECONDS);
}

final long currentTs = System.currentTimeMillis();
final long currentTs = EnvironmentEdgeManager.currentTime();
final long syncWaitMs = currentTs - syncWaitSt;
final float rollSec = getMillisFromLastRoll() / 1000.0f;
final float syncedPerSec = totalSyncedToStore / rollSec;

@@ -979,7 +980,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
}

public long getMillisFromLastRoll() {
return (System.currentTimeMillis() - lastRollTs.get());
return (EnvironmentEdgeManager.currentTime() - lastRollTs.get());
}

void periodicRollForTesting() throws IOException {

@@ -1103,7 +1104,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
stream = newStream;
flushLogId = logId;
totalSynced.set(0);
long rollTs = System.currentTimeMillis();
long rollTs = EnvironmentEdgeManager.currentTime();
lastRollTs.set(rollTs);
logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos, rollTs));

@@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.Before;

@@ -103,9 +104,9 @@ public class TestProcedureSchedulerConcurrency {
}
}
if (wakeCount.get() != oldWakeCount) {
lastUpdate = System.currentTimeMillis();
lastUpdate = EnvironmentEdgeManager.currentTime();
} else if (wakeCount.get() >= NRUNS &&
(System.currentTimeMillis() - lastUpdate) > WAIT_THRESHOLD) {
(EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD) {
break;
}
Threads.sleepWithoutInterrupt(25);

@@ -31,7 +31,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;

@@ -159,7 +159,8 @@ public abstract class ProcedureStorePerformanceEvaluation<T extends ProcedureSto
boolean failure = false;
try {
for (Future<?> future : futures) {
long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 - System.currentTimeMillis();
long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 -
EnvironmentEdgeManager.currentTime();
failure |= (future.get(timeout, TimeUnit.MILLISECONDS).equals(EXIT_FAILURE));
}
} catch (Exception e) {

@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hbase.procedure2.store.wal;

import static java.lang.System.currentTimeMillis;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

@@ -36,7 +34,7 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;

@@ -166,7 +164,7 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
List<Integer> procStates = shuffleProcWriteSequence();
TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used.
int numProcsPerWal = numWals > 0 ? procStates.size() / numWals : Integer.MAX_VALUE;
long startTime = currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
long lastTime = startTime;
for (int i = 0; i < procStates.size(); ++i) {
int procId = procStates.get(i);

@@ -181,14 +179,14 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
store.update(procs[procId]);
}
if (i > 0 && i % numProcsPerWal == 0) {
long currentTime = currentTimeMillis();
long currentTime = EnvironmentEdgeManager.currentTime();
System.out.println("Forcing wall roll. Time taken on last WAL: " +
(currentTime - lastTime) / 1000.0f + " sec");
store.rollWriterForTesting();
lastTime = currentTime;
}
}
long timeTaken = currentTimeMillis() - startTime;
long timeTaken = EnvironmentEdgeManager.currentTime() - startTime;
System.out.println("\n\nDone writing WALs.\nNum procs : " + numProcs + "\nTotal time taken : "
+ StringUtils.humanTimeDiff(timeTaken) + "\n\n");
}

@@ -199,9 +197,9 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
store.start(1);
store.recoverLease();

long startTime = currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
store.load(loader);
long timeTaken = System.currentTimeMillis() - startTime;
long timeTaken = EnvironmentEdgeManager.currentTime() - startTime;
System.out.println("******************************************");
System.out.println("Load time : " + (timeTaken / 1000.0f) + "sec");
System.out.println("******************************************");

@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.rest.filter.GzipFilter;
import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.DNS;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.Strings;

@@ -395,7 +396,7 @@ public class RESTServer implements Constants {
// Put up info server.
int port = conf.getInt("hbase.rest.info.port", 8085);
if (port >= 0) {
conf.setLong("startcode", System.currentTimeMillis());
conf.setLong("startcode", EnvironmentEdgeManager.currentTime());
String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
this.infoServer = new InfoServer("rest", a, port, false, conf);
this.infoServer.setAttribute("hbase.conf", conf);

@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;

@@ -112,7 +113,7 @@ public class ScannerResultGenerator extends ResultGenerator {
}
scanner = table.getScanner(scan);
cached = null;
id = Long.toString(System.currentTimeMillis()) +
id = Long.toString(EnvironmentEdgeManager.currentTime()) +
Integer.toHexString(scanner.hashCode());
} finally {
table.close();

@@ -44,6 +44,7 @@ import javax.net.ssl.SSLContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;

@@ -332,7 +333,7 @@ public class Client {
method.addHeader(header);
}
}
long startTime = System.currentTimeMillis();
long startTime = EnvironmentEdgeManager.currentTime();
if (resp != null) EntityUtils.consumeQuietly(resp.getEntity());
resp = httpClient.execute(method);
if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {

@@ -342,7 +343,7 @@ public class Client {
resp = httpClient.execute(method);
}

long endTime = System.currentTimeMillis();
long endTime = EnvironmentEdgeManager.currentTime();
if (LOG.isTraceEnabled()) {
LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " +
resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms");

@@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteAdmin;
import org.apache.hadoop.hbase.util.ByteArrayHashKey;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.MurmurHash;
import org.apache.hadoop.hbase.util.Pair;

@@ -836,7 +837,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
// Below is make it so when Tests are all running in the one
// jvm, that they each have a differently seeded Random.
private static final Random randomSeed =
new Random(System.currentTimeMillis());
new Random(EnvironmentEdgeManager.currentTime());
private static long nextRandomSeed() {
return randomSeed.nextLong();
}

@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;

@@ -155,14 +156,14 @@ public class TestRemoteAdminRetries {
}

private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
try {
callExecutor.run();
fail("should be timeout exception!");
} catch (IOException e) {
assertTrue(Pattern.matches(".*MyTable.*timed out", e.toString()));
}
assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
assertTrue((EnvironmentEdgeManager.currentTime() - start) > MAX_TIME);
}

private static interface CallExecutor {

@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;

@@ -182,14 +183,14 @@ public class TestRemoteHTableRetries {
}

private void testTimedOutCall(CallExecutor callExecutor) throws Exception {
long start = System.currentTimeMillis();
long start = EnvironmentEdgeManager.currentTime();
try {
callExecutor.run();
fail("should be timeout exception!");
} catch (IOException e) {
assertTrue(Pattern.matches(".*request timed out", e.toString()));
}
assertTrue((System.currentTimeMillis() - start) > MAX_TIME);
assertTrue((EnvironmentEdgeManager.currentTime() - start) > MAX_TIME);
}

private interface CallExecutor {

@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.rest.RESTServlet;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.junit.After;

@@ -97,7 +98,7 @@ public class TestRemoteTable {
private static final byte[] VALUE_2 = Bytes.toBytes("testvalue2");

private static final long ONE_HOUR = 60 * 60 * 1000;
private static final long TS_2 = System.currentTimeMillis();
private static final long TS_2 = EnvironmentEdgeManager.currentTime();
private static final long TS_1 = TS_2 - ONE_HOUR;

private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;

@@ -62,8 +63,9 @@ public class HealthCheckChore extends ScheduledChore {
         + " number of times consecutively.");
     }
     // Always log health report.
-    LOG.info("Health status at " + StringUtils.formatTime(System.currentTimeMillis()) + " : "
-      + report.getHealthReport());
+    LOG.info("Health status at " +
+      StringUtils.formatTime(EnvironmentEdgeManager.currentTime()) + " : " +
+      report.getHealthReport());
   }
 }

@@ -73,9 +75,9 @@ public class HealthCheckChore extends ScheduledChore {
       // First time we are seeing a failure. No need to stop, just
       // record the time.
       numTimesUnhealthy++;
-      startWindow = System.currentTimeMillis();
+      startWindow = EnvironmentEdgeManager.currentTime();
     } else {
-      if ((System.currentTimeMillis() - startWindow) < failureWindow) {
+      if ((EnvironmentEdgeManager.currentTime() - startWindow) < failureWindow) {
         numTimesUnhealthy++;
         if (numTimesUnhealthy == threshold) {
           stop = true;

@@ -85,7 +87,7 @@ public class HealthCheckChore extends ScheduledChore {
       } else {
         // Outside of failure window, so we reset to 1.
         numTimesUnhealthy = 1;
-        startWindow = System.currentTimeMillis();
+        startWindow = EnvironmentEdgeManager.currentTime();
         stop = false;
       }
     }

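The three hunks above touch a small consecutive-failure window: the first failure records startWindow, and later failures either count toward the threshold while still inside failureWindow or reset the counter. A self-contained sketch of that decision, illustrative only and not the actual chore class; the class, field, and method names below are invented for the example:

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

class FailureWindowSketch {
  private final long failureWindow;   // window length in milliseconds
  private final int threshold;        // consecutive failures that trigger a stop
  private long startWindow;           // time of the first failure in the current window
  private int numTimesUnhealthy;

  FailureWindowSketch(long failureWindow, int threshold) {
    this.failureWindow = failureWindow;
    this.threshold = threshold;
  }

  /** Returns true once the threshold is reached inside a single window. */
  boolean recordFailure() {
    long now = EnvironmentEdgeManager.currentTime();
    if (numTimesUnhealthy == 0) {
      numTimesUnhealthy = 1;          // first failure: just open the window
      startWindow = now;
      return false;
    }
    if (now - startWindow < failureWindow) {
      numTimesUnhealthy++;            // still inside the window: count it
      return numTimesUnhealthy >= threshold;
    }
    numTimesUnhealthy = 1;            // window expired: start a fresh one
    startWindow = now;
    return false;
  }
}
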
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.regionserver.ShipperListener;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -1032,10 +1033,10 @@ public class HFileBlock implements Cacheable {
   protected void finishBlockAndWriteHeaderAndData(DataOutputStream out)
     throws IOException {
     ensureBlockReady();
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size());
     out.write(onDiskChecksum);
-    HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
+    HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);
   }

   /**

@@ -1706,7 +1707,7 @@ public class HFileBlock implements Cacheable {
     // checksums. Can change with circumstances. The below flag is whether the
     // file has support for checksums (version 2+).
     boolean checksumSupport = this.fileContext.isUseHBaseChecksum();
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     if (onDiskSizeWithHeader <= 0) {
       // We were not passed the block size. Need to get it from the header. If header was
       // not cached (see getCachedHeader above), need to seek to pull it in. This is costly

@@ -1753,7 +1754,7 @@ public class HFileBlock implements Cacheable {
     if (verifyChecksum && !validateChecksum(offset, curBlock, hdrSize)) {
       return null;
     }
-    long duration = System.currentTimeMillis() - startTime;
+    long duration = EnvironmentEdgeManager.currentTime() - startTime;
     if (updateMetrics) {
       HFile.updateReadLatency(duration, pread);
     }

@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.util.BloomFilterWriter;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -212,9 +213,9 @@ public class HFileWriterImpl implements HFile.Writer {
       throws IOException {
     trailer.setFileInfoOffset(outputStream.getPos());
     finishFileInfo();
-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     fileInfo.write(out);
-    HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
+    HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);
   }

   public long getPos() throws IOException {

@@ -841,9 +842,9 @@ public class HFileWriterImpl implements HFile.Writer {
     trailer.setEntryCount(entryCount);
     trailer.setCompressionCodec(hFileContext.getCompression());

-    long startTime = System.currentTimeMillis();
+    long startTime = EnvironmentEdgeManager.currentTime();
     trailer.serialize(outputStream);
-    HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
+    HFile.updateWriteLatency(EnvironmentEdgeManager.currentTime() - startTime);

     if (closeOutputStream) {
       outputStream.close();

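Both the HFileBlock and HFileWriterImpl hunks follow the same pattern: read the clock before and after the I/O and hand the difference to HFile.updateWriteLatency. A hedged sketch of that pattern in isolation; the latencySink parameter stands in for the metrics call and is not an HBase API:

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.function.LongConsumer;

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

final class WriteLatencySketch {
  private WriteLatencySketch() {
  }

  /** Times one write and reports the elapsed milliseconds to the given consumer. */
  static void timedWrite(DataOutputStream out, byte[] payload, LongConsumer latencySink)
      throws IOException {
    long startTime = EnvironmentEdgeManager.currentTime();
    out.write(payload);                                           // the I/O being measured
    latencySink.accept(EnvironmentEdgeManager.currentTime() - startTime);
  }
}

Because the elapsed time now comes from the injectable clock, a test can drive the recorded latency with a ManualEnvironmentEdge instead of relying on real sleeps.
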
@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;

@@ -1074,7 +1075,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache {
       long freedSumMb = 0;
       int heavyEvictionCount = 0;
       int freedDataOverheadPercent = 0;
-      long startTime = System.currentTimeMillis();
+      long startTime = EnvironmentEdgeManager.currentTime();
       while (this.go) {
         synchronized (this) {
           try {

@@ -1107,7 +1108,7 @@ public class LruAdaptiveBlockCache implements FirstLevelBlockCache {
           // This is should be almost the same time (+/- 10s)
           // because we get comparable volumes of freed bytes each time.
           // 10s because this is default period to run evict() (see above this.wait)
-          long stopTime = System.currentTimeMillis();
+          long stopTime = EnvironmentEdgeManager.currentTime();
           if ((stopTime - startTime) > 1000 * 10 - 1) {
             // Here we have to calc what situation we have got.
             // We have the limit "hbase.lru.cache.heavy.eviction.bytes.size.limit"

@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -62,7 +63,7 @@ public final class PrefetchExecutor {
       new ThreadFactory() {
         @Override
         public Thread newThread(Runnable r) {
-          String name = "hfile-prefetch-" + System.currentTimeMillis();
+          String name = "hfile-prefetch-" + EnvironmentEdgeManager.currentTime();
           Thread t = new Thread(r, name);
           t.setDaemon(true);
           return t;

@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -103,7 +104,7 @@ public class CallRunner {
       }
       return;
     }
-    call.setStartTime(System.currentTimeMillis());
+    call.setStartTime(EnvironmentEdgeManager.currentTime());
     if (call.getStartTime() > call.getDeadline()) {
       RpcServer.LOG.warn("Dropping timed out call: " + call);
       return;

@@ -30,6 +30,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
 import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hbase.thirdparty.com.google.protobuf.Message;

@@ -116,7 +117,7 @@ class NettyServerRpcConnection extends ServerRpcConnection {
       long size, final InetAddress remoteAddress, int timeout,
       CallCleanup reqCleanup) {
     return new NettyServerCall(id, service, md, header, param, cellScanner, this, size,
-      remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.bbAllocator,
+      remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator,
      this.rpcServer.cellBlockBuilder, reqCleanup);
   }

@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.GsonUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -395,7 +396,7 @@ public abstract class RpcServer implements RpcServerInterface,
       Message result = call.getService().callBlockingMethod(md, controller, param);
       long receiveTime = call.getReceiveTime();
       long startTime = call.getStartTime();
-      long endTime = System.currentTimeMillis();
+      long endTime = EnvironmentEdgeManager.currentTime();
       int processingTime = (int) (endTime - startTime);
       int qTime = (int) (startTime - receiveTime);
       int totalTime = (int) (endTime - receiveTime);

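The hunk above derives the three reported durations from three timestamps that now all come from the same injectable clock. A worked example with arbitrary values, just to make the arithmetic concrete:

public class RpcTimingExample {
  public static void main(String[] args) {
    // Arbitrary example values, in milliseconds since the epoch.
    long receiveTime = 1_000L;   // call arrived at the server
    long startTime = 1_040L;     // handler started processing
    long endTime = 1_100L;       // handler finished

    int qTime = (int) (startTime - receiveTime);       // 40 ms spent queued
    int processingTime = (int) (endTime - startTime);  // 60 ms spent in the handler
    int totalTime = (int) (endTime - receiveTime);     // 100 ms end to end

    System.out.println(qTime + " + " + processingTime + " = " + totalTime);
  }
}
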
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -432,7 +433,7 @@ public abstract class ServerCall<T extends ServerRpcConnection> implements RpcCa
   @Override
   public long disconnectSince() {
     if (!this.connection.isConnectionOpen()) {
-      return System.currentTimeMillis() - receiveTime;
+      return EnvironmentEdgeManager.currentTime() - receiveTime;
     } else {
       return -1L;
     }

@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.security.HBasePolicyProvider;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

@@ -315,7 +316,7 @@ public class SimpleRpcServer extends RpcServer {
       if (c == null) {
         return;
       }
-      c.setLastContact(System.currentTimeMillis());
+      c.setLastContact(EnvironmentEdgeManager.currentTime());
       try {
         count = c.readAndProcess();
       } catch (InterruptedException ieo) {

@@ -331,7 +332,7 @@ public class SimpleRpcServer extends RpcServer {
         closeConnection(c);
         c = null;
       } else {
-        c.setLastContact(System.currentTimeMillis());
+        c.setLastContact(EnvironmentEdgeManager.currentTime());
       }
     }

@@ -586,7 +587,8 @@ public class SimpleRpcServer extends RpcServer {
   }

   SimpleServerRpcConnection register(SocketChannel channel) {
-    SimpleServerRpcConnection connection = getConnection(channel, System.currentTimeMillis());
+    SimpleServerRpcConnection connection = getConnection(channel,
+      EnvironmentEdgeManager.currentTime());
     add(connection);
     if (LOG.isTraceEnabled()) {
       LOG.trace("Connection from " + connection +

@@ -617,7 +619,7 @@ public class SimpleRpcServer extends RpcServer {
     // synch'ed to avoid explicit invocation upon OOM from colliding with
     // timer task firing
     synchronized void closeIdle(boolean scanAll) {
-      long minLastContact = System.currentTimeMillis() - maxIdleTime;
+      long minLastContact = EnvironmentEdgeManager.currentTime() - maxIdleTime;
       // concurrent iterator might miss new connections added
       // during the iteration, but that's ok because they won't
       // be idle yet anyway and will be caught on next scan

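closeIdle above computes a cutoff, the current time minus maxIdleTime, and closes every connection whose last contact is older. A simplified sketch of that cutoff test on a plain map of connection name to last-contact time; the map shape is invented for the example and is not the server's data structure:

import java.util.Iterator;
import java.util.Map;

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

final class IdleScanSketch {
  private IdleScanSketch() {
  }

  /** Drops entries whose last contact is older than maxIdleTime milliseconds. */
  static void closeIdle(Map<String, Long> lastContactByConnection, long maxIdleTime) {
    long minLastContact = EnvironmentEdgeManager.currentTime() - maxIdleTime;
    Iterator<Map.Entry<String, Long>> it = lastContactByConnection.entrySet().iterator();
    while (it.hasNext()) {
      if (it.next().getValue() < minLastContact) {
        it.remove();                 // idle past the cutoff: close it
      }
    }
  }
}
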
@@ -29,9 +29,10 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;

 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;

 /**
  * Sends responses of RPC back to clients.

@@ -162,7 +163,7 @@ class SimpleRpcServerResponder extends Thread {
    * @return the time of the purge.
    */
   private long purge(long lastPurgeTime) {
-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     if (now < lastPurgeTime + this.simpleRpcServer.purgeTimeout) {
       return lastPurgeTime;
     }

@@ -247,7 +248,7 @@ class SimpleRpcServerResponder extends Thread {
       return true;
     } else {
       // set the serve time when the response has to be sent later
-      conn.lastSentTime = System.currentTimeMillis();
+      conn.lastSentTime = EnvironmentEdgeManager.currentTime();
       return false; // Socket can't take more, we will have to come back.
     }
   }

@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.VersionInfoUtil;
 import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
 import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup;
 import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hbase.thirdparty.com.google.protobuf.BlockingService;
 import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream;
 import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor;

@@ -209,7 +210,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {

     // Notify the client about the offending request
     SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null,
-      null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0,
+      null, null, null, this, 0, this.addr, EnvironmentEdgeManager.currentTime(), 0,
       this.rpcServer.bbAllocator, this.rpcServer.cellBlockBuilder, null, responder);
     this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION);
     // Make sure the client recognizes the underlying exception

@@ -327,7 +328,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
       RequestHeader header, Message param, CellScanner cellScanner, long size,
       InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
     return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size,
-      remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.bbAllocator,
+      remoteAddress, EnvironmentEdgeManager.currentTime(), timeout, this.rpcServer.bbAllocator,
       this.rpcServer.cellBlockBuilder, reqCleanup, this.responder);
   }

@@ -195,6 +195,7 @@ import org.apache.hadoop.hbase.security.SecurityConstants;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FutureUtils;
 import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;

@@ -802,7 +803,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    */
   status.setStatus("Initializing Master file system");

-  this.masterActiveTime = System.currentTimeMillis();
+  this.masterActiveTime = EnvironmentEdgeManager.currentTime();
   // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.

   // always initialize the MemStoreLAB as we use a region to store data in master now, see

@@ -896,7 +897,7 @@ public class HMaster extends HRegionServer implements MasterServices {

   // Start the Zombie master detector after setting master as active, see HBASE-21535
   Thread zombieDetector = new Thread(new MasterInitializationMonitor(this),
-    "ActiveMasterInitializationMonitor-" + System.currentTimeMillis());
+    "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
   zombieDetector.setDaemon(true);
   zombieDetector.start();

@@ -1043,8 +1044,8 @@ public class HMaster extends HRegionServer implements MasterServices {

   status.markComplete("Initialization successful");
   LOG.info(String.format("Master has completed initialization %.3fsec",
-    (System.currentTimeMillis() - masterActiveTime) / 1000.0f));
-  this.masterFinishedInitializationTime = System.currentTimeMillis();
+    (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f));
+  this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
   configurationManager.registerObserver(this.balancer);
   configurationManager.registerObserver(this.cleanerPool);
   configurationManager.registerObserver(this.hfileCleaner);

@@ -1104,11 +1105,11 @@ public class HMaster extends HRegionServer implements MasterServices {
    * After master has started up, lets do balancer post startup initialization. Since this runs
    * in activeMasterManager thread, it should be fine.
    */
-  long start = System.currentTimeMillis();
+  long start = EnvironmentEdgeManager.currentTime();
   this.balancer.postMasterStartupInitialize();
   if (LOG.isDebugEnabled()) {
     LOG.debug("Balancer post startup initialization complete, took " + (
-      (System.currentTimeMillis() - start) / 1000) + " seconds");
+      (EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
   }
 }

@@ -1618,7 +1619,7 @@ public class HMaster extends HRegionServer implements MasterServices {

   // Sleep to next balance plan start time
   // But if there are zero regions in transition, it can skip sleep to speed up.
-  while (!interrupted && System.currentTimeMillis() < nextBalanceStartTime
+  while (!interrupted && EnvironmentEdgeManager.currentTime() < nextBalanceStartTime
     && this.assignmentManager.getRegionStates().hasRegionsInTransition()) {
     try {
       Thread.sleep(100);

@@ -1631,7 +1632,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   while (!interrupted
     && maxRegionsInTransition > 0
     && this.assignmentManager.getRegionStates().getRegionsInTransitionCount()
-      >= maxRegionsInTransition && System.currentTimeMillis() <= cutoffTime) {
+      >= maxRegionsInTransition && EnvironmentEdgeManager.currentTime() <= cutoffTime) {
     try {
       // sleep if the number of regions in transition exceeds the limit
       Thread.sleep(100);

@@ -1758,7 +1759,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   public List<RegionPlan> executeRegionPlansWithThrottling(List<RegionPlan> plans) {
     List<RegionPlan> successRegionPlans = new ArrayList<>();
     int maxRegionsInTransition = getMaxRegionsInTransition();
-    long balanceStartTime = System.currentTimeMillis();
+    long balanceStartTime = EnvironmentEdgeManager.currentTime();
     long cutoffTime = balanceStartTime + this.maxBalancingTime;
     int rpCount = 0; // number of RegionPlans balanced so far
     if (plans != null && !plans.isEmpty()) {

@@ -1788,7 +1789,7 @@ public class HMaster extends HRegionServer implements MasterServices {

     // if performing next balance exceeds cutoff time, exit the loop
     if (this.maxBalancingTime > 0 && rpCount < plans.size()
-      && System.currentTimeMillis() > cutoffTime) {
+      && EnvironmentEdgeManager.currentTime() > cutoffTime) {
       // TODO: After balance, there should not be a cutoff time (keeping it as
       // a security net for now)
       LOG.debug("No more balancing till next balance run; maxBalanceTime="

@@ -2575,7 +2575,8 @@ public class MasterRpcServices extends RSRpcServices implements
     RegionState.State newState = RegionState.State.convert(s.getState());
     LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info,
       prevState.getState(), newState);
-    Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info, System.currentTimeMillis());
+    Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info,
+      EnvironmentEdgeManager.currentTime());
     metaPut.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
       Bytes.toBytes(newState.name()));
     List<Put> putList = new ArrayList<>();

@@ -512,8 +512,7 @@ public class ServerManager {
     ZKWatcher zkw = master.getZooKeeper();
     int onlineServersCt;
     while ((onlineServersCt = onlineServers.size()) > 0){
-
-      if (System.currentTimeMillis() > (previousLogTime + 1000)) {
+      if (EnvironmentEdgeManager.currentTime() > (previousLogTime + 1000)) {
         Set<ServerName> remainingServers = onlineServers.keySet();
         synchronized (onlineServers) {
           if (remainingServers.size() == 1 && remainingServers.contains(sn)) {

@@ -530,7 +529,7 @@ public class ServerManager {
           sb.append(key);
         }
         LOG.info("Waiting on regionserver(s) " + sb.toString());
-        previousLogTime = System.currentTimeMillis();
+        previousLogTime = EnvironmentEdgeManager.currentTime();
       }

       try {

@@ -703,8 +702,8 @@ public class ServerManager {
     if (timeout < 0) {
       return;
     }
-    long expiration = timeout + System.currentTimeMillis();
-    while (System.currentTimeMillis() < expiration) {
+    long expiration = timeout + EnvironmentEdgeManager.currentTime();
+    while (EnvironmentEdgeManager.currentTime() < expiration) {
       try {
         RegionInfo rsRegion = ProtobufUtil.toRegionInfo(FutureUtils
           .get(

@@ -775,7 +774,7 @@ public class ServerManager {
       maxToStart = Integer.MAX_VALUE;
     }

-    long now = System.currentTimeMillis();
+    long now = EnvironmentEdgeManager.currentTime();
     final long startTime = now;
     long slept = 0;
     long lastLogTime = 0;

@@ -808,7 +807,7 @@ public class ServerManager {
       // We sleep for some time
       final long sleepTime = 50;
       Thread.sleep(sleepTime);
-      now = System.currentTimeMillis();
+      now = EnvironmentEdgeManager.currentTime();
       slept = now - startTime;

       oldCount = count;

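Several of the ServerManager hunks use the same deadline shape: expiration is the timeout added to the current time, then the loop polls while the clock is still before it. A small sketch of that loop, again reading the injectable clock; the condition parameter is a placeholder, not an HBase API:

import java.util.function.BooleanSupplier;

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

final class DeadlineLoopSketch {
  private DeadlineLoopSketch() {
  }

  /** Polls the condition until it holds or timeoutMs elapses; returns whether it held. */
  static boolean waitFor(long timeoutMs, BooleanSupplier condition) throws InterruptedException {
    long expiration = timeoutMs + EnvironmentEdgeManager.currentTime();
    while (EnvironmentEdgeManager.currentTime() < expiration) {
      if (condition.getAsBoolean()) {
        return true;                 // condition satisfied before the deadline
      }
      Thread.sleep(50);              // brief backoff, like the 50 ms sleep above
    }
    return false;                    // deadline reached without the condition holding
  }
}
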
@@ -2230,7 +2230,7 @@ public class AssignmentManager {
   private void acceptPlan(final HashMap<RegionInfo, RegionStateNode> regions,
       final Map<ServerName, List<RegionInfo>> plan) throws HBaseIOException {
     final ProcedureEvent<?>[] events = new ProcedureEvent[regions.size()];
-    final long st = System.currentTimeMillis();
+    final long st = EnvironmentEdgeManager.currentTime();

     if (plan.isEmpty()) {
       throw new HBaseIOException("unable to compute plans for regions=" + regions.size());

@@ -2256,7 +2256,7 @@ public class AssignmentManager {
     }
     ProcedureEvent.wakeEvents(getProcedureScheduler(), events);

-    final long et = System.currentTimeMillis();
+    final long et = EnvironmentEdgeManager.currentTime();
     if (LOG.isTraceEnabled()) {
       LOG.trace("ASSIGN ACCEPT " + events.length + " -> " +
         StringUtils.humanTimeDiff(et - st));

Some files were not shown because too many files have changed in this diff.