HBASE-26582 Prune use of Random and SecureRandom objects (#4118)
Avoid the pattern where a Random object is allocated, used once or twice, and then left for GC. This pattern triggers warnings from some static analysis tools because it leads to poor effective randomness. In a few cases we were legitimately suffering from this issue; in others a change is still good to reduce noise in analysis results.

Use ThreadLocalRandom where there is no requirement to set the seed, to gain good reuse. Where useful, relax use of SecureRandom to plain Random or ThreadLocalRandom, which are unlikely to block if the system entropy pool is low, when we don't need cryptographically strong randomness for the use case. The exception to this is the normalization of Bytes#random use to fill byte arrays with randomness: because Bytes#random may be used to generate key material, it must be backed by SecureRandom.

Signed-off-by: Duo Zhang <zhangduo@apache.org>
parent 98836fb2b4
commit 300f9b9576
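
The diff below repeatedly applies the pattern described above. As a minimal sketch (in Java, using the Bytes helpers this change standardizes on; the class and method names here are illustrative, not code from the commit):

import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: shows the before/after shape of the change.
public class RandomUsageSketch {

  // Before: a new Random allocated per call, used once, then left for GC.
  int pickBucketOld(int buckets) {
    return new java.util.Random().nextInt(buckets);
  }

  // After: reuse the per-thread generator when no fixed seed is needed.
  int pickBucketNew(int buckets) {
    return ThreadLocalRandom.current().nextInt(buckets);
  }

  // Key material and IVs use Bytes.secureRandom, which is backed by SecureRandom.
  byte[] newIv(int ivLength) {
    byte[] iv = new byte[ivLength];
    Bytes.secureRandom(iv);
    return iv;
  }

  // Test payloads that need no cryptographic strength use Bytes.random.
  byte[] newTestPayload(int length) {
    byte[] b = new byte[length];
    Bytes.random(b);
    return b;
  }
}
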
@@ -34,7 +34,6 @@ import java.util.List;
import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -44,6 +43,7 @@ import org.apache.hadoop.hbase.io.asyncfs.monitor.ExcludeDatanodeManager;
import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -57,7 +57,6 @@ import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
@@ -72,13 +71,9 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutput.class);

private static final Logger LOG = LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutput.class);

private static DistributedFileSystem FS;

private static EventLoopGroup EVENT_LOOP_GROUP;

private static Class<? extends Channel> CHANNEL_CLASS;

private static int READ_TIMEOUT_MS = 2000;

private static StreamSlowMonitor MONITOR;
@@ -104,14 +99,16 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
shutdownMiniDFSCluster();
}

private static final Random RNG = new Random(); // This test depends on Random#setSeed

static void writeAndVerify(FileSystem fs, Path f, AsyncFSOutput out)
throws IOException, InterruptedException, ExecutionException {
List<CompletableFuture<Long>> futures = new ArrayList<>();
byte[] b = new byte[10];
Random rand = new Random(12345);
// test pipelined flush
RNG.setSeed(12345);
for (int i = 0; i < 10; i++) {
rand.nextBytes(b);
RNG.nextBytes(b);
out.write(b);
futures.add(out.flush(false));
futures.add(out.flush(false));
@@ -123,11 +120,11 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
out.close();
assertEquals(b.length * 10, fs.getFileStatus(f).getLen());
byte[] actual = new byte[b.length];
rand.setSeed(12345);
RNG.setSeed(12345);
try (FSDataInputStream in = fs.open(f)) {
for (int i = 0; i < 10; i++) {
in.readFully(actual);
rand.nextBytes(b);
RNG.nextBytes(b);
assertArrayEquals(b, actual);
}
assertEquals(-1, in.read());
@@ -150,7 +147,7 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true,
false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR);
byte[] b = new byte[10];
ThreadLocalRandom.current().nextBytes(b);
Bytes.random(b);
out.write(b, 0, b.length);
out.flush(false).get();
// restart one datanode which causes one connection broken
@@ -260,7 +257,7 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true,
false, (short) 3, 1024 * 1024 * 1024, eventLoop, CHANNEL_CLASS, MONITOR);
byte[] b = new byte[50 * 1024 * 1024];
ThreadLocalRandom.current().nextBytes(b);
Bytes.random(b);
out.write(b);
out.flush(false);
assertEquals(b.length, out.flush(false).get().longValue());

@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.io.asyncfs;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

@@ -29,12 +28,12 @@ import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -57,7 +56,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;

/**
* Testcase for HBASE-26679, here we introduce a separate test class and not put the testcase in
* {@link TestFanOutOneBlockAsyncDFSOutput} because we will send heartbeat to DN when there is no
@@ -191,7 +189,7 @@ public class TestFanOutOneBlockAsyncDFSOutputHang extends AsyncFSTestBase {
});

byte[] b = new byte[10];
ThreadLocalRandom.current().nextBytes(b);
Bytes.random(b);
OUT.write(b, 0, b.length);
CompletableFuture<Long> future = OUT.flush(false);
/**

@@ -19,8 +19,7 @@
package org.apache.hadoop.hbase.client;

import java.util.Arrays;
import java.util.Random;

import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;

@@ -33,12 +32,12 @@ public final class PerClientRandomNonceGenerator implements NonceGenerator {
private static final PerClientRandomNonceGenerator INST = new PerClientRandomNonceGenerator();

private final Random rdm = new Random();
private final long clientId;

private PerClientRandomNonceGenerator() {
byte[] clientIdBase = ClientIdGenerator.generateClientId();
this.clientId = (((long) Arrays.hashCode(clientIdBase)) << 32) + rdm.nextInt();
this.clientId = (((long) Arrays.hashCode(clientIdBase)) << 32) +
ThreadLocalRandom.current().nextInt();
}

@Override
@@ -50,7 +49,7 @@ public final class PerClientRandomNonceGenerator implements NonceGenerator {
public long newNonce() {
long result = HConstants.NO_NONCE;
do {
result = rdm.nextLong();
result = ThreadLocalRandom.current().nextLong();
} while (result == HConstants.NO_NONCE);
return result;
}

@@ -20,7 +20,7 @@
package org.apache.hadoop.hbase.filter;

import java.util.Objects;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.hbase.Cell;
import org.apache.yetus.audience.InterfaceAudience;
@@ -35,7 +35,6 @@ import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferExce
*/
@InterfaceAudience.Public
public class RandomRowFilter extends FilterBase {
protected static final Random random = new Random();

protected float chance;
protected boolean filterOutRow;
@@ -104,7 +103,7 @@ public class RandomRowFilter extends FilterBase {
filterOutRow = false;
} else {
// roll the dice
filterOutRow = !(random.nextFloat() < chance);
filterOutRow = !(ThreadLocalRandom.current().nextFloat() < chance);
}
return filterOutRow;
}

@@ -22,7 +22,6 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.security.Key;
import java.security.KeyException;
import java.security.SecureRandom;
import java.util.Properties;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.crypto.cipher.CryptoCipherFactory;
@@ -37,7 +36,6 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.generated.EncryptionProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
@@ -50,8 +48,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
public final class EncryptionUtil {
static private final Logger LOG = LoggerFactory.getLogger(EncryptionUtil.class);

static private final SecureRandom RNG = new SecureRandom();

/**
* Private constructor to keep this class from being instantiated.
*/
@@ -96,7 +92,7 @@ public final class EncryptionUtil {
byte[] iv = null;
if (cipher.getIvLength() > 0) {
iv = new byte[cipher.getIvLength()];
RNG.nextBytes(iv);
Bytes.secureRandom(iv);
builder.setIv(UnsafeByteOperations.unsafeWrap(iv));
}
byte[] keyBytes = key.getEncoded();
@@ -286,7 +282,7 @@ public final class EncryptionUtil {
* @throws IOException if create CryptoAES failed
*/
public static CryptoAES createCryptoAES(RPCProtos.CryptoCipherMeta cryptoCipherMeta,
Configuration conf) throws IOException {
Configuration conf) throws IOException {
Properties properties = new Properties();
// the property for cipher class
properties.setProperty(CryptoCipherFactory.CLASSES_KEY,

@@ -22,7 +22,8 @@ package org.apache.hadoop.hbase.slowlog;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
@@ -49,8 +50,6 @@ public class SlowLogTableAccessor {

private static final Logger LOG = LoggerFactory.getLogger(SlowLogTableAccessor.class);

private static final Random RANDOM = new Random();

private static Connection connection;

/**
@@ -139,7 +138,7 @@ public class SlowLogTableAccessor {
String lastFiveDig =
hashcode.substring((hashcode.length() > 5) ? (hashcode.length() - 5) : 0);
if (lastFiveDig.startsWith("-")) {
lastFiveDig = String.valueOf(RANDOM.nextInt(99999));
lastFiveDig = String.valueOf(ThreadLocalRandom.current().nextInt(99999));
}
final long currentTime = EnvironmentEdgeManager.currentTime();
final String timeAndHashcode = currentTime + lastFiveDig;

@@ -23,7 +23,7 @@ import static org.junit.Assert.fail;

import java.security.Key;
import java.security.KeyException;
import java.security.SecureRandom;

import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -110,7 +110,7 @@ public class TestEncryptionUtil {

// generate a test key
byte[] keyBytes = new byte[AES.KEY_LENGTH];
new SecureRandom().nextBytes(keyBytes);
Bytes.secureRandom(keyBytes);
String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Key key = new SecretKeySpec(keyBytes, algorithm);

@@ -152,7 +152,7 @@ public class TestEncryptionUtil {

// generate a test key
byte[] keyBytes = new byte[AES.KEY_LENGTH];
new SecureRandom().nextBytes(keyBytes);
Bytes.secureRandom(keyBytes);
String algorithm =
conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Key key = new SecretKeySpec(keyBytes, algorithm);
@@ -189,7 +189,7 @@ public class TestEncryptionUtil {

// generate a test key
byte[] keyBytes = new byte[AES.KEY_LENGTH];
new SecureRandom().nextBytes(keyBytes);
Bytes.secureRandom(keyBytes);
String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Key key = new SecretKeySpec(keyBytes, algorithm);

@@ -214,7 +214,7 @@ public class TestEncryptionUtil {

// generate a test key
byte[] keyBytes = new byte[AES.KEY_LENGTH];
new SecureRandom().nextBytes(keyBytes);
Bytes.secureRandom(keyBytes);
String algorithm =
conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Key key = new SecretKeySpec(keyBytes, algorithm);

@@ -25,11 +25,9 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests;

@@ -20,11 +20,9 @@ package org.apache.hadoop.hbase.util;
import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests;

@@ -317,7 +317,7 @@ public final class Encryption {
*/
private static byte[] generateSecretKey(String algorithm, int keyLengthBytes, char[] password) {
byte[] salt = new byte[keyLengthBytes];
Bytes.random(salt);
Bytes.secureRandom(salt);
PBEKeySpec spec = new PBEKeySpec(password, salt, 10000, keyLengthBytes*8);
try {
return SecretKeyFactory.getInstance(algorithm).generateSecret(spec).getEncoded();

@@ -21,7 +21,6 @@ import java.io.ByteArrayInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.SecureRandom;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@@ -110,7 +109,7 @@ public class HFileBlockDefaultEncodingContext implements HFileBlockEncodingConte
if (cryptoContext != Encryption.Context.NONE) {
cryptoByteStream = new ByteArrayOutputStream();
iv = new byte[cryptoContext.getCipher().getIvLength()];
new SecureRandom().nextBytes(iv);
Bytes.secureRandom(iv);
}

dummyHeader = Preconditions.checkNotNull(headerBytes,

@@ -38,6 +38,8 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Random;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
@@ -2555,11 +2557,16 @@ public class Bytes implements Comparable<Bytes> {
Arrays.fill(b, offset, offset + length, (byte) 0);
}

private static final SecureRandom RNG = new SecureRandom();
// Pseudorandom random number generator, do not use SecureRandom here
private static final Random RNG = new Random();

/**
* Fill given array with random bytes.
* @param b array which needs to be filled with random bytes
* <p>
* If you want random bytes generated by a strong source of randomness use {@link
* Bytes#secureRandom(byte[])}.
* @param b array which needs to be filled with random bytes
*/
public static void random(byte[] b) {
RNG.nextBytes(b);
@@ -2567,9 +2574,12 @@ public class Bytes implements Comparable<Bytes> {

/**
* Fill given array with random bytes at the specified position.
* @param b
* @param offset
* @param length
* <p>
* If you want random bytes generated by a strong source of randomness use {@link
* Bytes#secureRandom(byte[], int, int)}.
* @param b array which needs to be filled with random bytes
* @param offset staring offset in array
* @param length number of bytes to fill
*/
public static void random(byte[] b, int offset, int length) {
checkPositionIndex(offset, b.length, "offset");
@@ -2580,6 +2590,33 @@ public class Bytes implements Comparable<Bytes> {
System.arraycopy(buf, 0, b, offset, length);
}

// Bytes.secureRandom may be used to create key material.
private static final SecureRandom SECURE_RNG = new SecureRandom();

/**
* Fill given array with random bytes using a strong random number generator.
* @param b array which needs to be filled with random bytes
*/
public static void secureRandom(byte[] b) {
SECURE_RNG.nextBytes(b);
}

/**
* Fill given array with random bytes at the specified position using a strong random number
* generator.
* @param b array which needs to be filled with random bytes
* @param offset staring offset in array
* @param length number of bytes to fill
*/
public static void secureRandom(byte[] b, int offset, int length) {
checkPositionIndex(offset, b.length, "offset");
checkArgument(length > 0, "length must be greater than 0");
checkPositionIndex(offset + length, b.length, "offset + length");
byte[] buf = new byte[length];
SECURE_RNG.nextBytes(buf);
System.arraycopy(buf, 0, b, offset, length);
}

/**
* Create a max byte array with the specified max byte count
* @param maxByteCount the length of returned byte array

@@ -23,7 +23,6 @@ import java.net.ServerSocket;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
@@ -269,10 +268,7 @@ public class HBaseCommonTestingUtility {
return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
}

// Support for Random Port Generation.
static Random random = new Random();

private static final PortAllocator portAllocator = new PortAllocator(random);
private static final PortAllocator portAllocator = new PortAllocator();

public static int randomFreePort() {
return portAllocator.randomFreePort();
@@ -285,11 +281,9 @@ public class HBaseCommonTestingUtility {
/** A set of ports that have been claimed using {@link #randomFreePort()}. */
private final Set<Integer> takenRandomPorts = new HashSet<>();

private final Random random;
private final AvailablePortChecker portChecker;

public PortAllocator(Random random) {
this.random = random;
public PortAllocator() {
this.portChecker = new AvailablePortChecker() {
@Override
public boolean available(int port) {
@@ -304,8 +298,7 @@ public class HBaseCommonTestingUtility {
};
}

public PortAllocator(Random random, AvailablePortChecker portChecker) {
this.random = random;
public PortAllocator(AvailablePortChecker portChecker) {
this.portChecker = portChecker;
}

@@ -336,7 +329,7 @@ public class HBaseCommonTestingUtility {
*/
private int randomPort() {
return MIN_RANDOM_PORT
+ random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
+ ThreadLocalRandom.current().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
}

interface AvailablePortChecker {

@@ -48,9 +48,9 @@ public class TestEncryption {
@Test
public void testSmallBlocks() throws Exception {
byte[] key = new byte[16];
Bytes.random(key);
Bytes.secureRandom(key);
byte[] iv = new byte[16];
Bytes.random(iv);
Bytes.secureRandom(iv);
for (int size: new int[] { 4, 8, 16, 32, 64, 128, 256, 512 }) {
checkTransformSymmetry(key, iv, getRandomBlock(size));
}
@@ -59,9 +59,9 @@ public class TestEncryption {
@Test
public void testLargeBlocks() throws Exception {
byte[] key = new byte[16];
Bytes.random(key);
Bytes.secureRandom(key);
byte[] iv = new byte[16];
Bytes.random(iv);
Bytes.secureRandom(iv);
for (int size: new int[] { 256 * 1024, 512 * 1024, 1024 * 1024 }) {
checkTransformSymmetry(key, iv, getRandomBlock(size));
}
@@ -70,9 +70,9 @@ public class TestEncryption {
@Test
public void testOddSizedBlocks() throws Exception {
byte[] key = new byte[16];
Bytes.random(key);
Bytes.secureRandom(key);
byte[] iv = new byte[16];
Bytes.random(iv);
Bytes.secureRandom(iv);
for (int size: new int[] { 3, 7, 11, 23, 47, 79, 119, 175 }) {
checkTransformSymmetry(key, iv, getRandomBlock(size));
}
@@ -81,9 +81,9 @@ public class TestEncryption {
@Test
public void testTypicalHFileBlocks() throws Exception {
byte[] key = new byte[16];
Bytes.random(key);
Bytes.secureRandom(key);
byte[] iv = new byte[16];
Bytes.random(iv);
Bytes.secureRandom(iv);
for (int size: new int[] { 4 * 1024, 8 * 1024, 64 * 1024, 128 * 1024 }) {
checkTransformSymmetry(key, iv, getRandomBlock(size));
}

@@ -23,7 +23,7 @@ import static org.junit.Assert.assertTrue;

import java.math.BigInteger;
import java.util.Arrays;
import java.util.Random;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -81,9 +81,8 @@ public class TestLRUDictionary {

@Test
public void testBasic() {
Random rand = new Random();
byte[] testBytes = new byte[10];
rand.nextBytes(testBytes);
Bytes.random(testBytes);

// Verify that our randomly generated array doesn't exist in the dictionary
assertEquals(-1, testee.findEntry(testBytes, 0, testBytes.length));

@@ -36,7 +36,7 @@ public class LoadTestKVGenerator {
private static int logLimit = 10;

/** A random number generator for determining value size */
private Random randomForValueSize = new Random();
private Random randomForValueSize = new Random(); // Seed may be set with Random#setSeed

private final int minValueSize;
private final int maxValueSize;

@@ -21,8 +21,11 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

import java.util.Random;
import java.util.TreeMap;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -54,7 +57,7 @@ public class TestAvlUtil {
final TreeMap<Integer, Object> treeMap = new TreeMap<>();
TestAvlNode root = null;

final Random rand = new Random();
Random rand = ThreadLocalRandom.current();
for (int i = 0; i < NELEM; ++i) {
int key = rand.nextInt(MAX_KEY);
if (AvlTree.get(root, key, KEY_COMPARATOR) != null) {

@@ -24,7 +24,7 @@ import static org.junit.Assert.fail;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.nio.ByteBuff;
@@ -39,8 +39,6 @@ import org.junit.experimental.categories.Category;
@Category({ MiscTests.class, SmallTests.class })
public class TestByteBufferArray {

private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTime());

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestByteBufferArray.class);
@@ -87,7 +85,7 @@ public class TestByteBufferArray {

private ByteBuff createByteBuff(int len) {
assert len >= 0;
int pos = len == 0 ? 0 : RANDOM.nextInt(len);
int pos = len == 0 ? 0 : ThreadLocalRandom.current().nextInt(len);
ByteBuff b = ByteBuff.wrap(ByteBuffer.allocate(2 * len));
b.position(pos).limit(pos + len);
return b;

@@ -38,6 +38,8 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -385,14 +387,11 @@ public class TestBytes {

@Test
public void testToStringBytesBinaryReversible() {
// let's run test with 1000 randomly generated byte arrays
Random rand = new Random(EnvironmentEdgeManager.currentTime());
byte[] randomBytes = new byte[1000];
for (int i = 0; i < 1000; i++) {
rand.nextBytes(randomBytes);
Bytes.random(randomBytes);
verifyReversibleForBytes(randomBytes);
}

// some specific cases
verifyReversibleForBytes(new byte[] {});
verifyReversibleForBytes(new byte[] {'\\', 'x', 'A', 'D'});
@@ -597,10 +596,10 @@ public class TestBytes {
List<byte[]> testByteData = new ArrayList<>(5);
testByteData.addAll(Arrays.asList(new byte[0], new byte[1], new byte[10],
new byte[] { 1, 2, 3, 4, 5 }, new byte[] { (byte) 0xFF }));
Random r = new Random();
Random rand = ThreadLocalRandom.current();
for (int i = 0; i < 20; i++) {
byte[] bytes = new byte[r.nextInt(100)];
r.nextBytes(bytes);
byte[] bytes = new byte[rand.nextInt(100)];
Bytes.random(bytes);
testByteData.add(bytes);
}

@@ -22,7 +22,6 @@ import static org.junit.Assert.assertNotEquals;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -41,13 +40,13 @@ public class TestCompatibilitySingletonFactory {
HBaseClassTestRule.forClass(TestCompatibilitySingletonFactory.class);

private static final int ITERATIONS = 100000;
private static final Random RANDOM = new Random();

private class TestCompatibilitySingletonFactoryCallable implements Callable<String> {

@Override
public String call() throws Exception {
Thread.sleep(RANDOM.nextInt(10));
// XXX: Why is this sleep here?
Thread.sleep(10);
RandomStringGenerator
instance =
CompatibilitySingletonFactory.getInstance(RandomStringGenerator.class);

@@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.http;

import java.io.IOException;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
@@ -114,12 +116,12 @@ public class TestServletFilter extends HttpServerFunctionalTest {
final String hadooplogoURL = "/static/hadoop-logo.jpg";

final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL};
final Random ran = new Random();
final Random rand = ThreadLocalRandom.current();
final int[] sequence = new int[50];

//generate a random sequence and update counts
for(int i = 0; i < sequence.length; i++) {
sequence[i] = ran.nextInt(urls.length);
sequence[i] = rand.nextInt(urls.length);
}

//access the urls as the sequence

@@ -23,10 +23,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -258,7 +256,8 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
return null;
}
ArrayList<String> namespaceList = new ArrayList<>(namespaceMap.keySet());
String randomKey = namespaceList.get(RandomUtils.nextInt(0, namespaceList.size()));
String randomKey = namespaceList.get(ThreadLocalRandom.current()
.nextInt(namespaceList.size()));
NamespaceDescriptor randomNsd = namespaceMap.get(randomKey);
// remove from namespaceMap
namespaceMap.remove(randomKey);
@@ -307,12 +306,12 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {

private NamespaceDescriptor createNamespaceDesc() {
String namespaceName = "itnamespace" + String.format("%010d",
RandomUtils.nextInt());
ThreadLocalRandom.current().nextInt());
NamespaceDescriptor nsd = NamespaceDescriptor.create(namespaceName).build();

nsd.setConfiguration(
nsTestConfigKey,
String.format("%010d", RandomUtils.nextInt()));
String.format("%010d", ThreadLocalRandom.current().nextInt()));
return nsd;
}
}
@@ -332,7 +331,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
NamespaceDescriptor modifiedNsd = NamespaceDescriptor.create(namespaceName).build();
String nsValueNew;
do {
nsValueNew = String.format("%010d", RandomUtils.nextInt());
nsValueNew = String.format("%010d", ThreadLocalRandom.current().nextInt());
} while (selected.getConfigurationValue(nsTestConfigKey).equals(nsValueNew));
modifiedNsd.setConfiguration(nsTestConfigKey, nsValueNew);
admin.modifyNamespace(modifiedNsd);
@@ -398,8 +397,8 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
return null;
}
ArrayList<TableName> tableList = new ArrayList<>(tableMap.keySet());
TableName randomKey = tableList.get(RandomUtils.nextInt(0, tableList.size()));
TableDescriptor randomTd = tableMap.remove(randomKey);
TableName key = tableList.get(ThreadLocalRandom.current().nextInt(tableList.size()));
TableDescriptor randomTd = tableMap.remove(key);
return randomTd;
}
}
@@ -437,8 +436,9 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
}

private TableDescriptor createTableDesc() {
String tableName = String.format("ittable-%010d", RandomUtils.nextInt());
String familyName = "cf-" + Math.abs(RandomUtils.nextInt());
String tableName = String.format("ittable-%010d",
ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE));
String familyName = "cf-" + ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE);
return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName))
.build();
@@ -582,8 +582,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
LOG.info("No column families in table: " + td);
return null;
}
ColumnFamilyDescriptor randomCfd = families[RandomUtils.nextInt(0, families.length)];
return randomCfd;
return families[ThreadLocalRandom.current().nextInt(families.length)];
}
}

@@ -600,7 +599,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
try {
ColumnFamilyDescriptor cfd = createFamilyDesc();
if (selected.hasColumnFamily(cfd.getName())){
LOG.info(new String(cfd.getName()) + " already exists in table "
LOG.info(Bytes.toString(cfd.getName()) + " already exists in table "
+ selected.getTableName());
return;
}
@@ -625,7 +624,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
}

private ColumnFamilyDescriptor createFamilyDesc() {
String familyName = String.format("cf-%010d", RandomUtils.nextInt());
String familyName = String.format("cf-%010d", ThreadLocalRandom.current().nextInt());
return ColumnFamilyDescriptorBuilder.of(familyName);
}
}
@@ -644,7 +643,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
}

Admin admin = connection.getAdmin();
int versions = RandomUtils.nextInt(0, 10) + 3;
int versions = ThreadLocalRandom.current().nextInt(10) + 3;
try {
TableName tableName = selected.getTableName();
LOG.info("Altering versions of column family: " + columnDesc + " to: " + versions +
@@ -700,7 +699,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
// possible DataBlockEncoding ids
DataBlockEncoding[] possibleIds = {DataBlockEncoding.NONE, DataBlockEncoding.PREFIX,
DataBlockEncoding.DIFF, DataBlockEncoding.FAST_DIFF, DataBlockEncoding.ROW_INDEX_V1};
short id = possibleIds[RandomUtils.nextInt(0, possibleIds.length)].getId();
short id = possibleIds[ThreadLocalRandom.current().nextInt(possibleIds.length)].getId();
LOG.info("Altering encoding of column family: " + columnDesc + " to: " + id +
" in table: " + tableName);

@@ -788,17 +787,18 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
int average_rows = 1;
int numRows = average_rows * numRegions;
LOG.info("Adding " + numRows + " rows to table: " + selected);
byte[] value = new byte[10];
for (int i = 0; i < numRows; i++){
// nextInt(Integer.MAX_VALUE)) to return positive numbers only
byte[] rowKey = Bytes.toBytes(
"row-" + String.format("%010d", RandomUtils.nextInt()));
"row-" + String.format("%010d", ThreadLocalRandom.current().nextInt()));
ColumnFamilyDescriptor cfd = selectFamily(selected);
if (cfd == null){
return;
}
byte[] family = cfd.getName();
byte[] qualifier = Bytes.toBytes("col-" + RandomUtils.nextInt() % 10);
byte[] value = Bytes.toBytes("val-" + RandomStringUtils.randomAlphanumeric(10));
byte[] qualifier = Bytes.toBytes("col-" + ThreadLocalRandom.current().nextInt(10));
Bytes.random(value);
Put put = new Put(rowKey);
put.addColumn(family, qualifier, value);
table.put(put);
@@ -842,7 +842,8 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
public void run() {
while (running.get()) {
// select random action
ACTION selectedAction = ACTION.values()[RandomUtils.nextInt() % ACTION.values().length];
ACTION selectedAction =
ACTION.values()[ThreadLocalRandom.current().nextInt(ACTION.values().length)];
this.action = selectedAction;
LOG.info("Performing Action: " + selectedAction);

@@ -875,7 +876,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
break;
case DELETE_TABLE:
// reduce probability of deleting table to 20%
if (RandomUtils.nextInt(0, 100) < 20) {
if (ThreadLocalRandom.current().nextInt(100) < 20) {
new DeleteTableAction().perform();
}
break;
@@ -884,7 +885,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
break;
case DELETE_COLUMNFAMILY:
// reduce probability of deleting column family to 20%
if (RandomUtils.nextInt(0, 100) < 20) {
if (ThreadLocalRandom.current().nextInt(100) < 20) {
new DeleteColumnFamilyAction().perform();
}
break;

@@ -26,15 +26,15 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseCluster;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.IntegrationTestBase;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
@@ -286,9 +287,10 @@ public abstract class Action {
List<byte[]> regions = new LinkedList<>(serverLoad.getRegionMetrics().keySet());
int victimRegionCount = (int)Math.ceil(fractionOfRegions * regions.size());
getLogger().debug("Removing {} regions from {}", victimRegionCount, sn);
Random rand = ThreadLocalRandom.current();
for (int i = 0; i < victimRegionCount; ++i) {
int victimIx = RandomUtils.nextInt(0, regions.size());
String regionId = HRegionInfo.encodeRegionName(regions.remove(victimIx));
int victimIx = rand.nextInt(regions.size());
String regionId = RegionInfo.encodeRegionName(regions.remove(victimIx));
victimRegions.add(Bytes.toBytes(regionId));
}
}
@@ -296,13 +298,14 @@ public abstract class Action {
getLogger().info("Moving {} regions from {} servers to {} different servers",
victimRegions.size(), fromServers.size(), toServers.size());
Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin();
Random rand = ThreadLocalRandom.current();
for (byte[] victimRegion : victimRegions) {
// Don't keep moving regions if we're
// trying to stop the monkey.
if (context.isStopping()) {
break;
}
int targetIx = RandomUtils.nextInt(0, toServers.size());
int targetIx = rand.nextInt(toServers.size());
admin.move(victimRegion, toServers.get(targetIx));
}
}

@@ -18,7 +18,7 @@

package org.apache.hadoop.hbase.chaos.actions;

import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.BloomFilterUtil;
@@ -49,14 +49,13 @@ public class ChangeBloomFilterAction extends Action {

@Override
public void perform() throws Exception {
final Random random = new Random();
final BloomType[] bloomArray = BloomType.values();
final int bloomArraySize = bloomArray.length;

getLogger().info("Performing action: Change bloom filter on all columns of table " + tableName);

modifyAllTableColumns(tableName, (columnName, columnBuilder) -> {
BloomType bloomType = bloomArray[random.nextInt(bloomArraySize)];
BloomType bloomType = bloomArray[ThreadLocalRandom.current().nextInt(bloomArraySize)];
getLogger().debug("Performing action: About to set bloom filter type to "
+ bloomType + " on column " + columnName + " of table " + tableName);
columnBuilder.setBloomFilterType(bloomType);

@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.io.compress.Compressor;
@@ -31,12 +32,10 @@ import org.slf4j.LoggerFactory;
*/
public class ChangeCompressionAction extends Action {
private final TableName tableName;
private final Random random;
private static final Logger LOG = LoggerFactory.getLogger(ChangeCompressionAction.class);

public ChangeCompressionAction(TableName tableName) {
this.tableName = tableName;
this.random = new Random();
}

@Override protected Logger getLogger() {
@@ -48,16 +47,15 @@ public class ChangeCompressionAction extends Action {
// Possible compression algorithms. If an algorithm is not supported,
// modifyTable will fail, so there is no harm.
Algorithm[] possibleAlgos = Algorithm.values();

// Since not every compression algorithm is supported,
// let's use the same algorithm for all column families.

Random rand = ThreadLocalRandom.current();
// If an unsupported compression algorithm is chosen, pick a different one.
// This is to work around the issue that modifyTable() does not throw remote
// exception.
Algorithm algo;
do {
algo = possibleAlgos[random.nextInt(possibleAlgos.length)];
algo = possibleAlgos[rand.nextInt(possibleAlgos.length)];

try {
Compressor c = algo.getCompressor();
@@ -75,7 +73,7 @@ public class ChangeCompressionAction extends Action {
getLogger().debug("Performing action: Changing compression algorithms on "
+ tableName.getNameAsString() + " to " + chosenAlgo);
modifyAllTableColumns(tableName, columnFamilyDescriptorBuilder -> {
if (random.nextBoolean()) {
if (rand.nextBoolean()) {
columnFamilyDescriptorBuilder.setCompactionCompressionType(chosenAlgo);
} else {
columnFamilyDescriptorBuilder.setCompressionType(chosenAlgo);

@@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.slf4j.Logger;
@@ -30,12 +30,10 @@ import org.slf4j.LoggerFactory;
*/
public class ChangeEncodingAction extends Action {
private final TableName tableName;
private final Random random;
private static final Logger LOG = LoggerFactory.getLogger(ChangeEncodingAction.class);

public ChangeEncodingAction(TableName tableName) {
this.tableName = tableName;
this.random = new Random();
}

@Override protected Logger getLogger() {
@@ -47,9 +45,8 @@ public class ChangeEncodingAction extends Action {
getLogger().debug("Performing action: Changing encodings on " + tableName);
// possible DataBlockEncoding id's
final int[] possibleIds = {0, 2, 3, 4, 7};

modifyAllTableColumns(tableName, (columnName, columnBuilder) -> {
short id = (short) possibleIds[random.nextInt(possibleIds.length)];
short id = (short) possibleIds[ThreadLocalRandom.current().nextInt(possibleIds.length)];
DataBlockEncoding encoding = DataBlockEncoding.getEncodingById(id);
columnBuilder.setDataBlockEncoding(encoding);
getLogger().debug("Set encoding of column family " + columnName + " to: " + encoding);

@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hbase.chaos.actions;

import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -33,7 +33,6 @@ public class ChangeSplitPolicyAction extends Action {
private static final Logger LOG = LoggerFactory.getLogger(ChangeSplitPolicyAction.class);
private final TableName tableName;
private final String[] possiblePolicies;
private final Random random;

public ChangeSplitPolicyAction(TableName tableName) {
this.tableName = tableName;
@@ -42,7 +41,6 @@ public class ChangeSplitPolicyAction extends Action {
ConstantSizeRegionSplitPolicy.class.getName(),
DisabledRegionSplitPolicy.class.getName()
};
this.random = new Random();
}

@Override protected Logger getLogger() {
@@ -53,11 +51,11 @@ public class ChangeSplitPolicyAction extends Action {
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
Admin admin = util.getAdmin();

getLogger().info("Performing action: Change split policy of table " + tableName);
TableDescriptor tableDescriptor = admin.getDescriptor(tableName);
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor);
String chosenPolicy = possiblePolicies[random.nextInt(possiblePolicies.length)];
String chosenPolicy =
possiblePolicies[ThreadLocalRandom.current().nextInt(possiblePolicies.length)];
builder.setRegionSplitPolicyClassName(chosenPolicy);
getLogger().info("Changing " + tableName + " split policy to " + chosenPolicy);
admin.modifyTable(builder.build());

@@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.TableName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -33,11 +33,8 @@ public class ChangeVersionsAction extends Action {
private static final Logger LOG = LoggerFactory.getLogger(ChangeVersionsAction.class);
private final TableName tableName;

private final Random random;

public ChangeVersionsAction(TableName tableName) {
this.tableName = tableName;
this.random = new Random();
}

@Override protected Logger getLogger() {
@@ -46,8 +43,7 @@ public class ChangeVersionsAction extends Action {

@Override
public void perform() throws IOException {
final int versions = random.nextInt(3) + 1;

final int versions = ThreadLocalRandom.current().nextInt(3) + 1;
getLogger().debug("Performing action: Changing versions on " + tableName + " to " + versions);
modifyAllTableColumns(tableName, columnBuilder -> {
columnBuilder.setMinVersions(versions).setMaxVersions(versions);

@@ -18,7 +18,7 @@

package org.apache.hadoop.hbase.chaos.actions;

import org.apache.commons.lang3.RandomUtils;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -39,8 +39,7 @@ public class CompactMobAction extends Action {
this(-1, tableName, majorRatio);
}

public CompactMobAction(
int sleepTime, TableName tableName, float majorRatio) {
public CompactMobAction(int sleepTime, TableName tableName, float majorRatio) {
this.tableName = tableName;
this.majorRatio = (int) (100 * majorRatio);
this.sleepTime = sleepTime;
@@ -54,7 +53,7 @@ public class CompactMobAction extends Action {
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
Admin admin = util.getAdmin();
boolean major = RandomUtils.nextInt(0, 100) < majorRatio;
boolean major = ThreadLocalRandom.current().nextInt(100) < majorRatio;

// Don't try the modify if we're stopping
if (context.isStopping()) {

@@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions;

import java.util.List;
import org.apache.commons.lang3.RandomUtils;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
@@ -38,13 +38,11 @@ public class CompactRandomRegionOfTableAction extends Action {
private final long sleepTime;
private final TableName tableName;

public CompactRandomRegionOfTableAction(
TableName tableName, float majorRatio) {
public CompactRandomRegionOfTableAction(TableName tableName, float majorRatio) {
this(-1, tableName, majorRatio);
}

public CompactRandomRegionOfTableAction(
int sleepTime, TableName tableName, float majorRatio) {
public CompactRandomRegionOfTableAction(int sleepTime, TableName tableName, float majorRatio) {
this.majorRatio = (int) (100 * majorRatio);
this.sleepTime = sleepTime;
this.tableName = tableName;
@@ -58,7 +56,7 @@ public class CompactRandomRegionOfTableAction extends Action {
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
Admin admin = util.getAdmin();
boolean major = RandomUtils.nextInt(0, 100) < majorRatio;
boolean major = ThreadLocalRandom.current().nextInt(100) < majorRatio;

getLogger().info("Performing action: Compact random region of table "
+ tableName + ", major=" + major);

@@ -18,7 +18,7 @@

package org.apache.hadoop.hbase.chaos.actions;

import org.apache.commons.lang3.RandomUtils;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -54,7 +54,7 @@ public class CompactTableAction extends Action {
public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
Admin admin = util.getAdmin();
boolean major = RandomUtils.nextInt(0, 100) < majorRatio;
boolean major = ThreadLocalRandom.current().nextInt(100) < majorRatio;

getLogger().info("Performing action: Compact table " + tableName + ", major=" + major);
try {

@@ -18,7 +18,8 @@

package org.apache.hadoop.hbase.chaos.actions;

import org.apache.commons.lang3.RandomUtils;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
@@ -56,15 +57,15 @@ public class CorruptDataFilesAction extends Action {
Path rootDir = CommonFSUtils.getRootDir(getConf());
Path defaultDir = rootDir.suffix("/data/default");
RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(defaultDir, true);
Random rand = ThreadLocalRandom.current();
while (iterator.hasNext()){
LocatedFileStatus status = iterator.next();
if(!HFile.isHFileFormat(fs, status.getPath())){
continue;
}
if(RandomUtils.nextFloat(0, 100) > chance){
if ((100 * rand.nextFloat()) > chance){
continue;
}

FSDataOutputStream out = fs.create(status.getPath(), true);
try {
out.write(0);

@@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions;

import java.io.IOException;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@@ -35,13 +35,11 @@ public class DecreaseMaxHFileSizeAction extends Action {

private final long sleepTime;
private final TableName tableName;
private final Random random;
private Admin admin;

public DecreaseMaxHFileSizeAction(long sleepTime, TableName tableName) {
this.sleepTime = sleepTime;
this.tableName = tableName;
this.random = new Random();
}

@Override protected Logger getLogger() {
@@ -75,7 +73,8 @@ public class DecreaseMaxHFileSizeAction extends Action {

// We don't want to go too far below 1gb.
// So go to about 1gb +/- 512 on each side.
newValue = Math.max(minFileSize, newValue) - (512 - random.nextInt(1024));
newValue = Math.max(minFileSize, newValue) -
(512 - ThreadLocalRandom.current().nextInt(1024));

// Change the table descriptor.
TableDescriptor modifiedTable =

@@ -18,7 +18,8 @@

package org.apache.hadoop.hbase.chaos.actions;

import org.apache.commons.lang3.RandomUtils;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
@@ -54,12 +55,13 @@ public class DeleteDataFilesAction extends Action {
Path rootDir = CommonFSUtils.getRootDir(getConf());
Path defaultDir = rootDir.suffix("/data/default");
RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(defaultDir, true);
Random rand = ThreadLocalRandom.current();
while (iterator.hasNext()){
LocatedFileStatus status = iterator.next();
if(!HFile.isHFileFormat(fs, status.getPath())){
continue;
}
if(RandomUtils.nextFloat(0, 100) > chance){
if ((100 * rand.nextFloat()) > chance){
continue;
}
fs.delete(status.getPath(), true);

@ -21,7 +21,8 @@ package org.apache.hadoop.hbase.chaos.actions;
|
|||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.util.RegionMover;
|
||||
import org.apache.hadoop.util.Shell;
|
||||
|
@ -47,10 +48,9 @@ public class GracefulRollingRestartRsAction extends RestartActionBaseAction {
|
|||
public void perform() throws Exception {
|
||||
getLogger().info("Performing action: Rolling restarting non-master region servers");
|
||||
List<ServerName> selectedServers = selectServers();
|
||||
|
||||
getLogger().info("Disabling balancer to make unloading possible");
|
||||
setBalancer(false, true);
|
||||
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
for (ServerName server : selectedServers) {
|
||||
String rsName = server.getAddress().toString();
|
||||
try (RegionMover rm =
|
||||
|
@ -64,7 +64,7 @@ public class GracefulRollingRestartRsAction extends RestartActionBaseAction {
|
|||
} catch (Shell.ExitCodeException e) {
|
||||
getLogger().info("Problem restarting but presume successful; code={}", e.getExitCode(), e);
|
||||
}
|
||||
sleep(RandomUtils.nextInt(0, (int)sleepTime));
|
||||
sleep(rand.nextInt((int)sleepTime));
|
||||
}
|
||||
getLogger().info("Enabling balancer");
|
||||
setBalancer(true, true);
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
package org.apache.hadoop.hbase.chaos.actions;
|
||||
|
||||
import java.util.List;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
|
@ -61,7 +61,7 @@ public class MergeRandomAdjacentRegionsOfTableAction extends Action {
|
|||
return;
|
||||
}
|
||||
|
||||
int i = RandomUtils.nextInt(0, regions.size() - 1);
|
||||
int i = ThreadLocalRandom.current().nextInt(regions.size() - 1);
|
||||
RegionInfo a = regions.get(i++);
|
||||
RegionInfo b = regions.get(i);
|
||||
getLogger().debug("Merging " + a.getRegionNameAsString() + " and " + b.getRegionNameAsString());
|
||||
|
|
|
@ -22,7 +22,7 @@ import java.io.IOException;
|
|||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
|
@ -93,9 +93,10 @@ public class MoveRegionsOfTableAction extends Action {
|
|||
return serversList.toArray(new ServerName[0]);
|
||||
}
|
||||
|
||||
static void moveRegion(Admin admin, ServerName [] servers, RegionInfo regionInfo, Logger logger) {
|
||||
static void moveRegion(Admin admin, ServerName [] servers, RegionInfo regionInfo,
|
||||
Logger logger) {
|
||||
try {
|
||||
ServerName destServerName = servers[RandomUtils.nextInt(0, servers.length)];
|
||||
ServerName destServerName = servers[ThreadLocalRandom.current().nextInt(servers.length)];
|
||||
logger.debug("Moving {} to {}", regionInfo.getRegionNameAsString(), destServerName);
|
||||
admin.move(regionInfo.getEncodedNameAsBytes(), destServerName);
|
||||
} catch (Exception ex) {
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.chaos.actions;
|
|||
import java.io.IOException;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
|
||||
|
@ -39,12 +40,10 @@ public class RemoveColumnAction extends Action {
|
|||
private final TableName tableName;
|
||||
private final Set<String> protectedColumns;
|
||||
private Admin admin;
|
||||
private final Random random;
|
||||
|
||||
public RemoveColumnAction(TableName tableName, Set<String> protectedColumns) {
|
||||
this.tableName = tableName;
|
||||
this.protectedColumns = protectedColumns;
|
||||
random = new Random();
|
||||
}
|
||||
|
||||
@Override protected Logger getLogger() {
|
||||
|
@ -61,15 +60,15 @@ public class RemoveColumnAction extends Action {
|
|||
public void perform() throws Exception {
|
||||
TableDescriptor tableDescriptor = admin.getDescriptor(tableName);
|
||||
ColumnFamilyDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
|
||||
if (columnDescriptors.length <= (protectedColumns == null ? 1 : protectedColumns.size())) {
|
||||
return;
|
||||
}
|
||||
|
||||
int index = random.nextInt(columnDescriptors.length);
|
||||
int index = rand.nextInt(columnDescriptors.length);
|
||||
while(protectedColumns != null &&
|
||||
protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
|
||||
index = random.nextInt(columnDescriptors.length);
|
||||
index = rand.nextInt(columnDescriptors.length);
|
||||
}
|
||||
byte[] colDescName = columnDescriptors[index].getName();
|
||||
getLogger().debug("Performing action: Removing " + Bytes.toString(colDescName)+ " from "
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
package org.apache.hadoop.hbase.chaos.actions;
|
||||
|
||||
import java.util.List;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.HRegionLocation;
|
||||
import org.apache.hadoop.hbase.client.RegionLocator;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -46,8 +46,9 @@ public class RestartRsHoldingTableAction extends RestartActionBaseAction {
|
|||
public void perform() throws Exception {
|
||||
getLogger().info(
|
||||
"Performing action: Restart random RS holding table " + this.locator.getName());
|
||||
|
||||
List<HRegionLocation> locations = locator.getAllRegionLocations();
|
||||
restartRs(locations.get(RandomUtils.nextInt(0, locations.size())).getServerName(), sleepTime);
|
||||
restartRs(locations.get(ThreadLocalRandom.current().nextInt(locations.size()))
|
||||
.getServerName(),
|
||||
sleepTime);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,7 +25,8 @@ import java.util.LinkedList;
|
|||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Queue;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -70,10 +71,9 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
|
|||
getLogger().info("Performing action: Rolling batch restarting {}% of region servers",
|
||||
(int)(ratio * 100));
|
||||
List<ServerName> selectedServers = selectServers();
|
||||
|
||||
Queue<ServerName> serversToBeKilled = new LinkedList<>(selectedServers);
|
||||
LinkedList<ServerName> deadServers = new LinkedList<>();
|
||||
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
// loop while there are servers to be killed or dead servers to be restarted
|
||||
while ((!serversToBeKilled.isEmpty() || !deadServers.isEmpty()) && !context.isStopping()) {
|
||||
|
||||
|
@ -87,7 +87,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
|
|||
action = KillOrStart.START;
|
||||
} else {
|
||||
// do a coin toss
|
||||
action = RandomUtils.nextBoolean() ? KillOrStart.KILL : KillOrStart.START;
|
||||
action = rand.nextBoolean() ? KillOrStart.KILL : KillOrStart.START;
|
||||
}
|
||||
|
||||
ServerName server;
|
||||
|
@ -120,7 +120,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
|
|||
break;
|
||||
}
|
||||
|
||||
sleep(RandomUtils.nextInt(0, (int)sleepTime));
|
||||
sleep(rand.nextInt((int)sleepTime));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -22,7 +22,8 @@ import java.io.IOException;
|
|||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Queue;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
|
||||
import org.apache.hadoop.hbase.util.Threads;
|
||||
|
@ -66,10 +67,9 @@ public class RollingBatchSuspendResumeRsAction extends Action {
|
|||
getLogger().info("Performing action: Rolling batch restarting {}% of region servers",
|
||||
(int) (ratio * 100));
|
||||
List<ServerName> selectedServers = selectServers();
|
||||
|
||||
Queue<ServerName> serversToBeSuspended = new LinkedList<>(selectedServers);
|
||||
Queue<ServerName> suspendedServers = new LinkedList<>();
|
||||
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
// loop while there are servers to be suspended or suspended servers to be resumed
|
||||
while ((!serversToBeSuspended.isEmpty() || !suspendedServers.isEmpty()) && !context
|
||||
.isStopping()) {
|
||||
|
@ -84,7 +84,7 @@ public class RollingBatchSuspendResumeRsAction extends Action {
|
|||
action = SuspendOrResume.RESUME;
|
||||
} else {
|
||||
// do a coin toss
|
||||
action = RandomUtils.nextBoolean() ? SuspendOrResume.SUSPEND : SuspendOrResume.RESUME;
|
||||
action = rand.nextBoolean() ? SuspendOrResume.SUSPEND : SuspendOrResume.RESUME;
|
||||
}
|
||||
|
||||
ServerName server;
|
||||
|
|
|
@ -26,8 +26,7 @@ import org.slf4j.Logger;
|
|||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class SplitAllRegionOfTableAction extends Action {
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(SplitAllRegionOfTableAction.class);
|
||||
private static final Logger LOG = LoggerFactory.getLogger(SplitAllRegionOfTableAction.class);
|
||||
private static final int DEFAULT_MAX_SPLITS = 3;
|
||||
private static final String MAX_SPLIT_KEY = "hbase.chaosmonkey.action.maxFullTableSplits";
|
||||
|
||||
|
@ -39,7 +38,6 @@ public class SplitAllRegionOfTableAction extends Action {
|
|||
this.tableName = tableName;
|
||||
}
|
||||
|
||||
|
||||
public void init(ActionContext context) throws IOException {
|
||||
super.init(context);
|
||||
this.maxFullTableSplits = getConf().getInt(MAX_SPLIT_KEY, DEFAULT_MAX_SPLITS);
|
||||
|
@ -57,8 +55,6 @@ public class SplitAllRegionOfTableAction extends Action {
|
|||
if (context.isStopping()) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
// Don't always split. This should allow splitting of a full table later in the run
|
||||
if (ThreadLocalRandom.current().nextDouble()
|
||||
< (((double) splits) / ((double) maxFullTableSplits)) / ((double) 2)) {
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
|
||||
package org.apache.hadoop.hbase.chaos.actions;
|
||||
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
|
@ -31,11 +31,9 @@ import org.slf4j.LoggerFactory;
|
|||
public class TruncateTableAction extends Action {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(TruncateTableAction.class);
|
||||
private final TableName tableName;
|
||||
private final Random random;
|
||||
|
||||
public TruncateTableAction(String tableName) {
|
||||
this.tableName = TableName.valueOf(tableName);
|
||||
this.random = new Random();
|
||||
}
|
||||
|
||||
@Override protected Logger getLogger() {
|
||||
|
@ -52,7 +50,7 @@ public class TruncateTableAction extends Action {
|
|||
return;
|
||||
}
|
||||
|
||||
boolean preserveSplits = random.nextBoolean();
|
||||
boolean preserveSplits = ThreadLocalRandom.current().nextBoolean();
|
||||
getLogger().info("Performing action: Truncate table {} preserve splits {}",
|
||||
tableName.getNameAsString(), preserveSplits);
|
||||
admin.truncateTable(tableName, preserveSplits);
|
||||
|
|
|
@ -22,8 +22,9 @@ import java.util.ArrayList;
|
|||
import java.util.HashSet;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.ClusterMetrics;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.junit.Assert;
|
||||
|
@ -64,15 +65,15 @@ public class UnbalanceKillAndRebalanceAction extends Action {
|
|||
ClusterMetrics status = this.cluster.getClusterMetrics();
|
||||
List<ServerName> victimServers = new LinkedList<>(status.getLiveServerMetrics().keySet());
|
||||
Set<ServerName> killedServers = new HashSet<>();
|
||||
|
||||
int liveCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_LIVE * victimServers.size());
|
||||
int deadCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_DIE * victimServers.size());
|
||||
Assert.assertTrue(
|
||||
"There are not enough victim servers: " + victimServers.size(),
|
||||
liveCount + deadCount < victimServers.size());
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
List<ServerName> targetServers = new ArrayList<>(liveCount);
|
||||
for (int i = 0; i < liveCount + deadCount; ++i) {
|
||||
int victimIx = RandomUtils.nextInt(0, victimServers.size());
|
||||
int victimIx = rand.nextInt(victimServers.size());
|
||||
targetServers.add(victimServers.remove(victimIx));
|
||||
}
|
||||
unbalanceRegions(status, victimServers, targetServers, HOARD_FRC_OF_REGIONS);
|
||||
|
|
|
@ -21,7 +21,8 @@ package org.apache.hadoop.hbase.chaos.actions;
|
|||
import java.util.ArrayList;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.ClusterMetrics;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -57,8 +58,9 @@ public class UnbalanceRegionsAction extends Action {
|
|||
List<ServerName> victimServers = new LinkedList<>(status.getLiveServerMetrics().keySet());
|
||||
int targetServerCount = (int)Math.ceil(fractionOfServers * victimServers.size());
|
||||
List<ServerName> targetServers = new ArrayList<>(targetServerCount);
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
for (int i = 0; i < targetServerCount; ++i) {
|
||||
int victimIx = RandomUtils.nextInt(0, victimServers.size());
|
||||
int victimIx = rand.nextInt(victimServers.size());
|
||||
targetServers.add(victimServers.remove(victimIx));
|
||||
}
|
||||
unbalanceRegions(status, victimServers, targetServers, fractionOfRegions);
|
||||
|
|
|
@ -26,13 +26,11 @@ import java.util.Objects;
|
|||
import java.util.Properties;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import org.apache.hadoop.hbase.IntegrationTestingUtility;
|
||||
import org.apache.hadoop.hbase.chaos.policies.Policy;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
|
||||
/**
|
||||
|
@ -40,7 +38,6 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto
|
|||
*/
|
||||
public class PolicyBasedChaosMonkey extends ChaosMonkey {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(PolicyBasedChaosMonkey.class);
|
||||
private static final long ONE_SEC = 1000;
|
||||
private static final long ONE_MIN = 60 * ONE_SEC;
|
||||
|
||||
|
@ -93,7 +90,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey {
|
|||
|
||||
/** Selects a random item from the given items */
|
||||
public static <T> T selectRandomItem(T[] items) {
|
||||
return items[RandomUtils.nextInt(0, items.length)];
|
||||
return items[ThreadLocalRandom.current().nextInt(items.length)];
|
||||
}
|
||||
|
||||
/** Selects a random item from the given items with weights*/
|
||||
|
@ -103,7 +100,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey {
|
|||
totalWeight += pair.getSecond();
|
||||
}
|
||||
|
||||
int cutoff = RandomUtils.nextInt(0, totalWeight);
|
||||
int cutoff = ThreadLocalRandom.current().nextInt(totalWeight);
|
||||
int cummulative = 0;
|
||||
T item = null;
|
||||
|
||||
|
@ -127,7 +124,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey {
|
|||
List<T> originalItems = Arrays.asList(items);
|
||||
Collections.shuffle(originalItems);
|
||||
|
||||
int startIndex = RandomUtils.nextInt(0, items.length - selectedNumber);
|
||||
int startIndex = ThreadLocalRandom.current().nextInt(items.length - selectedNumber);
|
||||
return originalItems.subList(startIndex, startIndex + selectedNumber);
|
||||
}
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
|
||||
package org.apache.hadoop.hbase.chaos.policies;
|
||||
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
|
||||
import org.apache.hadoop.hbase.util.Threads;
|
||||
|
||||
|
@ -33,7 +33,7 @@ public abstract class PeriodicPolicy extends Policy {
|
|||
@Override
|
||||
public void run() {
|
||||
// Add some jitter.
|
||||
int jitter = RandomUtils.nextInt(0, (int) periodMs);
|
||||
int jitter = ThreadLocalRandom.current().nextInt((int)periodMs);
|
||||
LOG.info("Sleeping for {} ms to add jitter", jitter);
|
||||
Threads.sleep(jitter);
|
||||
|
||||
|
|
|
@ -30,13 +30,12 @@ import java.net.InetSocketAddress;
|
|||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.concurrent.locks.ReadWriteLock;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.codec.Codec;
|
||||
|
@ -88,7 +87,6 @@ public class IntegrationTestRpcClient {
|
|||
}
|
||||
|
||||
class Cluster {
|
||||
Random random = new Random();
|
||||
ReadWriteLock lock = new ReentrantReadWriteLock();
|
||||
HashMap<InetSocketAddress, RpcServer> rpcServers = new HashMap<>();
|
||||
List<RpcServer> serverList = new ArrayList<>();
|
||||
|
@ -134,7 +132,7 @@ public class IntegrationTestRpcClient {
|
|||
return;
|
||||
}
|
||||
int size = rpcServers.size();
|
||||
int rand = random.nextInt(size);
|
||||
int rand = ThreadLocalRandom.current().nextInt(size);
|
||||
rpcServer = serverList.remove(rand);
|
||||
InetSocketAddress address = rpcServer.getListenerAddress();
|
||||
if (address == null) {
|
||||
|
@ -176,7 +174,7 @@ public class IntegrationTestRpcClient {
|
|||
lock.readLock().lock();
|
||||
try {
|
||||
int size = rpcServers.size();
|
||||
int rand = random.nextInt(size);
|
||||
int rand = ThreadLocalRandom.current().nextInt(size);
|
||||
return serverList.get(rand);
|
||||
} finally {
|
||||
lock.readLock().unlock();
|
||||
|
@ -186,7 +184,6 @@ public class IntegrationTestRpcClient {
|
|||
|
||||
static class MiniChaosMonkey extends Thread {
|
||||
AtomicBoolean running = new AtomicBoolean(true);
|
||||
Random random = new Random();
|
||||
AtomicReference<Exception> exception = new AtomicReference<>(null);
|
||||
Cluster cluster;
|
||||
|
||||
|
@ -197,7 +194,7 @@ public class IntegrationTestRpcClient {
|
|||
@Override
|
||||
public void run() {
|
||||
while (running.get()) {
|
||||
if (random.nextBoolean()) {
|
||||
if (ThreadLocalRandom.current().nextBoolean()) {
|
||||
//start a server
|
||||
try {
|
||||
cluster.startServer();
|
||||
|
@ -238,7 +235,6 @@ public class IntegrationTestRpcClient {
|
|||
Cluster cluster;
|
||||
String id;
|
||||
long numCalls = 0;
|
||||
Random random = new Random();
|
||||
|
||||
public SimpleClient(Cluster cluster, AbstractRpcClient<?> rpcClient, String id) {
|
||||
this.cluster = cluster;
|
||||
|
@ -250,7 +246,7 @@ public class IntegrationTestRpcClient {
|
|||
@Override
|
||||
public void run() {
|
||||
while (running.get()) {
|
||||
boolean isBigPayload = random.nextBoolean();
|
||||
boolean isBigPayload = ThreadLocalRandom.current().nextBoolean();
|
||||
String message = isBigPayload ? BIG_PAYLOAD : id + numCalls;
|
||||
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build();
|
||||
EchoResponseProto ret;
|
||||
|
|
|
@ -27,10 +27,9 @@ import java.util.ArrayList;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import org.apache.commons.lang3.RandomStringUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
|
@ -155,7 +154,6 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
|
|||
|
||||
public static class SlowMeCoproScanOperations implements RegionCoprocessor, RegionObserver {
|
||||
static final AtomicLong sleepTime = new AtomicLong(2000);
|
||||
Random r = new Random();
|
||||
AtomicLong countOfNext = new AtomicLong(0);
|
||||
AtomicLong countOfOpen = new AtomicLong(0);
|
||||
public SlowMeCoproScanOperations() {}
|
||||
|
@ -379,7 +377,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
|
|||
taskId = taskId + iteration * numMapTasks;
|
||||
numMapTasks = numMapTasks * numIterations;
|
||||
|
||||
long chainId = Math.abs(new Random().nextLong());
|
||||
long chainId = Math.abs(ThreadLocalRandom.current().nextLong());
|
||||
chainId = chainId - (chainId % numMapTasks) + taskId; // ensure that chainId is unique per task and across iterations
|
||||
LongWritable[] keys = new LongWritable[] {new LongWritable(chainId)};
|
||||
|
||||
|
@ -397,8 +395,6 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
|
|||
public static class LinkedListCreationMapper
|
||||
extends Mapper<LongWritable, LongWritable, ImmutableBytesWritable, KeyValue> {
|
||||
|
||||
private Random rand = new Random();
|
||||
|
||||
@Override
|
||||
protected void map(LongWritable key, LongWritable value, Context context)
|
||||
throws IOException, InterruptedException {
|
||||
|
@ -410,6 +406,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
|
|||
|
||||
long chainLength = context.getConfiguration().getLong(CHAIN_LENGTH_KEY, CHAIN_LENGTH);
|
||||
long nextRow = getNextRow(0, chainLength);
|
||||
byte[] valueBytes = new byte[50];
|
||||
|
||||
for (long i = 0; i < chainLength; i++) {
|
||||
byte[] rk = Bytes.toBytes(currentRow);
|
||||
|
@ -419,9 +416,8 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
|
|||
// What link in the chain this is.
|
||||
KeyValue sortKv = new KeyValue(rk, SORT_FAM, chainIdArray, Bytes.toBytes(i));
|
||||
// Added data so that large stores are created.
|
||||
KeyValue dataKv = new KeyValue(rk, DATA_FAM, chainIdArray,
|
||||
Bytes.toBytes(RandomStringUtils.randomAlphabetic(50))
|
||||
);
|
||||
Bytes.random(valueBytes);
|
||||
KeyValue dataKv = new KeyValue(rk, DATA_FAM, chainIdArray, valueBytes);
|
||||
|
||||
// Emit the key values.
|
||||
context.write(new ImmutableBytesWritable(rk), linkKv);
|
||||
|
@ -435,7 +431,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
|
|||
|
||||
/** Returns a unique row id within this chain for this index */
|
||||
private long getNextRow(long index, long chainLength) {
|
||||
long nextRow = Math.abs(rand.nextLong());
|
||||
long nextRow = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE);
|
||||
// use significant bits from the random number, but pad with index to ensure it is unique
|
||||
// this also ensures that we do not reuse row = 0
|
||||
// row collisions from multiple mappers are fine, since we guarantee unique chainIds
|
||||
|
|
|
@ -22,18 +22,17 @@ import java.io.DataOutput;
|
|||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.SortedSet;
|
||||
import java.util.TreeSet;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
|
@ -344,7 +343,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
|
|||
static class GeneratorRecordReader extends RecordReader<BytesWritable,NullWritable> {
|
||||
private long count;
|
||||
private long numNodes;
|
||||
private Random64 rand;
|
||||
// Use Random64 to avoid issue described in HBASE-21256.
|
||||
private Random64 rand = new Random64();
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
|
@ -371,15 +371,12 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
|
|||
public void initialize(InputSplit arg0, TaskAttemptContext context)
|
||||
throws IOException, InterruptedException {
|
||||
numNodes = context.getConfiguration().getLong(GENERATOR_NUM_ROWS_PER_MAP_KEY, 25000000);
|
||||
// Use Random64 to avoid issue described in HBASE-21256.
|
||||
rand = new Random64();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean nextKeyValue() throws IOException, InterruptedException {
|
||||
return count++ < numNodes;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -457,6 +454,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
|
|||
byte[] tinyValue = new byte[] { 't' };
|
||||
byte[] bigValue = null;
|
||||
Configuration conf;
|
||||
// Use Random64 to avoid issue described in HBASE-21256.
|
||||
private Random64 rand = new Random64();
|
||||
|
||||
volatile boolean walkersStop;
|
||||
int numWalkers;
|
||||
|
@ -494,7 +493,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
|
|||
BIG_FAMILY_VALUE_SIZE_KEY, n, ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, limit);
|
||||
|
||||
bigValue = new byte[n];
|
||||
ThreadLocalRandom.current().nextBytes(bigValue);
|
||||
rand.nextBytes(bigValue);
|
||||
LOG.info("Create a bigValue with " + n + " bytes.");
|
||||
}
|
||||
|
||||
|
@ -642,12 +641,10 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
|
|||
ConcurrentWalker walker;
|
||||
Configuration conf;
|
||||
Context context;
|
||||
Random rand;
|
||||
|
||||
public ContinuousConcurrentWalker(Configuration conf, Context context) {
|
||||
this.conf = conf;
|
||||
this.context = context;
|
||||
rand = new Random();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -681,7 +678,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
|
|||
if (walkersStop) {
|
||||
throw new InterruptedException();
|
||||
}
|
||||
return flushedLoops.get(rand.nextInt(flushedLoops.size()));
|
||||
return flushedLoops.get(ThreadLocalRandom.current().nextInt(flushedLoops.size()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1761,7 +1758,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
|
|||
if (cmd.hasOption('n')) {
|
||||
maxQueries = Long.parseLong(cmd.getOptionValue("n"));
|
||||
}
|
||||
Random rand = new SecureRandom();
|
||||
boolean isSpecificStart = cmd.hasOption('s');
|
||||
|
||||
byte[] startKey = isSpecificStart ? Bytes.toBytesBinary(cmd.getOptionValue('s')) : null;
|
||||
|
@ -1776,7 +1772,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
|
|||
while (numQueries < maxQueries && (numQueries == 0 || !isSpecificStart)) {
|
||||
if (!isSpecificStart) {
|
||||
startKey = new byte[ROWKEY_LENGTH];
|
||||
rand.nextBytes(startKey);
|
||||
Bytes.random(startKey);
|
||||
}
|
||||
CINode node = findStartNode(table, startKey);
|
||||
if (node == null && isSpecificStart) {
|
||||
|
|
|
@ -23,9 +23,9 @@ import static org.junit.Assert.assertTrue;
|
|||
import java.io.IOException;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
|
@ -189,10 +189,7 @@ public void cleanUpCluster() throws Exception {
|
|||
protected BufferedMutator mutator;
|
||||
protected Configuration conf;
|
||||
protected int numBackReferencesPerRow;
|
||||
|
||||
protected String shortTaskId;
|
||||
|
||||
protected Random rand = new Random();
|
||||
protected Counter rowsWritten, refsWritten;
|
||||
|
||||
@Override
|
||||
|
@ -229,8 +226,8 @@ public void cleanUpCluster() throws Exception {
|
|||
|
||||
String suffix = "/" + shortTaskId;
|
||||
byte[] row = Bytes.add(new byte[8], Bytes.toBytes(suffix));
|
||||
|
||||
int BLOCK_SIZE = (int)(recordsToWrite / 100);
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
|
||||
for (long i = 0; i < recordsToWrite;) {
|
||||
long blockStart = i;
|
||||
|
|
|
@ -23,9 +23,9 @@ import java.util.List;
|
|||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.HRegionLocation;
|
||||
|
@ -331,7 +331,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
|
|||
@Override
|
||||
protected long getNextKeyToRead() {
|
||||
// always read a random key, assuming that the writer has finished writing all keys
|
||||
long key = startKey + Math.abs(RandomUtils.nextLong())
|
||||
long key = startKey + ThreadLocalRandom.current().nextLong(Long.MAX_VALUE)
|
||||
% (endKey - startKey);
|
||||
return key;
|
||||
}
|
||||
|
|
|
@ -23,7 +23,8 @@ import static org.junit.Assert.assertTrue;
|
|||
import java.io.IOException;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.List;
|
||||
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
|
@ -57,7 +58,6 @@ import org.apache.hadoop.mapreduce.Job;
|
|||
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
|
||||
import org.apache.hadoop.util.ToolRunner;
|
||||
import org.junit.experimental.categories.Category;
|
||||
|
||||
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
|
||||
|
||||
/**
|
||||
|
@ -166,9 +166,10 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
|
|||
InterruptedException {
|
||||
String suffix = "/" + shortTaskId;
|
||||
int BLOCK_SIZE = (int) (recordsToWrite / 100);
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
for (long i = 0; i < recordsToWrite;) {
|
||||
for (long idx = 0; idx < BLOCK_SIZE && i < recordsToWrite; idx++, i++) {
|
||||
int expIdx = rand.nextInt(BLOCK_SIZE) % VISIBILITY_EXPS_COUNT;
|
||||
int expIdx = rand.nextInt(VISIBILITY_EXPS_COUNT);
|
||||
String exp = VISIBILITY_EXPS[expIdx];
|
||||
byte[] row = Bytes.add(Bytes.toBytes(i), Bytes.toBytes(suffix), Bytes.toBytes(exp));
|
||||
Put p = new Put(row);
|
||||
|
@ -379,10 +380,9 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
|
|||
return 0;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
protected void processOptions(CommandLine cmd) {
|
||||
List args = cmd.getArgList();
|
||||
List<String> args = cmd.getArgList();
|
||||
if (args.size() > 0) {
|
||||
printUsage();
|
||||
throw new RuntimeException("No args expected.");
|
||||
|
|
|
@ -27,6 +27,7 @@ import java.util.Random;
|
|||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
|
@ -48,7 +49,6 @@ import org.junit.Test;
|
|||
import org.junit.experimental.categories.Category;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
|
||||
|
||||
@Category(IntegrationTests.class)
|
||||
|
@ -63,7 +63,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
|
|||
private TableName tableName = TableName.valueOf(TABLE_NAME_DEFAULT);
|
||||
private byte[] familyName = Bytes.toBytes(COLUMN_FAMILY_DEFAULT);
|
||||
private IntegrationTestingUtility util;
|
||||
private Random random = new Random();
|
||||
private Admin admin;
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
|
@ -227,17 +226,18 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
|
|||
private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException {
|
||||
LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000);
|
||||
BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
byte[] value = new byte[300];
|
||||
for (int x = 0; x < 5000; x++) {
|
||||
Span span = TraceUtil.getGlobalTracer().spanBuilder("insertData").startSpan();
|
||||
try (Scope scope = span.makeCurrent()) {
|
||||
for (int i = 0; i < 5; i++) {
|
||||
long rk = random.nextLong();
|
||||
long rk = rand.nextLong();
|
||||
rowKeys.add(rk);
|
||||
Put p = new Put(Bytes.toBytes(rk));
|
||||
for (int y = 0; y < 10; y++) {
|
||||
random.nextBytes(value);
|
||||
p.addColumn(familyName, Bytes.toBytes(random.nextLong()), value);
|
||||
Bytes.random(value);
|
||||
p.addColumn(familyName, Bytes.toBytes(rand.nextLong()), value);
|
||||
}
|
||||
ht.mutate(p);
|
||||
}
|
||||
|
|
|
@ -45,6 +45,7 @@ import java.util.concurrent.ExecutionException;
|
|||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
|
@ -1516,7 +1517,6 @@ public class PerformanceEvaluation extends Configured implements Tool {
|
|||
static class AsyncRandomReadTest extends AsyncTableTest {
|
||||
private final Consistency consistency;
|
||||
private ArrayList<Get> gets;
|
||||
private Random rd = new Random();
|
||||
|
||||
AsyncRandomReadTest(AsyncConnection con, TestOptions options, Status status) {
|
||||
super(con, options, status);
|
||||
|
@ -1530,7 +1530,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
|
|||
@Override
|
||||
boolean testRow(final int i, final long startTime) throws IOException, InterruptedException {
|
||||
if (opts.randomSleep > 0) {
|
||||
Thread.sleep(rd.nextInt(opts.randomSleep));
|
||||
Thread.sleep(ThreadLocalRandom.current().nextInt(opts.randomSleep));
|
||||
}
|
||||
Get get = new Get(getRandomRow(this.rand, opts.totalRows));
|
||||
for (int family = 0; family < opts.families; family++) {
|
||||
|
@ -1938,8 +1938,6 @@ public class PerformanceEvaluation extends Configured implements Tool {
|
|||
static class RandomReadTest extends TableTest {
|
||||
private final Consistency consistency;
|
||||
private ArrayList<Get> gets;
|
||||
private Random rd = new Random();
|
||||
private long numOfReplyFromReplica = 0;
|
||||
|
||||
RandomReadTest(Connection con, TestOptions options, Status status) {
|
||||
super(con, options, status);
|
||||
|
@ -1953,7 +1951,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
|
|||
@Override
|
||||
boolean testRow(final int i, final long startTime) throws IOException, InterruptedException {
|
||||
if (opts.randomSleep > 0) {
|
||||
Thread.sleep(rd.nextInt(opts.randomSleep));
|
||||
Thread.sleep(ThreadLocalRandom.current().nextInt(opts.randomSleep));
|
||||
}
|
||||
Get get = new Get(getRandomRow(this.rand, opts.totalRows));
|
||||
for (int family = 0; family < opts.families; family++) {
|
||||
|
|
|
@ -37,6 +37,8 @@ import java.util.LinkedList;
|
|||
import java.util.NoSuchElementException;
|
||||
import java.util.Queue;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
@ -153,11 +155,11 @@ public class TestPerformanceEvaluation {
|
|||
opts.setNumClientThreads(2);
|
||||
opts = PerformanceEvaluation.calculateRowsAndSize(opts);
|
||||
assertEquals(1000, opts.getPerClientRunRows());
|
||||
Random random = new Random();
|
||||
// assuming we will get one before this loop expires
|
||||
boolean foundValue = false;
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
for (int i = 0; i < 10000000; i++) {
|
||||
int randomRow = PerformanceEvaluation.generateRandomRow(random, opts.totalRows);
|
||||
int randomRow = PerformanceEvaluation.generateRandomRow(rand, opts.totalRows);
|
||||
if (randomRow > 1000) {
|
||||
foundValue = true;
|
||||
break;
|
||||
|
|
|
@ -41,6 +41,7 @@ import java.util.concurrent.Callable;
|
|||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
@ -96,7 +97,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
|
|||
import org.apache.hadoop.hbase.regionserver.HStore;
|
||||
import org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem;
|
||||
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
|
||||
import org.apache.hadoop.hbase.security.SecurityConstants;
|
||||
import org.apache.hadoop.hbase.security.User;
|
||||
import org.apache.hadoop.hbase.testclassification.LargeTests;
|
||||
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
|
||||
|
@ -198,14 +198,13 @@ public class TestHFileOutputFormat2 {
|
|||
|
||||
int taskId = context.getTaskAttemptID().getTaskID().getId();
|
||||
assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
|
||||
Random random = new Random();
|
||||
byte[] key;
|
||||
for (int j = 0; j < tables.length; ++j) {
|
||||
for (int i = 0; i < ROWSPERSPLIT; i++) {
|
||||
random.nextBytes(keyBytes);
|
||||
Bytes.random(keyBytes);
|
||||
// Ensure that unique tasks generate unique keys
|
||||
keyBytes[keyLength - 1] = (byte) (taskId & 0xFF);
|
||||
random.nextBytes(valBytes);
|
||||
Bytes.random(valBytes);
|
||||
key = keyBytes;
|
||||
if (multiTableMapper) {
|
||||
key = MultiTableHFileOutputFormat.createCompositeKey(tables[j].getName(), keyBytes);
|
||||
|
@ -268,14 +267,13 @@ public class TestHFileOutputFormat2 {
|
|||
int taskId = context.getTaskAttemptID().getTaskID().getId();
|
||||
assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
|
||||
|
||||
Random random = new Random();
|
||||
byte[] key;
|
||||
for (int j = 0; j < tables.length; ++j) {
|
||||
for (int i = 0; i < ROWSPERSPLIT; i++) {
|
||||
random.nextBytes(keyBytes);
|
||||
Bytes.random(keyBytes);
|
||||
// Ensure that unique tasks generate unique keys
|
||||
keyBytes[keyLength - 1] = (byte) (taskId & 0xFF);
|
||||
random.nextBytes(valBytes);
|
||||
Bytes.random(valBytes);
|
||||
key = keyBytes;
|
||||
if (multiTableMapper) {
|
||||
key = MultiTableHFileOutputFormat.createCompositeKey(tables[j].getName(), keyBytes);
|
||||
|
@ -556,7 +554,7 @@ public class TestHFileOutputFormat2 {
|
|||
}
|
||||
|
||||
private byte [][] generateRandomStartKeys(int numKeys) {
|
||||
Random random = new Random();
|
||||
Random random = ThreadLocalRandom.current();
|
||||
byte[][] ret = new byte[numKeys][];
|
||||
// first region start key is always empty
|
||||
ret[0] = HConstants.EMPTY_BYTE_ARRAY;
|
||||
|
@ -568,7 +566,7 @@ public class TestHFileOutputFormat2 {
|
|||
}
|
||||
|
||||
private byte[][] generateRandomSplitKeys(int numKeys) {
|
||||
Random random = new Random();
|
||||
Random random = ThreadLocalRandom.current();
|
||||
byte[][] ret = new byte[numKeys][];
|
||||
for (int i = 0; i < numKeys; i++) {
|
||||
ret[i] =
|
||||
|
@ -1222,13 +1220,10 @@ public class TestHFileOutputFormat2 {
|
|||
int taskId = context.getTaskAttemptID().getTaskID().getId();
|
||||
assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
|
||||
final byte [] qualifier = Bytes.toBytes("data");
|
||||
Random random = new Random();
|
||||
for (int i = 0; i < numRows; i++) {
|
||||
|
||||
Bytes.putInt(keyBytes, 0, i);
|
||||
random.nextBytes(valBytes);
|
||||
Bytes.random(valBytes);
|
||||
ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);
|
||||
|
||||
for (byte[] family : families) {
|
||||
Cell kv = new KeyValue(keyBytes, family, qualifier, valBytes);
|
||||
writer.write(key, kv);
|
||||
|
|
|
@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util;
|
|||
import java.io.IOException;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
@ -294,7 +293,7 @@ public class LoadTestTool extends AbstractHBaseTool {
|
|||
}
|
||||
if (cipher != null) {
|
||||
byte[] keyBytes = new byte[cipher.getKeyLength()];
|
||||
new SecureRandom().nextBytes(keyBytes);
|
||||
Bytes.secureRandom(keyBytes);
|
||||
columnDescBuilder.setEncryptionType(cipher.getName());
|
||||
columnDescBuilder.setEncryptionKey(
|
||||
EncryptionUtil.wrapKey(conf,
|
||||
|
|
|
@ -21,6 +21,8 @@ import static org.junit.Assert.assertEquals;
|
|||
|
||||
import java.util.Arrays;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
|
||||
import org.apache.hadoop.hbase.HBaseClassTestRule;
|
||||
import org.apache.hadoop.hbase.testclassification.MiscTests;
|
||||
import org.apache.hadoop.hbase.testclassification.SmallTests;
|
||||
|
@ -71,7 +73,7 @@ public class TestFastLongHistogram {
|
|||
// assumes the uniform distribution
|
||||
FastLongHistogram hist = new FastLongHistogram(100, 0, 100);
|
||||
|
||||
Random rand = new Random();
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
|
||||
for (int n = 0; n < 10; n++) {
|
||||
for (int i = 0; i < 900; i++) {
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
package org.apache.hadoop.hbase.procedure2.store;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
|
@ -31,6 +30,7 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
|
||||
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
|
||||
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
|
||||
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
|
||||
import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
|
||||
|
@ -106,7 +106,7 @@ public abstract class ProcedureStorePerformanceEvaluation<T extends ProcedureSto
|
|||
syncType) : "sync argument can only accept one of these three values: hsync, hflush, nosync";
|
||||
stateSize = getOptionAsInt(cmd, STATE_SIZE_OPTION.getOpt(), DEFAULT_STATE_SIZE);
|
||||
SERIALIZED_STATE = new byte[stateSize];
|
||||
new Random(12345).nextBytes(SERIALIZED_STATE);
|
||||
Bytes.random(SERIALIZED_STATE);
|
||||
}
|
||||
|
||||
private void setUpProcedureStore() throws IOException {
|
||||
|
|
|
@ -22,8 +22,8 @@ import java.util.ArrayList;
|
|||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
@ -136,14 +136,13 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
|
|||
* value denotes delete state.
|
||||
*/
|
||||
private List<Integer> shuffleProcWriteSequence() {
|
||||
Random rand = new Random();
|
||||
List<Integer> procStatesSequence = new ArrayList<>();
|
||||
Set<Integer> toBeDeletedProcs = new HashSet<>();
|
||||
// Add n + 1 entries of the proc id for insert + updates. If proc is chosen for delete, add
|
||||
// extra entry which is marked -ve in the loop after shuffle.
|
||||
for (int procId = 1; procId <= numProcs; ++procId) {
|
||||
procStatesSequence.addAll(Collections.nCopies(updatesPerProc + 1, procId));
|
||||
if (rand.nextFloat() < deleteProcsFraction) {
|
||||
if (ThreadLocalRandom.current().nextFloat() < deleteProcsFraction) {
|
||||
procStatesSequence.add(procId);
|
||||
toBeDeletedProcs.add(procId);
|
||||
}
|
||||
|
|
|
@ -21,7 +21,8 @@ import static org.junit.Assert.assertEquals;
|
|||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
|
||||
import org.apache.hadoop.hbase.HBaseClassTestRule;
|
||||
import org.apache.hadoop.hbase.procedure2.Procedure;
|
||||
import org.apache.hadoop.hbase.testclassification.MasterTests;
|
||||
|
@ -153,13 +154,12 @@ public class TestProcedureStoreTracker {
|
|||
|
||||
final ProcedureStoreTracker tracker = new ProcedureStoreTracker();
|
||||
|
||||
Random rand = new Random(1);
|
||||
for (int i = 0; i < NRUNS; ++i) {
|
||||
assertTrue(tracker.isEmpty());
|
||||
|
||||
int count = 0;
|
||||
while (count < NPROCEDURES) {
|
||||
long procId = rand.nextLong();
|
||||
long procId = ThreadLocalRandom.current().nextLong();
|
||||
if (procId < 1) {
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
|
|||
import static org.junit.Assert.assertTrue;
|
||||
import java.io.IOException;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
|
@ -97,12 +98,12 @@ public class TestStressWALProcedureStore {
|
|||
public void testInsertUpdateDelete() throws Exception {
|
||||
final long LAST_PROC_ID = 19999;
|
||||
final Thread[] thread = new Thread[PROCEDURE_STORE_SLOTS];
|
||||
final AtomicLong procCounter = new AtomicLong((long)Math.round(Math.random() * 100));
|
||||
final Random rand = ThreadLocalRandom.current();
|
||||
final AtomicLong procCounter = new AtomicLong(rand.nextInt(100));
|
||||
for (int i = 0; i < thread.length; ++i) {
|
||||
thread[i] = new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
Random rand = new Random();
|
||||
TestProcedure proc;
|
||||
do {
|
||||
// After HBASE-15579 there may be gap in the procId sequence, trying to simulate that.
|
||||
|
|
|
@ -59,7 +59,7 @@ public class TestZKReplicationPeerStorage {
|
|||
HBaseClassTestRule.forClass(TestZKReplicationPeerStorage.class);
|
||||
|
||||
private static final HBaseZKTestingUtility UTIL = new HBaseZKTestingUtility();
|
||||
|
||||
private static final Random RNG = new Random(); // Seed may be set with Random#setSeed
|
||||
private static ZKReplicationPeerStorage STORAGE;
|
||||
|
||||
@BeforeClass
|
||||
|
@ -96,12 +96,12 @@ public class TestZKReplicationPeerStorage {
|
|||
}
|
||||
|
||||
private ReplicationPeerConfig getConfig(int seed) {
|
||||
Random rand = new Random(seed);
|
||||
return ReplicationPeerConfig.newBuilder().setClusterKey(Long.toHexString(rand.nextLong()))
|
||||
.setReplicationEndpointImpl(Long.toHexString(rand.nextLong()))
|
||||
.setNamespaces(randNamespaces(rand)).setExcludeNamespaces(randNamespaces(rand))
|
||||
.setTableCFsMap(randTableCFs(rand)).setReplicateAllUserTables(rand.nextBoolean())
|
||||
.setBandwidth(rand.nextInt(1000)).build();
|
||||
RNG.setSeed(seed);
|
||||
return ReplicationPeerConfig.newBuilder().setClusterKey(Long.toHexString(RNG.nextLong()))
|
||||
.setReplicationEndpointImpl(Long.toHexString(RNG.nextLong()))
|
||||
.setNamespaces(randNamespaces(RNG)).setExcludeNamespaces(randNamespaces(RNG))
|
||||
.setTableCFsMap(randTableCFs(RNG)).setReplicateAllUserTables(RNG.nextBoolean())
|
||||
.setBandwidth(RNG.nextInt(1000)).build();
|
||||
}
|
||||
|
||||
private void assertSetEquals(Set<String> expected, Set<String> actual) {
|
||||
|
|
|
@ -29,6 +29,8 @@ import java.util.ArrayList;
|
|||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
|
||||
import javax.xml.bind.JAXBContext;
|
||||
import javax.xml.bind.JAXBException;
|
||||
import javax.xml.bind.Marshaller;
|
||||
|
@ -94,7 +96,7 @@ public class TestScannerResource {
|
|||
|
||||
static int insertData(Configuration conf, TableName tableName, String column, double prob)
|
||||
throws IOException {
|
||||
Random rng = new Random();
|
||||
Random rng = ThreadLocalRandom.current();
|
||||
byte[] k = new byte[3];
|
||||
byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column));
|
||||
List<Put> puts = new ArrayList<>();
|
||||
|
|
|
@ -23,14 +23,15 @@ import static org.junit.Assert.assertTrue;
|
|||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.security.SecureRandom;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
import java.util.TreeSet;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
|
@ -52,7 +53,6 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
import org.mockito.Mockito;
|
||||
import org.mockito.invocation.InvocationOnMock;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
|
||||
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
|
||||
|
||||
|
@ -61,7 +61,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
|
|||
*/
|
||||
public class RSGroupableBalancerTestBase extends BalancerTestBase{
|
||||
|
||||
static SecureRandom rand = new SecureRandom();
|
||||
static String[] groups = new String[] {RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4"};
|
||||
static TableName table0 = TableName.valueOf("dt0");
|
||||
static TableName[] tables =
|
||||
|
@ -305,10 +304,10 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{
|
|||
protected List<RegionInfo> randomRegions(int numRegions) {
|
||||
List<RegionInfo> regions = new ArrayList<>(numRegions);
|
||||
byte[] start = new byte[16];
|
||||
Bytes.random(start);
|
||||
byte[] end = new byte[16];
|
||||
rand.nextBytes(start);
|
||||
rand.nextBytes(end);
|
||||
int regionIdx = rand.nextInt(tables.length);
|
||||
Bytes.random(end);
|
||||
int regionIdx = ThreadLocalRandom.current().nextInt(tables.length);
|
||||
for (int i = 0; i < numRegions; i++) {
|
||||
Bytes.putInt(start, 0, numRegions << 1);
|
||||
Bytes.putInt(end, 0, (numRegions << 1) + 1);
|
||||
|
@ -351,6 +350,7 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{
|
|||
|
||||
protected static List<ServerName> generateServers(int numServers) {
|
||||
List<ServerName> servers = new ArrayList<>(numServers);
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
for (int i = 0; i < numServers; i++) {
|
||||
String host = "server" + rand.nextInt(100000);
|
||||
int port = rand.nextInt(60000);
|
||||
|
@ -378,6 +378,7 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{
|
|||
groupMap.put(grpName, RSGroupInfo);
|
||||
index++;
|
||||
}
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
while (index < servers.size()) {
|
||||
int grpIndex = rand.nextInt(groups.length);
|
||||
groupMap.get(groups[grpIndex]).addServer(
|
||||
|
@ -394,6 +395,7 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{
|
|||
*/
|
||||
protected static List<TableDescriptor> constructTableDesc(boolean hasBogusTable) {
|
||||
List<TableDescriptor> tds = Lists.newArrayList();
|
||||
Random rand = ThreadLocalRandom.current();
|
||||
int index = rand.nextInt(groups.length);
|
||||
for (int i = 0; i < tables.length; i++) {
|
||||
TableDescriptor htd = TableDescriptorBuilder.newBuilder(tables[i]).build();
|
||||
|
|
|
@ -24,10 +24,9 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER;
|
|||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.LongAdder;
|
||||
|
||||
import org.apache.commons.lang3.RandomUtils;
|
||||
import org.apache.commons.lang3.mutable.MutableInt;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
|
@ -267,7 +266,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements
|
|||
|
||||
// after a successful submit, sleep a little bit to allow other RSs to grab the rest tasks
|
||||
try {
|
||||
int sleepTime = RandomUtils.nextInt(0, 500) + 500;
|
||||
int sleepTime = ThreadLocalRandom.current().nextInt(500) + 500;
|
||||
Thread.sleep(sleepTime);
|
||||
} catch (InterruptedException e) {
|
||||
LOG.warn("Interrupted while yielding for other region servers", e);
|
||||
|
|
|
@@ -19,13 +19,13 @@
 package org.apache.hadoop.hbase.io.hfile;
 
 import java.util.Map;
-import java.util.Random;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
@@ -71,8 +71,6 @@ public final class PrefetchExecutor {
       });
   }
 
-  private static final Random RNG = new Random();
-
   // TODO: We want HFile, which is where the blockcache lives, to handle
   // prefetching of file blocks but the Store level is where path convention
   // knowledge should be contained
@@ -93,7 +91,8 @@ public final class PrefetchExecutor {
     long delay;
     if (prefetchDelayMillis > 0) {
       delay = (long)((prefetchDelayMillis * (1.0f - (prefetchDelayVariation/2))) +
-        (prefetchDelayMillis * (prefetchDelayVariation/2) * RNG.nextFloat()));
+        (prefetchDelayMillis * (prefetchDelayVariation/2) *
+          ThreadLocalRandom.current().nextFloat()));
     } else {
       delay = 0;
     }

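The delay arithmetic in the last hunk keeps a base prefetch delay and randomizes the top half of the variation window: with base delay b and variation fraction v, the result falls in [b*(1 - v/2), b). A minimal sketch of the same formula, with illustrative names and values that are not the HBase configuration keys:

import java.util.concurrent.ThreadLocalRandom;

public class PrefetchDelaySketch {
  // Mirrors the arithmetic in the hunk above: with a base delay and a variation
  // fraction v, the result falls in [base * (1 - v/2), base).
  static long jitteredDelay(long prefetchDelayMillis, float prefetchDelayVariation) {
    if (prefetchDelayMillis <= 0) {
      return 0;
    }
    float half = prefetchDelayVariation / 2;
    return (long) ((prefetchDelayMillis * (1.0f - half)) +
        (prefetchDelayMillis * half * ThreadLocalRandom.current().nextFloat()));
  }

  public static void main(String[] args) {
    for (int i = 0; i < 3; i++) {
      System.out.println(jitteredDelay(1000, 0.2f)); // values in [900, 1000)
    }
  }
}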
@@ -30,6 +30,8 @@ import java.util.Random;
 import java.util.Scanner;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -518,7 +520,7 @@ public class RegionPlacementMaintainer {
     public RandomizedMatrix(int rows, int cols) {
       this.rows = rows;
       this.cols = cols;
-      Random random = new Random();
+      Random random = ThreadLocalRandom.current();
       rowTransform = new int[rows];
       rowInverse = new int[rows];
       for (int i = 0; i < rows; i++) {

@@ -18,7 +18,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -53,7 +53,6 @@ public class AdaptiveMemStoreCompactionStrategy extends MemStoreCompactionStrate
   private double compactionThreshold;
   private double initialCompactionProbability;
   private double compactionProbability;
-  private Random rand = new Random();
   private double numCellsInVersionedList = 0;
   private boolean compacted = false;
 
@@ -66,9 +65,10 @@ public class AdaptiveMemStoreCompactionStrategy extends MemStoreCompactionStrate
     resetStats();
   }
 
-  @Override public Action getAction(VersionedSegmentsList versionedList) {
+  @Override
+  public Action getAction(VersionedSegmentsList versionedList) {
     if (versionedList.getEstimatedUniquesFrac() < 1.0 - compactionThreshold) {
-      double r = rand.nextDouble();
+      double r = ThreadLocalRandom.current().nextDouble();
       if(r < compactionProbability) {
         numCellsInVersionedList = versionedList.getNumOfCells();
         compacted = true;

@@ -52,13 +52,13 @@ import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 import javax.management.MalformedObjectNameException;
 import javax.servlet.http.HttpServlet;
-import org.apache.commons.lang3.RandomUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.SystemUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -1966,14 +1966,14 @@ public class HRegionServer extends Thread implements
       if (r.shouldFlush(whyFlush)) {
         FlushRequester requester = server.getFlushRequester();
         if (requester != null) {
-          long randomDelay = RandomUtils.nextLong(0, rangeOfDelayMs) + MIN_DELAY_TIME;
+          long delay = ThreadLocalRandom.current().nextLong(rangeOfDelayMs) + MIN_DELAY_TIME;
           //Throttle the flushes by putting a delay. If we don't throttle, and there
           //is a balanced write-load on the regions in a table, we might end up
           //overwhelming the filesystem with too many flushes at once.
-          if (requester.requestDelayedFlush(r, randomDelay)) {
+          if (requester.requestDelayedFlush(r, delay)) {
             LOG.info("{} requesting flush of {} because {} after random delay {} ms",
               getName(), r.getRegionInfo().getRegionNameAsString(), whyFlush.toString(),
-              randomDelay);
+              delay);
           }
         }
       }
@@ -2262,7 +2262,8 @@ public class HRegionServer extends Thread implements
     double brokenStoreFileCleanerDelayJitter = conf.getDouble(
       BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY_JITTER,
       BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY_JITTER);
-    double jitterRate = (RandomUtils.nextDouble() - 0.5D) * brokenStoreFileCleanerDelayJitter;
+    double jitterRate = (ThreadLocalRandom.current().nextDouble() - 0.5D) *
+      brokenStoreFileCleanerDelayJitter;
    long jitterValue = Math.round(brokenStoreFileCleanerDelay * jitterRate);
    this.brokenStoreFileCleaner =
      new BrokenStoreFileCleaner((int) (brokenStoreFileCleanerDelay + jitterValue),

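The cleaner-delay hunk above centers its jitter around the base delay: nextDouble() - 0.5 gives a value in [-0.5, 0.5), so multiplying by the jitter fraction j and the base delay keeps the result within base +/- base*j/2. A small standalone sketch of that shape, with illustrative names rather than the HBase configuration keys:

import java.util.concurrent.ThreadLocalRandom;

public class CleanerDelayJitterSketch {
  // A jitter fraction j produces a multiplier in [-j/2, +j/2), so the effective
  // delay stays within base +/- base*j/2.
  static long jitteredDelay(long baseDelayMs, double jitterFraction) {
    double jitterRate = (ThreadLocalRandom.current().nextDouble() - 0.5D) * jitterFraction;
    long jitterValue = Math.round(baseDelayMs * jitterRate);
    return baseDelayMs + jitterValue;
  }

  public static void main(String[] args) {
    for (int i = 0; i < 3; i++) {
      System.out.println(jitteredDelay(60_000L, 0.25D)); // stays within 60000 +/- 7500
    }
  }
}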
@@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
@@ -35,6 +34,8 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
 
   private static final Logger LOG = LoggerFactory.getLogger(SortedCompactionPolicy.class);
 
+  private static final Random RNG = new Random();
+
   public SortedCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) {
     super(conf, storeConfigInfo);
   }
@@ -109,11 +110,6 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
   public abstract boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
     throws IOException;
 
-  /**
-   * Used calculation jitter
-   */
-  private final Random random = new Random();
-
   /**
    * @param filesToCompact
    * @return When to run next major compaction
@@ -137,14 +133,12 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
     // deterministic jitter avoids a major compaction storm on restart
     OptionalInt seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
     if (seed.isPresent()) {
-      // Synchronized to ensure one user of random instance at a time.
-      double rnd;
-      synchronized (this) {
-        this.random.setSeed(seed.getAsInt());
-        rnd = this.random.nextDouble();
-      }
       long jitter = Math.round(period * jitterPct);
-      return period + jitter - Math.round(2L * jitter * rnd);
+      // Synchronized to ensure one user of random instance at a time.
+      synchronized (RNG) {
+        RNG.setSeed(seed.getAsInt());
+        return period + jitter - Math.round(2L * jitter * RNG.nextDouble());
+      }
     } else {
       return 0L;
     }

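This is the one place in the section where a seeded java.util.Random is kept on purpose: the seed is derived from the store files so every regionserver computes the same jittered compaction time after a restart, which ThreadLocalRandom cannot do because it rejects setSeed. A standalone restatement of the pattern in the hunk above (class name and sample values are illustrative):

import java.util.OptionalInt;
import java.util.Random;

public class DeterministicJitterSketch {
  // A shared Random whose seed is reset per call; synchronization keeps the
  // setSeed/nextDouble pair atomic with respect to other callers.
  private static final Random RNG = new Random();

  static long nextMajorCompactTime(OptionalInt seed, long period, double jitterPct) {
    if (!seed.isPresent()) {
      return 0L;
    }
    long jitter = Math.round(period * jitterPct);
    synchronized (RNG) {
      RNG.setSeed(seed.getAsInt());
      return period + jitter - Math.round(2L * jitter * RNG.nextDouble());
    }
  }

  public static void main(String[] args) {
    // Same seed in, same jittered period out -- the deterministic property the
    // compaction policy relies on to avoid a compaction storm on restart.
    System.out.println(nextMajorCompactTime(OptionalInt.of(42), 604_800_000L, 0.5D));
    System.out.println(nextMajorCompactTime(OptionalInt.of(42), 604_800_000L, 0.5D));
  }
}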
@@ -23,9 +23,7 @@ import static org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.WAL_TRA
 import java.io.IOException;
 import java.io.OutputStream;
 import java.security.Key;
-import java.security.SecureRandom;
 import java.util.concurrent.atomic.AtomicLong;
-import javax.crypto.spec.SecretKeySpec;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -110,11 +108,8 @@ public abstract class AbstractProtobufLogWriter {
         throw new RuntimeException("Cipher '" + cipherName + "' is not available");
       }
 
-      // Generate an encryption key for this WAL
-      SecureRandom rng = new SecureRandom();
-      byte[] keyBytes = new byte[cipher.getKeyLength()];
-      rng.nextBytes(keyBytes);
-      Key key = new SecretKeySpec(keyBytes, cipher.getName());
+      // Generate a random encryption key for this WAL
+      Key key = cipher.getRandomKey();
       builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(EncryptionUtil.wrapKey(conf,
         conf.get(HConstants.CRYPTO_WAL_KEY_NAME_CONF_KEY,
           conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY,

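The WAL key generation above delegates to the HBase Cipher's getRandomKey(), but the underlying requirement is unchanged: key material must always come from a cryptographically strong generator, never from Random or ThreadLocalRandom. A plain-JDK sketch of the same idea, not the HBase Cipher API (class name is illustrative):

import java.security.Key;
import java.security.SecureRandom;
import javax.crypto.KeyGenerator;

public class WalKeySketch {
  public static void main(String[] args) throws Exception {
    // Generate a random AES key from a CSPRNG; this is the property that
    // Cipher#getRandomKey() in the hunk above is expected to provide.
    KeyGenerator generator = KeyGenerator.getInstance("AES");
    generator.init(128, new SecureRandom());
    Key key = generator.generateKey();
    System.out.println(key.getAlgorithm() + " key, " + key.getEncoded().length + " bytes");
  }
}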
@@ -22,7 +22,6 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.security.SecureRandom;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -151,7 +150,7 @@ public class SecureWALCellCodec extends WALCellCodec {
       @Override
       protected byte[] initialValue() {
         byte[] iv = new byte[encryptor.getIvLength()];
-        new SecureRandom().nextBytes(iv);
+        Bytes.secureRandom(iv);
         return iv;
       }
     };

@@ -15,7 +15,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.math.BigInteger;
-import java.security.SecureRandom;
 import java.util.ArrayList;
 import java.util.Deque;
 import java.util.HashMap;
@@ -26,6 +25,7 @@ import java.util.Map.Entry;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
@@ -333,7 +333,7 @@ public class HFileReplicator implements Closeable {
     int RANDOM_RADIX = 32;
     String doubleUnderScore = UNDERSCORE + UNDERSCORE;
     String randomDir = user.getShortName() + doubleUnderScore + tblName + doubleUnderScore
-      + (new BigInteger(RANDOM_WIDTH, new SecureRandom()).toString(RANDOM_RADIX));
+      + (new BigInteger(RANDOM_WIDTH, ThreadLocalRandom.current()).toString(RANDOM_RADIX));
     return createStagingDir(baseDir, user, randomDir);
   }
 

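BigInteger's (int numBits, Random rnd) constructor accepts any java.util.Random, so the per-thread generator drops in for new SecureRandom() when the value is only a collision-resistant staging-directory suffix rather than a secret. A minimal sketch with illustrative widths (the real RANDOM_WIDTH constant is defined elsewhere in the class):

import java.math.BigInteger;
import java.util.concurrent.ThreadLocalRandom;

public class RandomDirSuffixSketch {
  public static void main(String[] args) {
    int randomWidth = 320; // illustrative stand-in for RANDOM_WIDTH in the hunk above
    int randomRadix = 32;  // illustrative stand-in for RANDOM_RADIX
    // Uniform random value in [0, 2^randomWidth), printed in base 32.
    String suffix = new BigInteger(randomWidth, ThreadLocalRandom.current()).toString(randomRadix);
    System.out.println("staging suffix: " + suffix);
  }
}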
@@ -34,7 +34,6 @@ import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.Callable;
@@ -100,7 +99,6 @@ import org.apache.zookeeper.client.ConnectStringParser;
 import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 /**
@@ -604,14 +602,13 @@ public class CanaryTool implements Tool, Canary {
         if (rowToCheck.length == 0) {
           rowToCheck = new byte[]{0x0};
         }
-        int writeValueSize =
-          connection.getConfiguration().getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10);
+        int writeValueSize = connection.getConfiguration()
+          .getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10);
         for (ColumnFamilyDescriptor column : tableDesc.getColumnFamilies()) {
           Put put = new Put(rowToCheck);
           byte[] value = new byte[writeValueSize];
           Bytes.random(value);
           put.addColumn(column.getName(), HConstants.EMPTY_BYTE_ARRAY, value);
-
           LOG.debug("Writing to {} {} {} {}",
             tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(),
             Bytes.toStringBinary(rowToCheck));
@@ -1832,7 +1829,6 @@ public class CanaryTool implements Tool, Canary {
       RegionServerStdOutSink regionServerSink) {
     List<RegionServerTask> tasks = new ArrayList<>();
     Map<String, AtomicLong> successMap = new HashMap<>();
-    Random rand = new Random();
     for (Map.Entry<String, List<RegionInfo>> entry : rsAndRMap.entrySet()) {
       String serverName = entry.getKey();
       AtomicLong successes = new AtomicLong(0);
@@ -1849,7 +1845,8 @@ public class CanaryTool implements Tool, Canary {
           }
         } else {
           // random select a region if flag not set
-          RegionInfo region = entry.getValue().get(rand.nextInt(entry.getValue().size()));
+          RegionInfo region = entry.getValue()
+            .get(ThreadLocalRandom.current().nextInt(entry.getValue().size()));
           tasks.add(new RegionServerTask(this.connection,
             serverName,
             region,

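The last hunk is the recurring "pick one element at random" case that previously allocated a throwaway Random per sniff run. A generic stand-in for that pattern (class and method names are illustrative):

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class RandomRegionPickSketch {
  // Pick one element of a non-empty list without allocating a dedicated Random
  // for the single call.
  static <T> T pickRandom(List<T> items) {
    return items.get(ThreadLocalRandom.current().nextInt(items.size()));
  }

  public static void main(String[] args) {
    System.out.println(pickRandom(List.of("region-a", "region-b", "region-c")));
  }
}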
@@ -135,7 +135,7 @@ public class EncryptionTest {
     byte[] iv = null;
     if (context.getCipher().getIvLength() > 0) {
       iv = new byte[context.getCipher().getIvLength()];
-      Bytes.random(iv);
+      Bytes.secureRandom(iv);
     }
     byte[] plaintext = new byte[1024];
     Bytes.random(plaintext);

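This hunk shows the split the commit draws between Bytes.secureRandom for IV/key material and Bytes.random for bulk bytes; per the commit message Bytes.random is itself SecureRandom-backed because it may end up generating key material, so the sketch below only illustrates the general choice between a CSPRNG and the cheap per-thread generator, not the exact HBase helpers:

import java.security.SecureRandom;
import java.util.concurrent.ThreadLocalRandom;

public class RandomFillSketch {
  private static final SecureRandom SECURE = new SecureRandom();

  // For anything that may become key material or an IV: always a CSPRNG.
  static void secureFill(byte[] b) {
    SECURE.nextBytes(b);
  }

  // For throwaway randomness where blocking on the entropy pool is unacceptable.
  static void fill(byte[] b) {
    ThreadLocalRandom.current().nextBytes(b);
  }

  public static void main(String[] args) {
    byte[] iv = new byte[16];
    secureFill(iv);        // IVs and keys: CSPRNG, as Bytes.secureRandom(iv) does above
    byte[] payload = new byte[1024];
    fill(payload);         // throwaway noise: the cheap per-thread generator is enough
    System.out.println("filled " + iv.length + " + " + payload.length + " bytes");
  }
}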
@@ -23,6 +23,8 @@ import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
@@ -164,10 +166,10 @@ public class HBaseFsckRepair {
     Table meta = conn.getTable(TableName.META_TABLE_NAME);
     Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
     if (numReplicas > 1) {
-      Random r = new Random();
+      Random rand = ThreadLocalRandom.current();
       ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]);
       for (int i = 1; i < numReplicas; i++) {
-        ServerName sn = serversArr[r.nextInt(serversArr.length)];
+        ServerName sn = serversArr[rand.nextInt(serversArr.length)];
         // the column added here is just to make sure the master is able to
         // see the additional replicas when it is asked to assign. The
         // final value of these columns will be different and will be updated

@@ -19,10 +19,10 @@ package org.apache.hadoop.hbase;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.Random;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -137,7 +137,6 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool {
    * Thread that does random full-row writes into a table.
    */
   public static class AtomicityWriter extends RepeatingTestThread {
-    Random rand = new Random();
     byte data[] = new byte[10];
     byte[][] targetRows;
     byte[][] targetFamilies;
@@ -157,10 +156,9 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool {
     @Override
     public void doAnAction() throws Exception {
       // Pick a random row to write into
-      byte[] targetRow = targetRows[rand.nextInt(targetRows.length)];
+      byte[] targetRow = targetRows[ThreadLocalRandom.current().nextInt(targetRows.length)];
       Put p = new Put(targetRow);
-      rand.nextBytes(data);
-
+      Bytes.random(data);
       for (byte[] family : targetFamilies) {
         for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
           byte qualifier[] = Bytes.toBytes("col" + i);

@@ -48,6 +48,7 @@ import java.util.Properties;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BooleanSupplier;
@@ -2419,10 +2420,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
   public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
       throws IOException {
-    Random r = new Random();
     byte[] row = new byte[rowSize];
     for (int i = 0; i < totalRows; i++) {
-      r.nextBytes(row);
+      Bytes.random(row);
       Put put = new Put(row);
       put.addColumn(f, new byte[]{0}, new byte[]{0});
       t.put(put);
@@ -3295,7 +3295,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       // There are chances that before we get the region for the table from an RS the region may
       // be going for CLOSE. This may be because online schema change is enabled
       if (regCount > 0) {
-        idx = random.nextInt(regCount);
+        idx = ThreadLocalRandom.current().nextInt(regCount);
         // if we have just tried this region, there is no need to try again
         if (attempted.contains(idx)) {
           continue;
@@ -3894,7 +3894,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
         numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
         "\n");
 
-    final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
     final int numCF = families.size();
     final byte[][] cfBytes = new byte[numCF][];
     {
@@ -3922,6 +3921,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
 
     BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
 
+    final Random rand = ThreadLocalRandom.current();
     for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
       for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
         final byte[] row = Bytes.toBytes(String.format(keyFormat,
@@ -3967,8 +3967,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   public static int randomFreePort() {
     return HBaseCommonTestingUtility.randomFreePort();
   }
 
   public static String randomMultiCastAddress() {
-    return "226.1.1." + random.nextInt(254);
+    return "226.1.1." + ThreadLocalRandom.current().nextInt(254);
   }
 
   public static void waitForHostPort(String host, int port)

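The loadRandomRows change above is the commit's "fill a byte array" normalization in miniature: instead of allocating a Random inside the method, a shared SecureRandom-backed helper fills a reusable row buffer on each iteration. A standalone stand-in for that loop (class and method are illustrative; the Put/table write is only sketched in a comment):

import java.security.SecureRandom;

public class LoadRandomRowsSketch {
  // One shared generator fills a reusable row buffer per iteration; Bytes.random
  // in the real code is likewise SecureRandom-backed.
  private static final SecureRandom RNG = new SecureRandom();

  static void loadRandomRows(int rowSize, int totalRows) {
    byte[] row = new byte[rowSize];
    for (int i = 0; i < totalRows; i++) {
      RNG.nextBytes(row);
      // a real test would build a Put from 'row' here and write it to the table
    }
  }

  public static void main(String[] args) {
    loadRandomRows(16, 5);
    System.out.println("done");
  }
}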
@@ -19,8 +19,7 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.security.SecureRandom;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.math3.random.RandomData;
 import org.apache.commons.math3.random.RandomDataImpl;
@@ -337,7 +336,6 @@ public class HFilePerformanceEvaluation {
 
   static class SequentialWriteBenchmark extends RowOrientedBenchmark {
     protected HFile.Writer writer;
-    private Random random = new Random();
     private byte[] bytes = new byte[ROW_LENGTH];
 
     public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf,
@@ -354,7 +352,7 @@ public class HFilePerformanceEvaluation {
 
       if (cipher == "aes") {
         byte[] cipherKey = new byte[AES.KEY_LENGTH];
-        new SecureRandom().nextBytes(cipherKey);
+        Bytes.secureRandom(cipherKey);
         builder.withEncryptionContext(Encryption.newContext(conf)
           .setCipher(Encryption.getCipher(conf, cipher))
           .setKey(cipherKey));
@@ -376,7 +374,7 @@ public class HFilePerformanceEvaluation {
     }
 
     private byte[] generateValue() {
-      random.nextBytes(bytes);
+      Bytes.random(bytes);
      return bytes;
    }
 
@@ -447,8 +445,6 @@ public class HFilePerformanceEvaluation {
 
   static class UniformRandomReadBenchmark extends ReadBenchmark {
 
-    private Random random = new Random();
-
     public UniformRandomReadBenchmark(Configuration conf, FileSystem fs,
         Path mf, int totalRows) {
       super(conf, fs, mf, totalRows);
@@ -469,12 +465,11 @@ public class HFilePerformanceEvaluation {
     }
 
     private byte [] getRandomRow() {
-      return format(random.nextInt(totalRows));
+      return format(ThreadLocalRandom.current().nextInt(totalRows));
     }
   }
 
   static class UniformRandomSmallScan extends ReadBenchmark {
-    private Random random = new Random();
 
     public UniformRandomSmallScan(Configuration conf, FileSystem fs,
         Path mf, int totalRows) {
@@ -507,7 +502,7 @@ public class HFilePerformanceEvaluation {
     }
 
     private byte [] getRandomRow() {
-      return format(random.nextInt(totalRows));
+      return format(ThreadLocalRandom.current().nextInt(totalRows));
     }
   }
 

@@ -428,7 +428,7 @@ public class TestHBaseTestingUtility {
     when(portChecker.available(anyInt())).thenReturn(true);
 
     HBaseTestingUtility.PortAllocator portAllocator =
-      new HBaseTestingUtility.PortAllocator(random, portChecker);
+      new HBaseTestingUtility.PortAllocator(portChecker);
 
     int port1 = portAllocator.randomFreePort();
     int port2 = portAllocator.randomFreePort();

@@ -38,6 +38,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -89,7 +90,6 @@ public class TestMetaTableAccessor {
   private static final Logger LOG = LoggerFactory.getLogger(TestMetaTableAccessor.class);
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
   private static Connection connection;
-  private Random random = new Random();
 
   @Rule
   public TestName name = new TestName();
@@ -440,9 +440,11 @@ public class TestMetaTableAccessor {
 
   @Test
   public void testMetaLocationsForRegionReplicas() throws IOException {
-    ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
-    ServerName serverName1 = ServerName.valueOf("bar", 60010, random.nextLong());
-    ServerName serverName100 = ServerName.valueOf("baz", 60010, random.nextLong());
+    Random rand = ThreadLocalRandom.current();
+
+    ServerName serverName0 = ServerName.valueOf("foo", 60010, rand.nextLong());
+    ServerName serverName1 = ServerName.valueOf("bar", 60010, rand.nextLong());
+    ServerName serverName100 = ServerName.valueOf("baz", 60010, rand.nextLong());
 
     long regionId = EnvironmentEdgeManager.currentTime();
     RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
@@ -467,9 +469,9 @@ public class TestMetaTableAccessor {
       .setReplicaId(100)
       .build();
 
-    long seqNum0 = random.nextLong();
-    long seqNum1 = random.nextLong();
-    long seqNum100 = random.nextLong();
+    long seqNum0 = rand.nextLong();
+    long seqNum1 = rand.nextLong();
+    long seqNum100 = rand.nextLong();
 
     try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
       MetaTableAccessor.updateRegionLocation(connection, primary, serverName0, seqNum0,
@@ -555,7 +557,8 @@ public class TestMetaTableAccessor {
   @Test
   public void testMetaLocationForRegionReplicasIsAddedAtRegionSplit() throws IOException {
     long regionId = EnvironmentEdgeManager.currentTime();
-    ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
+    ServerName serverName0 = ServerName.valueOf("foo", 60010,
+      ThreadLocalRandom.current().nextLong());
     RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
       .setStartKey(HConstants.EMPTY_START_ROW)
       .setEndKey(HConstants.EMPTY_END_ROW)
@@ -595,7 +598,8 @@ public class TestMetaTableAccessor {
   @Test
   public void testMetaLocationForRegionReplicasIsAddedAtRegionMerge() throws IOException {
     long regionId = EnvironmentEdgeManager.currentTime();
-    ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
+    ServerName serverName0 = ServerName.valueOf("foo", 60010,
+      ThreadLocalRandom.current().nextLong());
 
     RegionInfo parentA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
       .setStartKey(Bytes.toBytes("a"))
@@ -882,7 +886,8 @@ public class TestMetaTableAccessor {
   @Test
   public void testEmptyMetaDaughterLocationDuringSplit() throws IOException {
     long regionId = EnvironmentEdgeManager.currentTime();
-    ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
+    ServerName serverName0 = ServerName.valueOf("foo", 60010,
+      ThreadLocalRandom.current().nextLong());
     RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo"))
       .setStartKey(HConstants.EMPTY_START_ROW)
       .setEndKey(HConstants.EMPTY_END_ROW)

@@ -30,7 +30,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
@@ -643,9 +643,7 @@ public class TestAdmin2 extends TestAdminBase {
 
   @Test
   public void testAbortProcedureFail() throws Exception {
-    Random randomGenerator = new Random();
-    long procId = randomGenerator.nextLong();
-
+    long procId = ThreadLocalRandom.current().nextLong();
     boolean abortResult = ADMIN.abortProcedure(procId, true);
     assertFalse(abortResult);
   }

@@ -32,7 +32,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
@@ -47,7 +46,6 @@ import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-
 import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 import org.apache.hbase.thirdparty.io.netty.util.Timeout;
 
@@ -80,7 +78,7 @@ public class TestAsyncBufferMutator {
     TEST_UTIL.createTable(TABLE_NAME, CF);
     TEST_UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, CF);
     CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
-    ThreadLocalRandom.current().nextBytes(VALUE);
+    Bytes.random(VALUE);
   }
 
   @AfterClass

@@ -197,11 +197,11 @@ public class TestAsyncNonMetaRegionLocator {
       assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName,
         getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, locateType, false).get());
     }
-    byte[] randKey = new byte[ThreadLocalRandom.current().nextInt(128)];
-    ThreadLocalRandom.current().nextBytes(randKey);
+    byte[] key = new byte[ThreadLocalRandom.current().nextInt(128)];
+    Bytes.random(key);
     for (RegionLocateType locateType : RegionLocateType.values()) {
       assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName,
-        getDefaultRegionLocation(TABLE_NAME, randKey, locateType, false).get());
+        getDefaultRegionLocation(TABLE_NAME, key, locateType, false).get());
     }
   }
 

@@ -24,7 +24,8 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
@@ -116,8 +117,7 @@ public class TestAsyncProcedureAdminApi extends TestAsyncAdminBase {
 
   @Test
   public void abortProcedure() throws Exception {
-    Random randomGenerator = new Random();
-    long procId = randomGenerator.nextLong();
+    long procId = ThreadLocalRandom.current().nextLong();
     boolean abortResult = admin.abortProcedure(procId, true).get();
     assertFalse(abortResult);
   }

@@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals;
 
 import java.util.Arrays;
 import java.util.List;
-import java.util.concurrent.ThreadLocalRandom;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -69,7 +68,7 @@ public class TestAsyncTableBatchRetryImmediately {
     UTIL.startMiniCluster(1);
     Table table = UTIL.createTable(TABLE_NAME, FAMILY);
     UTIL.waitTableAvailable(TABLE_NAME);
-    ThreadLocalRandom.current().nextBytes(VALUE_PREFIX);
+    Bytes.random(VALUE_PREFIX);
     for (int i = 0; i < COUNT; i++) {
       table.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL,
         Bytes.add(VALUE_PREFIX, Bytes.toBytes(i))));

|
|||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.stream.Collectors;
|
||||
|
@ -146,7 +146,7 @@ public class TestAsyncTableGetMultiThreaded {
|
|||
return null;
|
||||
})));
|
||||
LOG.info("====== Scheduled {} read threads ======", numThreads);
|
||||
Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
|
||||
Collections.shuffle(Arrays.asList(SPLIT_KEYS), ThreadLocalRandom.current());
|
||||
Admin admin = TEST_UTIL.getAdmin();
|
||||
for (byte[] splitPoint : SPLIT_KEYS) {
|
||||
int oldRegionCount = admin.getRegions(TABLE_NAME).size();
|
||||
|
|
|
@@ -92,7 +92,6 @@ public class TestFromClientSide3 {
     = new HBaseTestingUtility();
   private static final int WAITTABLE_MILLIS = 10000;
   private static byte[] FAMILY = Bytes.toBytes("testFamily");
-  private static Random random = new Random();
   private static int SLAVES = 3;
   private static final byte[] ROW = Bytes.toBytes("testRow");
   private static final byte[] ANOTHERROW = Bytes.toBytes("anotherrow");
@@ -144,9 +143,10 @@ public class TestFromClientSide3 {
   private void randomCFPuts(Table table, byte[] row, byte[] family, int nPuts)
       throws Exception {
     Put put = new Put(row);
+    Random rand = ThreadLocalRandom.current();
     for (int i = 0; i < nPuts; i++) {
-      byte[] qualifier = Bytes.toBytes(random.nextInt());
-      byte[] value = Bytes.toBytes(random.nextInt());
+      byte[] qualifier = Bytes.toBytes(rand.nextInt());
+      byte[] value = Bytes.toBytes(rand.nextInt());
       put.addColumn(family, qualifier, value);
     }
     table.put(put);
@@ -286,7 +286,7 @@ public class TestFromClientSide3 {
     ClusterConnection connection = (ClusterConnection) TEST_UTIL.getConnection();
 
     // Create 3 store files.
-    byte[] row = Bytes.toBytes(random.nextInt());
+    byte[] row = Bytes.toBytes(ThreadLocalRandom.current().nextInt());
     performMultiplePutAndFlush((HBaseAdmin) admin, table, row, FAMILY, 3, 100);
 
     try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {

@@ -21,7 +21,6 @@ import static junit.framework.TestCase.assertEquals;
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
@@ -153,7 +152,7 @@ public class TestMultiRespectsLimits {
     // however the block being reference will be larger than MAX_SIZE.
     // This should cause the regionserver to try and send a result immediately.
     byte[] value = new byte[MAX_SIZE - 100];
-    ThreadLocalRandom.current().nextBytes(value);
+    Bytes.random(value);
 
     for (byte[] col:cols) {
       Put p = new Put(row);

@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;
 import static org.apache.hadoop.hbase.ipc.RpcServer.MAX_REQUEST_SIZE;
 import static org.junit.Assert.assertThrows;
 
-import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
@@ -33,7 +32,6 @@ import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-
 import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
 
 @Category({ MediumTests.class, ClientTests.class })
@@ -68,7 +66,7 @@ public class TestRequestTooBigException {
   @Test
   public void testHbasePutDeleteCell() throws Exception {
     byte[] value = new byte[1024];
-    ThreadLocalRandom.current().nextBytes(value);
+    Bytes.random(value);
     for (int m = 0; m < 100; m++) {
       Put p = new Put(Bytes.toBytes("bigrow-" + m));
       // max request is 10K, big request = 100 * 1K

@@ -43,8 +43,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Run tests related to {@link TimestampsFilter} using HBase client APIs.
@@ -58,7 +56,6 @@ public class TestTimestampsFilter {
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestTimestampsFilter.class);
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestTimestampsFilter.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
   @Rule

@@ -28,7 +28,7 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
@@ -97,7 +97,7 @@ public class TestEntityLocks {
     admin = getAdmin();
     lockReqArgCaptor = ArgumentCaptor.forClass(LockRequest.class);
     lockHeartbeatReqArgCaptor = ArgumentCaptor.forClass(LockHeartbeatRequest.class);
-    procId = new Random().nextLong();
+    procId = ThreadLocalRandom.current().nextLong();
   }
 
   private boolean waitLockTimeOut(EntityLock lock, long maxWaitTimeMillis) {

@@ -34,6 +34,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
 
 import javax.management.MBeanAttributeInfo;
 import javax.management.MBeanInfo;
 import javax.management.MBeanServerConnection;
@@ -99,7 +101,7 @@ public class TestMetaTableMetrics {
     UTIL.getConfiguration().set("hbase.coprocessor.region.classes",
       MetaTableMetrics.class.getName());
     conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, JMXListener.class.getName());
-    Random rand = new Random();
+    Random rand = ThreadLocalRandom.current();
     for (int i = 0; i < 10; i++) {
       do {
         int sign = i % 2 == 0 ? 1 : -1;

@@ -20,8 +20,10 @@ package org.apache.hadoop.hbase.io.compress;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.security.SecureRandom;
 import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -44,7 +46,6 @@ public class HFileTestBase {
 
   protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   protected static final Logger LOG = LoggerFactory.getLogger(HFileTestBase.class);
   protected static final SecureRandom RNG = new SecureRandom();
   protected static FileSystem FS;
 
   public static void setUpBeforeClass() throws Exception {
@@ -105,13 +106,14 @@ public class HFileTestBase {
     assertEquals("Did not read back as many KVs as written", i, testKvs.size());
 
     // Test random seeks with pread
+    Random rand = ThreadLocalRandom.current();
     LOG.info("Random seeking with " + fileContext);
     reader = HFile.createReader(FS, path, cacheConf, true, conf);
     try {
       scanner = reader.getScanner(conf, false, true);
       assertTrue("Initial seekTo failed", scanner.seekTo());
       for (i = 0; i < 100; i++) {
-        KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size()));
+        KeyValue kv = testKvs.get(rand.nextInt(testKvs.size()));
         assertEquals("Unable to find KV as expected: " + kv, 0, scanner.seekTo(kv));
       }
     } finally {

@@ -25,6 +25,8 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -252,7 +254,7 @@ public class TestChangingEncoding {
   @Test
   public void testCrazyRandomChanges() throws Exception {
     prepareTest("RandomChanges");
-    Random rand = new Random(2934298742974297L);
+    Random rand = ThreadLocalRandom.current();
    for (int i = 0; i < 10; ++i) {
      int encodingOrdinal = rand.nextInt(DataBlockEncoding.values().length);
      DataBlockEncoding encoding = DataBlockEncoding.values()[encodingOrdinal];

@@ -30,6 +30,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ArrayBackedTag;
@@ -87,7 +88,6 @@ public class TestDataBlockEncoders {
 
   private final Configuration conf = HBaseConfiguration.create();
   private final RedundantKVGenerator generator = new RedundantKVGenerator();
-  private final Random randomizer = new Random(42L);
   private final boolean includesMemstoreTS;
   private final boolean includesTags;
   private final boolean useOffheapData;
@@ -217,13 +217,14 @@ public class TestDataBlockEncoders {
     LOG.info("Testing it!");
     // test it!
     // try a few random seeks
+    Random rand = ThreadLocalRandom.current();
     for (boolean seekBefore : new boolean[] { false, true }) {
       for (int i = 0; i < NUM_RANDOM_SEEKS; ++i) {
         int keyValueId;
         if (!seekBefore) {
-          keyValueId = randomizer.nextInt(sampleKv.size());
+          keyValueId = rand.nextInt(sampleKv.size());
         } else {
-          keyValueId = randomizer.nextInt(sampleKv.size() - 1) + 1;
+          keyValueId = rand.nextInt(sampleKv.size() - 1) + 1;
         }
 
         KeyValue keyValue = sampleKv.get(keyValueId);

@@ -30,8 +30,8 @@ import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Random;
 import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MultithreadedTestUtil;
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 
 public class CacheTestUtils {
@@ -282,11 +283,11 @@ public class CacheTestUtils {
 
   public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks) {
     HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks];
-    Random rand = new Random();
+    Random rand = ThreadLocalRandom.current();
     HashSet<String> usedStrings = new HashSet<>();
     for (int i = 0; i < numBlocks; i++) {
       ByteBuffer cachedBuffer = ByteBuffer.allocate(blockSize);
-      rand.nextBytes(cachedBuffer.array());
+      Bytes.random(cachedBuffer.array());
       cachedBuffer.rewind();
       int onDiskSizeWithoutHeader = blockSize;
       int uncompressedSizeWithoutHeader = blockSize;

@@ -39,6 +39,8 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -280,16 +282,15 @@ public class TestHFile {
     StoreFileWriter sfw =
       new StoreFileWriter.Builder(conf, fs).withOutputDir(storeFileParentDir)
         .withFileContext(meta).build();
 
     final int rowLen = 32;
-    Random RNG = new Random();
+    Random rand = ThreadLocalRandom.current();
     for (int i = 0; i < 1000; ++i) {
-      byte[] k = RandomKeyValueUtil.randomOrderedKey(RNG, i);
-      byte[] v = RandomKeyValueUtil.randomValue(RNG);
-      int cfLen = RNG.nextInt(k.length - rowLen + 1);
+      byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
+      byte[] v = RandomKeyValueUtil.randomValue(rand);
+      int cfLen = rand.nextInt(k.length - rowLen + 1);
       KeyValue kv =
         new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
-          k.length - rowLen - cfLen, RNG.nextLong(), generateKeyType(RNG), v, 0, v.length);
+          k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length);
       sfw.append(kv);
     }
 