HBASE-26582 Prune use of Random and SecureRandom objects (#4118)

Avoid the pattern where a Random object is allocated, used once or twice, and
then left for GC. Some static analysis tools warn about this pattern because it
can lead to poor effective randomness. In a few cases we were legitimately
affected by this issue; in others the change is still worthwhile because it
reduces noise in analysis results.
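
For illustration, a minimal sketch of the flagged pattern and its replacement (the
method names here are hypothetical, not part of this change):

import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

// Flagged pattern: a new Random allocated per call, used once, then discarded.
static int backoffJitterOld(int boundMillis) {
  return new Random().nextInt(boundMillis);
}

// Replacement: reuse the per-thread generator; no allocation, no shared-state contention.
static int backoffJitterNew(int boundMillis) {
  return ThreadLocalRandom.current().nextInt(boundMillis);
}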

Use ThreadLocalRandom where there is no requirement to set the seed, so the
generator is reused instead of being allocated per call.

Where useful, relax use of SecureRandom to plain Random or ThreadLocalRandom,
which are unlikely to block if the system entropy pool is low, when the use case
does not need cryptographically strong randomness. The exception is filling byte
arrays with randomness: such uses are normalized onto Bytes#random for ordinary
pseudorandom data, and onto the new Bytes#secureRandom, backed by SecureRandom,
wherever the bytes may become key material or IVs.
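
As a rough rule after this change, callers split along these lines (a short sketch
using the Bytes helpers introduced in this patch):

import org.apache.hadoop.hbase.util.Bytes;

static void sketch() {
  // Test values, row keys, jitter: a fast pseudorandom fill is enough.
  byte[] value = new byte[10];
  Bytes.random(value);

  // Key material and IVs: must come from a cryptographically strong source.
  byte[] keyBytes = new byte[16]; // e.g. AES key length
  Bytes.secureRandom(keyBytes);
}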

Signed-off-by: Duo Zhang <zhangduo@apache.org>
Andrew Purtell 2022-03-08 13:49:02 -08:00 committed by Duo Zhang
parent e8cca6d111
commit 5386325acd
178 changed files with 662 additions and 767 deletions

View File

@ -34,7 +34,6 @@ import java.util.List;
import java.util.Random; import java.util.Random;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -44,6 +43,7 @@ import org.apache.hadoop.hbase.io.asyncfs.monitor.ExcludeDatanodeManager;
import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode;
@ -57,7 +57,6 @@ import org.junit.experimental.categories.Category;
import org.junit.rules.TestName; import org.junit.rules.TestName;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.Channel;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
@ -72,13 +71,9 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutput.class); HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutput.class);
private static final Logger LOG = LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutput.class); private static final Logger LOG = LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutput.class);
private static DistributedFileSystem FS; private static DistributedFileSystem FS;
private static EventLoopGroup EVENT_LOOP_GROUP; private static EventLoopGroup EVENT_LOOP_GROUP;
private static Class<? extends Channel> CHANNEL_CLASS; private static Class<? extends Channel> CHANNEL_CLASS;
private static int READ_TIMEOUT_MS = 2000; private static int READ_TIMEOUT_MS = 2000;
private static StreamSlowMonitor MONITOR; private static StreamSlowMonitor MONITOR;
@ -104,14 +99,16 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
shutdownMiniDFSCluster(); shutdownMiniDFSCluster();
} }
private static final Random RNG = new Random(); // This test depends on Random#setSeed
static void writeAndVerify(FileSystem fs, Path f, AsyncFSOutput out) static void writeAndVerify(FileSystem fs, Path f, AsyncFSOutput out)
throws IOException, InterruptedException, ExecutionException { throws IOException, InterruptedException, ExecutionException {
List<CompletableFuture<Long>> futures = new ArrayList<>(); List<CompletableFuture<Long>> futures = new ArrayList<>();
byte[] b = new byte[10]; byte[] b = new byte[10];
Random rand = new Random(12345);
// test pipelined flush // test pipelined flush
RNG.setSeed(12345);
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
rand.nextBytes(b); RNG.nextBytes(b);
out.write(b); out.write(b);
futures.add(out.flush(false)); futures.add(out.flush(false));
futures.add(out.flush(false)); futures.add(out.flush(false));
@ -123,11 +120,11 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
out.close(); out.close();
assertEquals(b.length * 10, fs.getFileStatus(f).getLen()); assertEquals(b.length * 10, fs.getFileStatus(f).getLen());
byte[] actual = new byte[b.length]; byte[] actual = new byte[b.length];
rand.setSeed(12345); RNG.setSeed(12345);
try (FSDataInputStream in = fs.open(f)) { try (FSDataInputStream in = fs.open(f)) {
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
in.readFully(actual); in.readFully(actual);
rand.nextBytes(b); RNG.nextBytes(b);
assertArrayEquals(b, actual); assertArrayEquals(b, actual);
} }
assertEquals(-1, in.read()); assertEquals(-1, in.read());
@ -150,7 +147,7 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true,
false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR); false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, MONITOR);
byte[] b = new byte[10]; byte[] b = new byte[10];
ThreadLocalRandom.current().nextBytes(b); Bytes.random(b);
out.write(b, 0, b.length); out.write(b, 0, b.length);
out.flush(false).get(); out.flush(false).get();
// restart one datanode which causes one connection broken // restart one datanode which causes one connection broken
@ -262,7 +259,7 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true,
false, (short) 3, 1024 * 1024 * 1024, eventLoop, CHANNEL_CLASS, MONITOR); false, (short) 3, 1024 * 1024 * 1024, eventLoop, CHANNEL_CLASS, MONITOR);
byte[] b = new byte[50 * 1024 * 1024]; byte[] b = new byte[50 * 1024 * 1024];
ThreadLocalRandom.current().nextBytes(b); Bytes.random(b);
out.write(b); out.write(b);
out.flush(false); out.flush(false);
assertEquals(b.length, out.flush(false).get().longValue()); assertEquals(b.length, out.flush(false).get().longValue());
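
The test above keeps a shared static Random on purpose: the same seed is set before
writing and again before verifying, so the reader can regenerate exactly the bytes
the writer produced. ThreadLocalRandom cannot be substituted here because its
setSeed throws UnsupportedOperationException. A condensed sketch of the idea
(hypothetical helpers, not the test code itself):

import java.util.Random;

// Re-seeding with the same value replays the byte sequence deterministically.
static final Random RNG = new Random();

static void reseed(long seed) {
  RNG.setSeed(seed); // ThreadLocalRandom.current().setSeed(...) would throw here
}

static byte[] nextBlock(int size) {
  byte[] b = new byte[size];
  RNG.nextBytes(b);
  return b;
}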

View File

@ -17,7 +17,6 @@
*/ */
package org.apache.hadoop.hbase.io.asyncfs; package org.apache.hadoop.hbase.io.asyncfs;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail; import static org.junit.Assert.fail;
@ -29,12 +28,12 @@ import java.util.Map;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CyclicBarrier; import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor; import org.apache.hadoop.hbase.io.asyncfs.monitor.StreamSlowMonitor;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -57,7 +56,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup; import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel; import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;
/** /**
* Testcase for HBASE-26679, here we introduce a separate test class and not put the testcase in * Testcase for HBASE-26679, here we introduce a separate test class and not put the testcase in
* {@link TestFanOutOneBlockAsyncDFSOutput} because we will send heartbeat to DN when there is no * {@link TestFanOutOneBlockAsyncDFSOutput} because we will send heartbeat to DN when there is no
@ -191,7 +189,7 @@ public class TestFanOutOneBlockAsyncDFSOutputHang extends AsyncFSTestBase {
}); });
byte[] b = new byte[10]; byte[] b = new byte[10];
ThreadLocalRandom.current().nextBytes(b); Bytes.random(b);
OUT.write(b, 0, b.length); OUT.write(b, 0, b.length);
CompletableFuture<Long> future = OUT.flush(false); CompletableFuture<Long> future = OUT.flush(false);
/** /**

View File

@ -19,8 +19,7 @@
package org.apache.hadoop.hbase.client; package org.apache.hadoop.hbase.client;
import java.util.Arrays; import java.util.Arrays;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@ -33,12 +32,12 @@ public final class PerClientRandomNonceGenerator implements NonceGenerator {
private static final PerClientRandomNonceGenerator INST = new PerClientRandomNonceGenerator(); private static final PerClientRandomNonceGenerator INST = new PerClientRandomNonceGenerator();
private final Random rdm = new Random();
private final long clientId; private final long clientId;
private PerClientRandomNonceGenerator() { private PerClientRandomNonceGenerator() {
byte[] clientIdBase = ClientIdGenerator.generateClientId(); byte[] clientIdBase = ClientIdGenerator.generateClientId();
this.clientId = (((long) Arrays.hashCode(clientIdBase)) << 32) + rdm.nextInt(); this.clientId = (((long) Arrays.hashCode(clientIdBase)) << 32) +
ThreadLocalRandom.current().nextInt();
} }
@Override @Override
@ -50,7 +49,7 @@ public final class PerClientRandomNonceGenerator implements NonceGenerator {
public long newNonce() { public long newNonce() {
long result = HConstants.NO_NONCE; long result = HConstants.NO_NONCE;
do { do {
result = rdm.nextLong(); result = ThreadLocalRandom.current().nextLong();
} while (result == HConstants.NO_NONCE); } while (result == HConstants.NO_NONCE);
return result; return result;
} }

View File

@ -20,7 +20,7 @@
package org.apache.hadoop.hbase.filter; package org.apache.hadoop.hbase.filter;
import java.util.Objects; import java.util.Objects;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@ -35,7 +35,6 @@ import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferExce
*/ */
@InterfaceAudience.Public @InterfaceAudience.Public
public class RandomRowFilter extends FilterBase { public class RandomRowFilter extends FilterBase {
protected static final Random random = new Random();
protected float chance; protected float chance;
protected boolean filterOutRow; protected boolean filterOutRow;
@ -104,7 +103,7 @@ public class RandomRowFilter extends FilterBase {
filterOutRow = false; filterOutRow = false;
} else { } else {
// roll the dice // roll the dice
filterOutRow = !(random.nextFloat() < chance); filterOutRow = !(ThreadLocalRandom.current().nextFloat() < chance);
} }
return filterOutRow; return filterOutRow;
} }

View File

@ -22,7 +22,6 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException; import java.io.IOException;
import java.security.Key; import java.security.Key;
import java.security.KeyException; import java.security.KeyException;
import java.security.SecureRandom;
import java.util.Properties; import java.util.Properties;
import javax.crypto.spec.SecretKeySpec; import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.crypto.cipher.CryptoCipherFactory; import org.apache.commons.crypto.cipher.CryptoCipherFactory;
@ -37,7 +36,6 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability; import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.generated.EncryptionProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.EncryptionProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
@ -50,8 +48,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
public final class EncryptionUtil { public final class EncryptionUtil {
static private final Logger LOG = LoggerFactory.getLogger(EncryptionUtil.class); static private final Logger LOG = LoggerFactory.getLogger(EncryptionUtil.class);
static private final SecureRandom RNG = new SecureRandom();
/** /**
* Private constructor to keep this class from being instantiated. * Private constructor to keep this class from being instantiated.
*/ */
@ -96,7 +92,7 @@ public final class EncryptionUtil {
byte[] iv = null; byte[] iv = null;
if (cipher.getIvLength() > 0) { if (cipher.getIvLength() > 0) {
iv = new byte[cipher.getIvLength()]; iv = new byte[cipher.getIvLength()];
RNG.nextBytes(iv); Bytes.secureRandom(iv);
builder.setIv(UnsafeByteOperations.unsafeWrap(iv)); builder.setIv(UnsafeByteOperations.unsafeWrap(iv));
} }
byte[] keyBytes = key.getEncoded(); byte[] keyBytes = key.getEncoded();
@ -286,7 +282,7 @@ public final class EncryptionUtil {
* @throws IOException if create CryptoAES failed * @throws IOException if create CryptoAES failed
*/ */
public static CryptoAES createCryptoAES(RPCProtos.CryptoCipherMeta cryptoCipherMeta, public static CryptoAES createCryptoAES(RPCProtos.CryptoCipherMeta cryptoCipherMeta,
Configuration conf) throws IOException { Configuration conf) throws IOException {
Properties properties = new Properties(); Properties properties = new Properties();
// the property for cipher class // the property for cipher class
properties.setProperty(CryptoCipherFactory.CLASSES_KEY, properties.setProperty(CryptoCipherFactory.CLASSES_KEY,

View File

@ -22,7 +22,8 @@ package org.apache.hadoop.hbase.slowlog;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor;
@ -49,8 +50,6 @@ public class SlowLogTableAccessor {
private static final Logger LOG = LoggerFactory.getLogger(SlowLogTableAccessor.class); private static final Logger LOG = LoggerFactory.getLogger(SlowLogTableAccessor.class);
private static final Random RANDOM = new Random();
private static Connection connection; private static Connection connection;
/** /**
@ -139,7 +138,7 @@ public class SlowLogTableAccessor {
String lastFiveDig = String lastFiveDig =
hashcode.substring((hashcode.length() > 5) ? (hashcode.length() - 5) : 0); hashcode.substring((hashcode.length() > 5) ? (hashcode.length() - 5) : 0);
if (lastFiveDig.startsWith("-")) { if (lastFiveDig.startsWith("-")) {
lastFiveDig = String.valueOf(RANDOM.nextInt(99999)); lastFiveDig = String.valueOf(ThreadLocalRandom.current().nextInt(99999));
} }
final long currentTime = EnvironmentEdgeManager.currentTime(); final long currentTime = EnvironmentEdgeManager.currentTime();
final String timeAndHashcode = currentTime + lastFiveDig; final String timeAndHashcode = currentTime + lastFiveDig;

View File

@ -23,7 +23,7 @@ import static org.junit.Assert.fail;
import java.security.Key; import java.security.Key;
import java.security.KeyException; import java.security.KeyException;
import java.security.SecureRandom;
import javax.crypto.spec.SecretKeySpec; import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
@ -110,7 +110,7 @@ public class TestEncryptionUtil {
// generate a test key // generate a test key
byte[] keyBytes = new byte[AES.KEY_LENGTH]; byte[] keyBytes = new byte[AES.KEY_LENGTH];
new SecureRandom().nextBytes(keyBytes); Bytes.secureRandom(keyBytes);
String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Key key = new SecretKeySpec(keyBytes, algorithm); Key key = new SecretKeySpec(keyBytes, algorithm);
@ -152,7 +152,7 @@ public class TestEncryptionUtil {
// generate a test key // generate a test key
byte[] keyBytes = new byte[AES.KEY_LENGTH]; byte[] keyBytes = new byte[AES.KEY_LENGTH];
new SecureRandom().nextBytes(keyBytes); Bytes.secureRandom(keyBytes);
String algorithm = String algorithm =
conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Key key = new SecretKeySpec(keyBytes, algorithm); Key key = new SecretKeySpec(keyBytes, algorithm);
@ -189,7 +189,7 @@ public class TestEncryptionUtil {
// generate a test key // generate a test key
byte[] keyBytes = new byte[AES.KEY_LENGTH]; byte[] keyBytes = new byte[AES.KEY_LENGTH];
new SecureRandom().nextBytes(keyBytes); Bytes.secureRandom(keyBytes);
String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Key key = new SecretKeySpec(keyBytes, algorithm); Key key = new SecretKeySpec(keyBytes, algorithm);
@ -214,7 +214,7 @@ public class TestEncryptionUtil {
// generate a test key // generate a test key
byte[] keyBytes = new byte[AES.KEY_LENGTH]; byte[] keyBytes = new byte[AES.KEY_LENGTH];
new SecureRandom().nextBytes(keyBytes); Bytes.secureRandom(keyBytes);
String algorithm = String algorithm =
conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Key key = new SecretKeySpec(keyBytes, algorithm); Key key = new SecretKeySpec(keyBytes, algorithm);

View File

@ -25,11 +25,9 @@ import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException; import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;

View File

@ -20,11 +20,9 @@ package org.apache.hadoop.hbase.util;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import java.io.IOException; import java.io.IOException;
import java.util.Random;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException; import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;

View File

@ -317,7 +317,7 @@ public final class Encryption {
*/ */
private static byte[] generateSecretKey(String algorithm, int keyLengthBytes, char[] password) { private static byte[] generateSecretKey(String algorithm, int keyLengthBytes, char[] password) {
byte[] salt = new byte[keyLengthBytes]; byte[] salt = new byte[keyLengthBytes];
Bytes.random(salt); Bytes.secureRandom(salt);
PBEKeySpec spec = new PBEKeySpec(password, salt, 10000, keyLengthBytes*8); PBEKeySpec spec = new PBEKeySpec(password, salt, 10000, keyLengthBytes*8);
try { try {
return SecretKeyFactory.getInstance(algorithm).generateSecret(spec).getEncoded(); return SecretKeyFactory.getInstance(algorithm).generateSecret(spec).getEncoded();

View File

@ -21,7 +21,6 @@ import java.io.ByteArrayInputStream;
import java.io.DataOutputStream; import java.io.DataOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.security.SecureRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream; import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@ -110,7 +109,7 @@ public class HFileBlockDefaultEncodingContext implements HFileBlockEncodingConte
if (cryptoContext != Encryption.Context.NONE) { if (cryptoContext != Encryption.Context.NONE) {
cryptoByteStream = new ByteArrayOutputStream(); cryptoByteStream = new ByteArrayOutputStream();
iv = new byte[cryptoContext.getCipher().getIvLength()]; iv = new byte[cryptoContext.getCipher().getIvLength()];
new SecureRandom().nextBytes(iv); Bytes.secureRandom(iv);
} }
dummyHeader = Preconditions.checkNotNull(headerBytes, dummyHeader = Preconditions.checkNotNull(headerBytes,

View File

@ -38,6 +38,8 @@ import java.util.Collections;
import java.util.Comparator; import java.util.Comparator;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Random;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue;
@ -2555,11 +2557,16 @@ public class Bytes implements Comparable<Bytes> {
Arrays.fill(b, offset, offset + length, (byte) 0); Arrays.fill(b, offset, offset + length, (byte) 0);
} }
private static final SecureRandom RNG = new SecureRandom(); // Pseudorandom random number generator, do not use SecureRandom here
private static final Random RNG = new Random();
/** /**
* Fill given array with random bytes. * Fill given array with random bytes.
* @param b array which needs to be filled with random bytes * @param b array which needs to be filled with random bytes
* <p>
* If you want random bytes generated by a strong source of randomness use {@link
* Bytes#secureRandom(byte[])}.
* @param b array which needs to be filled with random bytes
*/ */
public static void random(byte[] b) { public static void random(byte[] b) {
RNG.nextBytes(b); RNG.nextBytes(b);
@ -2567,9 +2574,12 @@ public class Bytes implements Comparable<Bytes> {
/** /**
* Fill given array with random bytes at the specified position. * Fill given array with random bytes at the specified position.
* @param b * <p>
* @param offset * If you want random bytes generated by a strong source of randomness use {@link
* @param length * Bytes#secureRandom(byte[], int, int)}.
* @param b array which needs to be filled with random bytes
* @param offset staring offset in array
* @param length number of bytes to fill
*/ */
public static void random(byte[] b, int offset, int length) { public static void random(byte[] b, int offset, int length) {
checkPositionIndex(offset, b.length, "offset"); checkPositionIndex(offset, b.length, "offset");
@ -2580,6 +2590,33 @@ public class Bytes implements Comparable<Bytes> {
System.arraycopy(buf, 0, b, offset, length); System.arraycopy(buf, 0, b, offset, length);
} }
// Bytes.secureRandom may be used to create key material.
private static final SecureRandom SECURE_RNG = new SecureRandom();
/**
* Fill given array with random bytes using a strong random number generator.
* @param b array which needs to be filled with random bytes
*/
public static void secureRandom(byte[] b) {
SECURE_RNG.nextBytes(b);
}
/**
* Fill given array with random bytes at the specified position using a strong random number
* generator.
* @param b array which needs to be filled with random bytes
* @param offset staring offset in array
* @param length number of bytes to fill
*/
public static void secureRandom(byte[] b, int offset, int length) {
checkPositionIndex(offset, b.length, "offset");
checkArgument(length > 0, "length must be greater than 0");
checkPositionIndex(offset + length, b.length, "offset + length");
byte[] buf = new byte[length];
SECURE_RNG.nextBytes(buf);
System.arraycopy(buf, 0, b, offset, length);
}
/** /**
* Create a max byte array with the specified max byte count * Create a max byte array with the specified max byte count
* @param maxByteCount the length of returned byte array * @param maxByteCount the length of returned byte array
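
For completeness, a small usage sketch of the bounded variant added above; offset
and length are range-checked against the destination array before the fill:

byte[] buf = new byte[64];
// Fill only bytes [16, 48) with strong randomness; the rest of the buffer is untouched.
Bytes.secureRandom(buf, 16, 32);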

View File

@ -23,7 +23,6 @@ import java.net.ServerSocket;
import java.util.Arrays; import java.util.Arrays;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.UUID; import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ThreadLocalRandom;
@ -269,10 +268,7 @@ public class HBaseCommonTestingUtility {
return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate); return Waiter.waitFor(this.conf, timeout, interval, failIfTimeout, predicate);
} }
// Support for Random Port Generation. private static final PortAllocator portAllocator = new PortAllocator();
static Random random = new Random();
private static final PortAllocator portAllocator = new PortAllocator(random);
public static int randomFreePort() { public static int randomFreePort() {
return portAllocator.randomFreePort(); return portAllocator.randomFreePort();
@ -285,11 +281,9 @@ public class HBaseCommonTestingUtility {
/** A set of ports that have been claimed using {@link #randomFreePort()}. */ /** A set of ports that have been claimed using {@link #randomFreePort()}. */
private final Set<Integer> takenRandomPorts = new HashSet<>(); private final Set<Integer> takenRandomPorts = new HashSet<>();
private final Random random;
private final AvailablePortChecker portChecker; private final AvailablePortChecker portChecker;
public PortAllocator(Random random) { public PortAllocator() {
this.random = random;
this.portChecker = new AvailablePortChecker() { this.portChecker = new AvailablePortChecker() {
@Override @Override
public boolean available(int port) { public boolean available(int port) {
@ -304,8 +298,7 @@ public class HBaseCommonTestingUtility {
}; };
} }
public PortAllocator(Random random, AvailablePortChecker portChecker) { public PortAllocator(AvailablePortChecker portChecker) {
this.random = random;
this.portChecker = portChecker; this.portChecker = portChecker;
} }
@ -336,7 +329,7 @@ public class HBaseCommonTestingUtility {
*/ */
private int randomPort() { private int randomPort() {
return MIN_RANDOM_PORT return MIN_RANDOM_PORT
+ random.nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT); + ThreadLocalRandom.current().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
} }
interface AvailablePortChecker { interface AvailablePortChecker {

View File

@ -48,9 +48,9 @@ public class TestEncryption {
@Test @Test
public void testSmallBlocks() throws Exception { public void testSmallBlocks() throws Exception {
byte[] key = new byte[16]; byte[] key = new byte[16];
Bytes.random(key); Bytes.secureRandom(key);
byte[] iv = new byte[16]; byte[] iv = new byte[16];
Bytes.random(iv); Bytes.secureRandom(iv);
for (int size: new int[] { 4, 8, 16, 32, 64, 128, 256, 512 }) { for (int size: new int[] { 4, 8, 16, 32, 64, 128, 256, 512 }) {
checkTransformSymmetry(key, iv, getRandomBlock(size)); checkTransformSymmetry(key, iv, getRandomBlock(size));
} }
@ -59,9 +59,9 @@ public class TestEncryption {
@Test @Test
public void testLargeBlocks() throws Exception { public void testLargeBlocks() throws Exception {
byte[] key = new byte[16]; byte[] key = new byte[16];
Bytes.random(key); Bytes.secureRandom(key);
byte[] iv = new byte[16]; byte[] iv = new byte[16];
Bytes.random(iv); Bytes.secureRandom(iv);
for (int size: new int[] { 256 * 1024, 512 * 1024, 1024 * 1024 }) { for (int size: new int[] { 256 * 1024, 512 * 1024, 1024 * 1024 }) {
checkTransformSymmetry(key, iv, getRandomBlock(size)); checkTransformSymmetry(key, iv, getRandomBlock(size));
} }
@ -70,9 +70,9 @@ public class TestEncryption {
@Test @Test
public void testOddSizedBlocks() throws Exception { public void testOddSizedBlocks() throws Exception {
byte[] key = new byte[16]; byte[] key = new byte[16];
Bytes.random(key); Bytes.secureRandom(key);
byte[] iv = new byte[16]; byte[] iv = new byte[16];
Bytes.random(iv); Bytes.secureRandom(iv);
for (int size: new int[] { 3, 7, 11, 23, 47, 79, 119, 175 }) { for (int size: new int[] { 3, 7, 11, 23, 47, 79, 119, 175 }) {
checkTransformSymmetry(key, iv, getRandomBlock(size)); checkTransformSymmetry(key, iv, getRandomBlock(size));
} }
@ -81,9 +81,9 @@ public class TestEncryption {
@Test @Test
public void testTypicalHFileBlocks() throws Exception { public void testTypicalHFileBlocks() throws Exception {
byte[] key = new byte[16]; byte[] key = new byte[16];
Bytes.random(key); Bytes.secureRandom(key);
byte[] iv = new byte[16]; byte[] iv = new byte[16];
Bytes.random(iv); Bytes.secureRandom(iv);
for (int size: new int[] { 4 * 1024, 8 * 1024, 64 * 1024, 128 * 1024 }) { for (int size: new int[] { 4 * 1024, 8 * 1024, 64 * 1024, 128 * 1024 }) {
checkTransformSymmetry(key, iv, getRandomBlock(size)); checkTransformSymmetry(key, iv, getRandomBlock(size));
} }

View File

@ -23,7 +23,7 @@ import static org.junit.Assert.assertTrue;
import java.math.BigInteger; import java.math.BigInteger;
import java.util.Arrays; import java.util.Arrays;
import java.util.Random;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
@ -81,9 +81,8 @@ public class TestLRUDictionary {
@Test @Test
public void testBasic() { public void testBasic() {
Random rand = new Random();
byte[] testBytes = new byte[10]; byte[] testBytes = new byte[10];
rand.nextBytes(testBytes); Bytes.random(testBytes);
// Verify that our randomly generated array doesn't exist in the dictionary // Verify that our randomly generated array doesn't exist in the dictionary
assertEquals(-1, testee.findEntry(testBytes, 0, testBytes.length)); assertEquals(-1, testee.findEntry(testBytes, 0, testBytes.length));

View File

@ -36,7 +36,7 @@ public class LoadTestKVGenerator {
private static int logLimit = 10; private static int logLimit = 10;
/** A random number generator for determining value size */ /** A random number generator for determining value size */
private Random randomForValueSize = new Random(); private Random randomForValueSize = new Random(); // Seed may be set with Random#setSeed
private final int minValueSize; private final int minValueSize;
private final int maxValueSize; private final int maxValueSize;

View File

@ -21,8 +21,11 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import java.util.Random; import java.util.Random;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
@ -54,7 +57,7 @@ public class TestAvlUtil {
final TreeMap<Integer, Object> treeMap = new TreeMap<>(); final TreeMap<Integer, Object> treeMap = new TreeMap<>();
TestAvlNode root = null; TestAvlNode root = null;
final Random rand = new Random(); Random rand = ThreadLocalRandom.current();
for (int i = 0; i < NELEM; ++i) { for (int i = 0; i < NELEM; ++i) {
int key = rand.nextInt(MAX_KEY); int key = rand.nextInt(MAX_KEY);
if (AvlTree.get(root, key, KEY_COMPARATOR) != null) { if (AvlTree.get(root, key, KEY_COMPARATOR) != null) {

View File

@ -24,7 +24,7 @@ import static org.junit.Assert.fail;
import java.io.IOException; import java.io.IOException;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.ByteBuff;
@ -39,8 +39,6 @@ import org.junit.experimental.categories.Category;
@Category({ MiscTests.class, SmallTests.class }) @Category({ MiscTests.class, SmallTests.class })
public class TestByteBufferArray { public class TestByteBufferArray {
private static final Random RANDOM = new Random(EnvironmentEdgeManager.currentTime());
@ClassRule @ClassRule
public static final HBaseClassTestRule CLASS_RULE = public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestByteBufferArray.class); HBaseClassTestRule.forClass(TestByteBufferArray.class);
@ -87,7 +85,7 @@ public class TestByteBufferArray {
private ByteBuff createByteBuff(int len) { private ByteBuff createByteBuff(int len) {
assert len >= 0; assert len >= 0;
int pos = len == 0 ? 0 : RANDOM.nextInt(len); int pos = len == 0 ? 0 : ThreadLocalRandom.current().nextInt(len);
ByteBuff b = ByteBuff.wrap(ByteBuffer.allocate(2 * len)); ByteBuff b = ByteBuff.wrap(ByteBuffer.allocate(2 * len));
b.position(pos).limit(pos + len); b.position(pos).limit(pos + len);
return b; return b;

View File

@ -38,6 +38,8 @@ import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
@ -385,14 +387,11 @@ public class TestBytes {
@Test @Test
public void testToStringBytesBinaryReversible() { public void testToStringBytesBinaryReversible() {
// let's run test with 1000 randomly generated byte arrays
Random rand = new Random(EnvironmentEdgeManager.currentTime());
byte[] randomBytes = new byte[1000]; byte[] randomBytes = new byte[1000];
for (int i = 0; i < 1000; i++) { for (int i = 0; i < 1000; i++) {
rand.nextBytes(randomBytes); Bytes.random(randomBytes);
verifyReversibleForBytes(randomBytes); verifyReversibleForBytes(randomBytes);
} }
// some specific cases // some specific cases
verifyReversibleForBytes(new byte[] {}); verifyReversibleForBytes(new byte[] {});
verifyReversibleForBytes(new byte[] {'\\', 'x', 'A', 'D'}); verifyReversibleForBytes(new byte[] {'\\', 'x', 'A', 'D'});
@ -597,10 +596,10 @@ public class TestBytes {
List<byte[]> testByteData = new ArrayList<>(5); List<byte[]> testByteData = new ArrayList<>(5);
testByteData.addAll(Arrays.asList(new byte[0], new byte[1], new byte[10], testByteData.addAll(Arrays.asList(new byte[0], new byte[1], new byte[10],
new byte[] { 1, 2, 3, 4, 5 }, new byte[] { (byte) 0xFF })); new byte[] { 1, 2, 3, 4, 5 }, new byte[] { (byte) 0xFF }));
Random r = new Random(); Random rand = ThreadLocalRandom.current();
for (int i = 0; i < 20; i++) { for (int i = 0; i < 20; i++) {
byte[] bytes = new byte[r.nextInt(100)]; byte[] bytes = new byte[rand.nextInt(100)];
r.nextBytes(bytes); Bytes.random(bytes);
testByteData.add(bytes); testByteData.add(bytes);
} }

View File

@ -22,7 +22,6 @@ import static org.junit.Assert.assertNotEquals;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable; import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
@ -41,13 +40,13 @@ public class TestCompatibilitySingletonFactory {
HBaseClassTestRule.forClass(TestCompatibilitySingletonFactory.class); HBaseClassTestRule.forClass(TestCompatibilitySingletonFactory.class);
private static final int ITERATIONS = 100000; private static final int ITERATIONS = 100000;
private static final Random RANDOM = new Random();
private class TestCompatibilitySingletonFactoryCallable implements Callable<String> { private class TestCompatibilitySingletonFactoryCallable implements Callable<String> {
@Override @Override
public String call() throws Exception { public String call() throws Exception {
Thread.sleep(RANDOM.nextInt(10)); // XXX: Why is this sleep here?
Thread.sleep(10);
RandomStringGenerator RandomStringGenerator
instance = instance =
CompatibilitySingletonFactory.getInstance(RandomStringGenerator.class); CompatibilitySingletonFactory.getInstance(RandomStringGenerator.class);

View File

@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.http;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import javax.servlet.Filter; import javax.servlet.Filter;
import javax.servlet.FilterChain; import javax.servlet.FilterChain;
import javax.servlet.FilterConfig; import javax.servlet.FilterConfig;
@ -114,12 +116,12 @@ public class TestServletFilter extends HttpServerFunctionalTest {
final String hadooplogoURL = "/static/hadoop-logo.jpg"; final String hadooplogoURL = "/static/hadoop-logo.jpg";
final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL}; final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL};
final Random ran = new Random(); final Random rand = ThreadLocalRandom.current();
final int[] sequence = new int[50]; final int[] sequence = new int[50];
//generate a random sequence and update counts //generate a random sequence and update counts
for(int i = 0; i < sequence.length; i++) { for(int i = 0; i < sequence.length; i++) {
sequence[i] = ran.nextInt(urls.length); sequence[i] = rand.nextInt(urls.length);
} }
//access the urls as the sequence //access the urls as the sequence

View File

@ -23,10 +23,8 @@ import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@ -258,7 +256,8 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
return null; return null;
} }
ArrayList<String> namespaceList = new ArrayList<>(namespaceMap.keySet()); ArrayList<String> namespaceList = new ArrayList<>(namespaceMap.keySet());
String randomKey = namespaceList.get(RandomUtils.nextInt(0, namespaceList.size())); String randomKey = namespaceList.get(ThreadLocalRandom.current()
.nextInt(namespaceList.size()));
NamespaceDescriptor randomNsd = namespaceMap.get(randomKey); NamespaceDescriptor randomNsd = namespaceMap.get(randomKey);
// remove from namespaceMap // remove from namespaceMap
namespaceMap.remove(randomKey); namespaceMap.remove(randomKey);
@ -307,12 +306,12 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
private NamespaceDescriptor createNamespaceDesc() { private NamespaceDescriptor createNamespaceDesc() {
String namespaceName = "itnamespace" + String.format("%010d", String namespaceName = "itnamespace" + String.format("%010d",
RandomUtils.nextInt()); ThreadLocalRandom.current().nextInt());
NamespaceDescriptor nsd = NamespaceDescriptor.create(namespaceName).build(); NamespaceDescriptor nsd = NamespaceDescriptor.create(namespaceName).build();
nsd.setConfiguration( nsd.setConfiguration(
nsTestConfigKey, nsTestConfigKey,
String.format("%010d", RandomUtils.nextInt())); String.format("%010d", ThreadLocalRandom.current().nextInt()));
return nsd; return nsd;
} }
} }
@ -332,7 +331,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
NamespaceDescriptor modifiedNsd = NamespaceDescriptor.create(namespaceName).build(); NamespaceDescriptor modifiedNsd = NamespaceDescriptor.create(namespaceName).build();
String nsValueNew; String nsValueNew;
do { do {
nsValueNew = String.format("%010d", RandomUtils.nextInt()); nsValueNew = String.format("%010d", ThreadLocalRandom.current().nextInt());
} while (selected.getConfigurationValue(nsTestConfigKey).equals(nsValueNew)); } while (selected.getConfigurationValue(nsTestConfigKey).equals(nsValueNew));
modifiedNsd.setConfiguration(nsTestConfigKey, nsValueNew); modifiedNsd.setConfiguration(nsTestConfigKey, nsValueNew);
admin.modifyNamespace(modifiedNsd); admin.modifyNamespace(modifiedNsd);
@ -398,8 +397,8 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
return null; return null;
} }
ArrayList<TableName> tableList = new ArrayList<>(tableMap.keySet()); ArrayList<TableName> tableList = new ArrayList<>(tableMap.keySet());
TableName randomKey = tableList.get(RandomUtils.nextInt(0, tableList.size())); TableName key = tableList.get(ThreadLocalRandom.current().nextInt(tableList.size()));
TableDescriptor randomTd = tableMap.remove(randomKey); TableDescriptor randomTd = tableMap.remove(key);
return randomTd; return randomTd;
} }
} }
@ -437,8 +436,9 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
} }
private TableDescriptor createTableDesc() { private TableDescriptor createTableDesc() {
String tableName = String.format("ittable-%010d", RandomUtils.nextInt()); String tableName = String.format("ittable-%010d",
String familyName = "cf-" + Math.abs(RandomUtils.nextInt()); ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE));
String familyName = "cf-" + ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE);
return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName)) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName))
.build(); .build();
@ -582,8 +582,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
LOG.info("No column families in table: " + td); LOG.info("No column families in table: " + td);
return null; return null;
} }
ColumnFamilyDescriptor randomCfd = families[RandomUtils.nextInt(0, families.length)]; return families[ThreadLocalRandom.current().nextInt(families.length)];
return randomCfd;
} }
} }
@ -600,7 +599,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
try { try {
ColumnFamilyDescriptor cfd = createFamilyDesc(); ColumnFamilyDescriptor cfd = createFamilyDesc();
if (selected.hasColumnFamily(cfd.getName())){ if (selected.hasColumnFamily(cfd.getName())){
LOG.info(new String(cfd.getName()) + " already exists in table " LOG.info(Bytes.toString(cfd.getName()) + " already exists in table "
+ selected.getTableName()); + selected.getTableName());
return; return;
} }
@ -625,7 +624,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
} }
private ColumnFamilyDescriptor createFamilyDesc() { private ColumnFamilyDescriptor createFamilyDesc() {
String familyName = String.format("cf-%010d", RandomUtils.nextInt()); String familyName = String.format("cf-%010d", ThreadLocalRandom.current().nextInt());
return ColumnFamilyDescriptorBuilder.of(familyName); return ColumnFamilyDescriptorBuilder.of(familyName);
} }
} }
@ -644,7 +643,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
} }
Admin admin = connection.getAdmin(); Admin admin = connection.getAdmin();
int versions = RandomUtils.nextInt(0, 10) + 3; int versions = ThreadLocalRandom.current().nextInt(10) + 3;
try { try {
TableName tableName = selected.getTableName(); TableName tableName = selected.getTableName();
LOG.info("Altering versions of column family: " + columnDesc + " to: " + versions + LOG.info("Altering versions of column family: " + columnDesc + " to: " + versions +
@ -700,7 +699,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
// possible DataBlockEncoding ids // possible DataBlockEncoding ids
DataBlockEncoding[] possibleIds = {DataBlockEncoding.NONE, DataBlockEncoding.PREFIX, DataBlockEncoding[] possibleIds = {DataBlockEncoding.NONE, DataBlockEncoding.PREFIX,
DataBlockEncoding.DIFF, DataBlockEncoding.FAST_DIFF, DataBlockEncoding.ROW_INDEX_V1}; DataBlockEncoding.DIFF, DataBlockEncoding.FAST_DIFF, DataBlockEncoding.ROW_INDEX_V1};
short id = possibleIds[RandomUtils.nextInt(0, possibleIds.length)].getId(); short id = possibleIds[ThreadLocalRandom.current().nextInt(possibleIds.length)].getId();
LOG.info("Altering encoding of column family: " + columnDesc + " to: " + id + LOG.info("Altering encoding of column family: " + columnDesc + " to: " + id +
" in table: " + tableName); " in table: " + tableName);
@ -788,17 +787,18 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
int average_rows = 1; int average_rows = 1;
int numRows = average_rows * numRegions; int numRows = average_rows * numRegions;
LOG.info("Adding " + numRows + " rows to table: " + selected); LOG.info("Adding " + numRows + " rows to table: " + selected);
byte[] value = new byte[10];
for (int i = 0; i < numRows; i++){ for (int i = 0; i < numRows; i++){
// nextInt(Integer.MAX_VALUE)) to return positive numbers only // nextInt(Integer.MAX_VALUE)) to return positive numbers only
byte[] rowKey = Bytes.toBytes( byte[] rowKey = Bytes.toBytes(
"row-" + String.format("%010d", RandomUtils.nextInt())); "row-" + String.format("%010d", ThreadLocalRandom.current().nextInt()));
ColumnFamilyDescriptor cfd = selectFamily(selected); ColumnFamilyDescriptor cfd = selectFamily(selected);
if (cfd == null){ if (cfd == null){
return; return;
} }
byte[] family = cfd.getName(); byte[] family = cfd.getName();
byte[] qualifier = Bytes.toBytes("col-" + RandomUtils.nextInt() % 10); byte[] qualifier = Bytes.toBytes("col-" + ThreadLocalRandom.current().nextInt(10));
byte[] value = Bytes.toBytes("val-" + RandomStringUtils.randomAlphanumeric(10)); Bytes.random(value);
Put put = new Put(rowKey); Put put = new Put(rowKey);
put.addColumn(family, qualifier, value); put.addColumn(family, qualifier, value);
table.put(put); table.put(put);
@ -842,7 +842,8 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
public void run() { public void run() {
while (running.get()) { while (running.get()) {
// select random action // select random action
ACTION selectedAction = ACTION.values()[RandomUtils.nextInt() % ACTION.values().length]; ACTION selectedAction =
ACTION.values()[ThreadLocalRandom.current().nextInt(ACTION.values().length)];
this.action = selectedAction; this.action = selectedAction;
LOG.info("Performing Action: " + selectedAction); LOG.info("Performing Action: " + selectedAction);
@ -875,7 +876,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
break; break;
case DELETE_TABLE: case DELETE_TABLE:
// reduce probability of deleting table to 20% // reduce probability of deleting table to 20%
if (RandomUtils.nextInt(0, 100) < 20) { if (ThreadLocalRandom.current().nextInt(100) < 20) {
new DeleteTableAction().perform(); new DeleteTableAction().perform();
} }
break; break;
@ -884,7 +885,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
break; break;
case DELETE_COLUMNFAMILY: case DELETE_COLUMNFAMILY:
// reduce probability of deleting column family to 20% // reduce probability of deleting column family to 20%
if (RandomUtils.nextInt(0, 100) < 20) { if (ThreadLocalRandom.current().nextInt(100) < 20) {
new DeleteColumnFamilyAction().perform(); new DeleteColumnFamilyAction().perform();
} }
break; break;

View File

@ -26,15 +26,15 @@ import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Properties; import java.util.Properties;
import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BiConsumer; import java.util.function.BiConsumer;
import java.util.function.Consumer; import java.util.function.Consumer;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseCluster; import org.apache.hadoop.hbase.HBaseCluster;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.IntegrationTestBase; import org.apache.hadoop.hbase.IntegrationTestBase;
import org.apache.hadoop.hbase.IntegrationTestingUtility; import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.MiniHBaseCluster;
@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@ -286,9 +287,10 @@ public abstract class Action {
List<byte[]> regions = new LinkedList<>(serverLoad.getRegionMetrics().keySet()); List<byte[]> regions = new LinkedList<>(serverLoad.getRegionMetrics().keySet());
int victimRegionCount = (int)Math.ceil(fractionOfRegions * regions.size()); int victimRegionCount = (int)Math.ceil(fractionOfRegions * regions.size());
getLogger().debug("Removing {} regions from {}", victimRegionCount, sn); getLogger().debug("Removing {} regions from {}", victimRegionCount, sn);
Random rand = ThreadLocalRandom.current();
for (int i = 0; i < victimRegionCount; ++i) { for (int i = 0; i < victimRegionCount; ++i) {
int victimIx = RandomUtils.nextInt(0, regions.size()); int victimIx = rand.nextInt(regions.size());
String regionId = HRegionInfo.encodeRegionName(regions.remove(victimIx)); String regionId = RegionInfo.encodeRegionName(regions.remove(victimIx));
victimRegions.add(Bytes.toBytes(regionId)); victimRegions.add(Bytes.toBytes(regionId));
} }
} }
@ -296,13 +298,14 @@ public abstract class Action {
getLogger().info("Moving {} regions from {} servers to {} different servers", getLogger().info("Moving {} regions from {} servers to {} different servers",
victimRegions.size(), fromServers.size(), toServers.size()); victimRegions.size(), fromServers.size(), toServers.size());
Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin(); Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin();
Random rand = ThreadLocalRandom.current();
for (byte[] victimRegion : victimRegions) { for (byte[] victimRegion : victimRegions) {
// Don't keep moving regions if we're // Don't keep moving regions if we're
// trying to stop the monkey. // trying to stop the monkey.
if (context.isStopping()) { if (context.isStopping()) {
break; break;
} }
int targetIx = RandomUtils.nextInt(0, toServers.size()); int targetIx = rand.nextInt(toServers.size());
admin.move(victimRegion, toServers.get(targetIx)); admin.move(victimRegion, toServers.get(targetIx));
} }
} }

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.BloomFilterUtil; import org.apache.hadoop.hbase.util.BloomFilterUtil;
@ -49,14 +49,13 @@ public class ChangeBloomFilterAction extends Action {
@Override @Override
public void perform() throws Exception { public void perform() throws Exception {
final Random random = new Random();
final BloomType[] bloomArray = BloomType.values(); final BloomType[] bloomArray = BloomType.values();
final int bloomArraySize = bloomArray.length; final int bloomArraySize = bloomArray.length;
getLogger().info("Performing action: Change bloom filter on all columns of table " + tableName); getLogger().info("Performing action: Change bloom filter on all columns of table " + tableName);
modifyAllTableColumns(tableName, (columnName, columnBuilder) -> { modifyAllTableColumns(tableName, (columnName, columnBuilder) -> {
BloomType bloomType = bloomArray[random.nextInt(bloomArraySize)]; BloomType bloomType = bloomArray[ThreadLocalRandom.current().nextInt(bloomArraySize)];
getLogger().debug("Performing action: About to set bloom filter type to " getLogger().debug("Performing action: About to set bloom filter type to "
+ bloomType + " on column " + columnName + " of table " + tableName); + bloomType + " on column " + columnName + " of table " + tableName);
columnBuilder.setBloomFilterType(bloomType); columnBuilder.setBloomFilterType(bloomType);

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.chaos.actions;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.io.compress.Compressor; import org.apache.hadoop.io.compress.Compressor;
@ -31,12 +32,10 @@ import org.slf4j.LoggerFactory;
*/ */
public class ChangeCompressionAction extends Action { public class ChangeCompressionAction extends Action {
private final TableName tableName; private final TableName tableName;
private final Random random;
private static final Logger LOG = LoggerFactory.getLogger(ChangeCompressionAction.class); private static final Logger LOG = LoggerFactory.getLogger(ChangeCompressionAction.class);
public ChangeCompressionAction(TableName tableName) { public ChangeCompressionAction(TableName tableName) {
this.tableName = tableName; this.tableName = tableName;
this.random = new Random();
} }
@Override protected Logger getLogger() { @Override protected Logger getLogger() {
@ -48,16 +47,15 @@ public class ChangeCompressionAction extends Action {
// Possible compression algorithms. If an algorithm is not supported, // Possible compression algorithms. If an algorithm is not supported,
// modifyTable will fail, so there is no harm. // modifyTable will fail, so there is no harm.
Algorithm[] possibleAlgos = Algorithm.values(); Algorithm[] possibleAlgos = Algorithm.values();
// Since not every compression algorithm is supported, // Since not every compression algorithm is supported,
// let's use the same algorithm for all column families. // let's use the same algorithm for all column families.
Random rand = ThreadLocalRandom.current();
// If an unsupported compression algorithm is chosen, pick a different one. // If an unsupported compression algorithm is chosen, pick a different one.
// This is to work around the issue that modifyTable() does not throw remote // This is to work around the issue that modifyTable() does not throw remote
// exception. // exception.
Algorithm algo; Algorithm algo;
do { do {
algo = possibleAlgos[random.nextInt(possibleAlgos.length)]; algo = possibleAlgos[rand.nextInt(possibleAlgos.length)];
try { try {
Compressor c = algo.getCompressor(); Compressor c = algo.getCompressor();
@ -75,7 +73,7 @@ public class ChangeCompressionAction extends Action {
getLogger().debug("Performing action: Changing compression algorithms on " getLogger().debug("Performing action: Changing compression algorithms on "
+ tableName.getNameAsString() + " to " + chosenAlgo); + tableName.getNameAsString() + " to " + chosenAlgo);
modifyAllTableColumns(tableName, columnFamilyDescriptorBuilder -> { modifyAllTableColumns(tableName, columnFamilyDescriptorBuilder -> {
if (random.nextBoolean()) { if (rand.nextBoolean()) {
columnFamilyDescriptorBuilder.setCompactionCompressionType(chosenAlgo); columnFamilyDescriptorBuilder.setCompactionCompressionType(chosenAlgo);
} else { } else {
columnFamilyDescriptorBuilder.setCompressionType(chosenAlgo); columnFamilyDescriptorBuilder.setCompressionType(chosenAlgo);
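The do-while above keeps drawing until an algorithm whose Compressor can actually be obtained comes up. A rough standalone sketch of that retry-until-supported selection, with a toy enum and isSupported predicate standing in for Compression.Algorithm and its getCompressor() probe:

    import java.util.EnumSet;
    import java.util.Random;
    import java.util.concurrent.ThreadLocalRandom;

    public class PickSupportedAlgoSketch {
      enum Algo { NONE, GZ, LZO, SNAPPY, ZSTD }

      // Toy stand-in for "can a Compressor be created for this algorithm?".
      static boolean isSupported(Algo a) {
        return EnumSet.of(Algo.NONE, Algo.GZ, Algo.ZSTD).contains(a);
      }

      public static void main(String[] args) {
        Algo[] possible = Algo.values();
        Random rand = ThreadLocalRandom.current();
        Algo algo;
        do {
          // Keep drawing until a supported algorithm is found.
          algo = possible[rand.nextInt(possible.length)];
        } while (!isSupported(algo));
        System.out.println("chosen: " + algo);
      }
    }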

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.slf4j.Logger; import org.slf4j.Logger;
@ -30,12 +30,10 @@ import org.slf4j.LoggerFactory;
*/ */
public class ChangeEncodingAction extends Action { public class ChangeEncodingAction extends Action {
private final TableName tableName; private final TableName tableName;
private final Random random;
private static final Logger LOG = LoggerFactory.getLogger(ChangeEncodingAction.class); private static final Logger LOG = LoggerFactory.getLogger(ChangeEncodingAction.class);
public ChangeEncodingAction(TableName tableName) { public ChangeEncodingAction(TableName tableName) {
this.tableName = tableName; this.tableName = tableName;
this.random = new Random();
} }
@Override protected Logger getLogger() { @Override protected Logger getLogger() {
@ -47,9 +45,8 @@ public class ChangeEncodingAction extends Action {
getLogger().debug("Performing action: Changing encodings on " + tableName); getLogger().debug("Performing action: Changing encodings on " + tableName);
// possible DataBlockEncoding id's // possible DataBlockEncoding id's
final int[] possibleIds = {0, 2, 3, 4, 7}; final int[] possibleIds = {0, 2, 3, 4, 7};
modifyAllTableColumns(tableName, (columnName, columnBuilder) -> { modifyAllTableColumns(tableName, (columnName, columnBuilder) -> {
short id = (short) possibleIds[random.nextInt(possibleIds.length)]; short id = (short) possibleIds[ThreadLocalRandom.current().nextInt(possibleIds.length)];
DataBlockEncoding encoding = DataBlockEncoding.getEncodingById(id); DataBlockEncoding encoding = DataBlockEncoding.getEncodingById(id);
columnBuilder.setDataBlockEncoding(encoding); columnBuilder.setDataBlockEncoding(encoding);
getLogger().debug("Set encoding of column family " + columnName + " to: " + encoding); getLogger().debug("Set encoding of column family " + columnName + " to: " + encoding);

View File

@ -17,7 +17,7 @@
*/ */
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
@ -33,7 +33,6 @@ public class ChangeSplitPolicyAction extends Action {
private static final Logger LOG = LoggerFactory.getLogger(ChangeSplitPolicyAction.class); private static final Logger LOG = LoggerFactory.getLogger(ChangeSplitPolicyAction.class);
private final TableName tableName; private final TableName tableName;
private final String[] possiblePolicies; private final String[] possiblePolicies;
private final Random random;
public ChangeSplitPolicyAction(TableName tableName) { public ChangeSplitPolicyAction(TableName tableName) {
this.tableName = tableName; this.tableName = tableName;
@ -42,7 +41,6 @@ public class ChangeSplitPolicyAction extends Action {
ConstantSizeRegionSplitPolicy.class.getName(), ConstantSizeRegionSplitPolicy.class.getName(),
DisabledRegionSplitPolicy.class.getName() DisabledRegionSplitPolicy.class.getName()
}; };
this.random = new Random();
} }
@Override protected Logger getLogger() { @Override protected Logger getLogger() {
@ -53,11 +51,11 @@ public class ChangeSplitPolicyAction extends Action {
public void perform() throws Exception { public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility(); HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
Admin admin = util.getAdmin(); Admin admin = util.getAdmin();
getLogger().info("Performing action: Change split policy of table " + tableName); getLogger().info("Performing action: Change split policy of table " + tableName);
TableDescriptor tableDescriptor = admin.getDescriptor(tableName); TableDescriptor tableDescriptor = admin.getDescriptor(tableName);
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor);
String chosenPolicy = possiblePolicies[random.nextInt(possiblePolicies.length)]; String chosenPolicy =
possiblePolicies[ThreadLocalRandom.current().nextInt(possiblePolicies.length)];
builder.setRegionSplitPolicyClassName(chosenPolicy); builder.setRegionSplitPolicyClassName(chosenPolicy);
getLogger().info("Changing " + tableName + " split policy to " + chosenPolicy); getLogger().info("Changing " + tableName + " split policy to " + chosenPolicy);
admin.modifyTable(builder.build()); admin.modifyTable(builder.build());

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@ -33,11 +33,8 @@ public class ChangeVersionsAction extends Action {
private static final Logger LOG = LoggerFactory.getLogger(ChangeVersionsAction.class); private static final Logger LOG = LoggerFactory.getLogger(ChangeVersionsAction.class);
private final TableName tableName; private final TableName tableName;
private final Random random;
public ChangeVersionsAction(TableName tableName) { public ChangeVersionsAction(TableName tableName) {
this.tableName = tableName; this.tableName = tableName;
this.random = new Random();
} }
@Override protected Logger getLogger() { @Override protected Logger getLogger() {
@ -46,8 +43,7 @@ public class ChangeVersionsAction extends Action {
@Override @Override
public void perform() throws IOException { public void perform() throws IOException {
final int versions = random.nextInt(3) + 1; final int versions = ThreadLocalRandom.current().nextInt(3) + 1;
getLogger().debug("Performing action: Changing versions on " + tableName + " to " + versions); getLogger().debug("Performing action: Changing versions on " + tableName + " to " + versions);
modifyAllTableColumns(tableName, columnBuilder -> { modifyAllTableColumns(tableName, columnBuilder -> {
columnBuilder.setMinVersions(versions).setMaxVersions(versions); columnBuilder.setMinVersions(versions).setMaxVersions(versions);

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import org.apache.commons.lang3.RandomUtils; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
@ -39,8 +39,7 @@ public class CompactMobAction extends Action {
this(-1, tableName, majorRatio); this(-1, tableName, majorRatio);
} }
public CompactMobAction( public CompactMobAction(int sleepTime, TableName tableName, float majorRatio) {
int sleepTime, TableName tableName, float majorRatio) {
this.tableName = tableName; this.tableName = tableName;
this.majorRatio = (int) (100 * majorRatio); this.majorRatio = (int) (100 * majorRatio);
this.sleepTime = sleepTime; this.sleepTime = sleepTime;
@ -54,7 +53,7 @@ public class CompactMobAction extends Action {
public void perform() throws Exception { public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility(); HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
Admin admin = util.getAdmin(); Admin admin = util.getAdmin();
boolean major = RandomUtils.nextInt(0, 100) < majorRatio; boolean major = ThreadLocalRandom.current().nextInt(100) < majorRatio;
// Don't try the modify if we're stopping // Don't try the modify if we're stopping
if (context.isStopping()) { if (context.isStopping()) {

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import java.util.List; import java.util.List;
import org.apache.commons.lang3.RandomUtils; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
@ -38,13 +38,11 @@ public class CompactRandomRegionOfTableAction extends Action {
private final long sleepTime; private final long sleepTime;
private final TableName tableName; private final TableName tableName;
public CompactRandomRegionOfTableAction( public CompactRandomRegionOfTableAction(TableName tableName, float majorRatio) {
TableName tableName, float majorRatio) {
this(-1, tableName, majorRatio); this(-1, tableName, majorRatio);
} }
public CompactRandomRegionOfTableAction( public CompactRandomRegionOfTableAction(int sleepTime, TableName tableName, float majorRatio) {
int sleepTime, TableName tableName, float majorRatio) {
this.majorRatio = (int) (100 * majorRatio); this.majorRatio = (int) (100 * majorRatio);
this.sleepTime = sleepTime; this.sleepTime = sleepTime;
this.tableName = tableName; this.tableName = tableName;
@ -58,7 +56,7 @@ public class CompactRandomRegionOfTableAction extends Action {
public void perform() throws Exception { public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility(); HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
Admin admin = util.getAdmin(); Admin admin = util.getAdmin();
boolean major = RandomUtils.nextInt(0, 100) < majorRatio; boolean major = ThreadLocalRandom.current().nextInt(100) < majorRatio;
getLogger().info("Performing action: Compact random region of table " getLogger().info("Performing action: Compact random region of table "
+ tableName + ", major=" + major); + tableName + ", major=" + major);

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import org.apache.commons.lang3.RandomUtils; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
@ -54,7 +54,7 @@ public class CompactTableAction extends Action {
public void perform() throws Exception { public void perform() throws Exception {
HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility(); HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
Admin admin = util.getAdmin(); Admin admin = util.getAdmin();
boolean major = RandomUtils.nextInt(0, 100) < majorRatio; boolean major = ThreadLocalRandom.current().nextInt(100) < majorRatio;
getLogger().info("Performing action: Compact table " + tableName + ", major=" + major); getLogger().info("Performing action: Compact table " + tableName + ", major=" + major);
try { try {

View File

@ -18,7 +18,8 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import org.apache.commons.lang3.RandomUtils; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.LocatedFileStatus;
@ -56,15 +57,15 @@ public class CorruptDataFilesAction extends Action {
Path rootDir = CommonFSUtils.getRootDir(getConf()); Path rootDir = CommonFSUtils.getRootDir(getConf());
Path defaultDir = rootDir.suffix("/data/default"); Path defaultDir = rootDir.suffix("/data/default");
RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(defaultDir, true); RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(defaultDir, true);
Random rand = ThreadLocalRandom.current();
while (iterator.hasNext()){ while (iterator.hasNext()){
LocatedFileStatus status = iterator.next(); LocatedFileStatus status = iterator.next();
if(!HFile.isHFileFormat(fs, status.getPath())){ if(!HFile.isHFileFormat(fs, status.getPath())){
continue; continue;
} }
if(RandomUtils.nextFloat(0, 100) > chance){ if ((100 * rand.nextFloat()) > chance){
continue; continue;
} }
FSDataOutputStream out = fs.create(status.getPath(), true); FSDataOutputStream out = fs.create(status.getPath(), true);
try { try {
out.write(0); out.write(0);
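RandomUtils.nextFloat(0, 100) and 100 * rand.nextFloat() both give a uniform value in [0, 100), so the skip-with-probability gate above behaves the same after the rewrite. A small sketch of that gate, assuming chance is a percentage in the 0-100 range:

    import java.util.Random;
    import java.util.concurrent.ThreadLocalRandom;

    public class ChanceGateSketch {
      /** Returns true roughly chancePercent percent of the time. */
      static boolean hit(float chancePercent) {
        Random rand = ThreadLocalRandom.current();
        // nextFloat() is uniform in [0, 1), so scaling by 100 reproduces the old
        // RandomUtils.nextFloat(0, 100) range.
        return (100 * rand.nextFloat()) <= chancePercent;
      }

      public static void main(String[] args) {
        int hits = 0;
        for (int i = 0; i < 100_000; i++) {
          if (hit(25f)) {
            hits++;
          }
        }
        System.out.println("expected ~25%, observed " + (hits / 1000.0) + "%");
      }
    }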

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
@ -35,13 +35,11 @@ public class DecreaseMaxHFileSizeAction extends Action {
private final long sleepTime; private final long sleepTime;
private final TableName tableName; private final TableName tableName;
private final Random random;
private Admin admin; private Admin admin;
public DecreaseMaxHFileSizeAction(long sleepTime, TableName tableName) { public DecreaseMaxHFileSizeAction(long sleepTime, TableName tableName) {
this.sleepTime = sleepTime; this.sleepTime = sleepTime;
this.tableName = tableName; this.tableName = tableName;
this.random = new Random();
} }
@Override protected Logger getLogger() { @Override protected Logger getLogger() {
@ -75,7 +73,8 @@ public class DecreaseMaxHFileSizeAction extends Action {
// We don't want to go too far below 1gb. // We don't want to go too far below 1gb.
// So go to about 1gb +/- 512 on each side. // So go to about 1gb +/- 512 on each side.
newValue = Math.max(minFileSize, newValue) - (512 - random.nextInt(1024)); newValue = Math.max(minFileSize, newValue) -
(512 - ThreadLocalRandom.current().nextInt(1024));
// Change the table descriptor. // Change the table descriptor.
TableDescriptor modifiedTable = TableDescriptor modifiedTable =

View File

@ -18,7 +18,8 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import org.apache.commons.lang3.RandomUtils; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -54,12 +55,13 @@ public class DeleteDataFilesAction extends Action {
Path rootDir = CommonFSUtils.getRootDir(getConf()); Path rootDir = CommonFSUtils.getRootDir(getConf());
Path defaultDir = rootDir.suffix("/data/default"); Path defaultDir = rootDir.suffix("/data/default");
RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(defaultDir, true); RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(defaultDir, true);
Random rand = ThreadLocalRandom.current();
while (iterator.hasNext()){ while (iterator.hasNext()){
LocatedFileStatus status = iterator.next(); LocatedFileStatus status = iterator.next();
if(!HFile.isHFileFormat(fs, status.getPath())){ if(!HFile.isHFileFormat(fs, status.getPath())){
continue; continue;
} }
if(RandomUtils.nextFloat(0, 100) > chance){ if ((100 * rand.nextFloat()) > chance){
continue; continue;
} }
fs.delete(status.getPath(), true); fs.delete(status.getPath(), true);

View File

@ -21,7 +21,8 @@ package org.apache.hadoop.hbase.chaos.actions;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import org.apache.commons.lang3.RandomUtils; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.RegionMover; import org.apache.hadoop.hbase.util.RegionMover;
import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell;
@ -47,10 +48,9 @@ public class GracefulRollingRestartRsAction extends RestartActionBaseAction {
public void perform() throws Exception { public void perform() throws Exception {
getLogger().info("Performing action: Rolling restarting non-master region servers"); getLogger().info("Performing action: Rolling restarting non-master region servers");
List<ServerName> selectedServers = selectServers(); List<ServerName> selectedServers = selectServers();
getLogger().info("Disabling balancer to make unloading possible"); getLogger().info("Disabling balancer to make unloading possible");
setBalancer(false, true); setBalancer(false, true);
Random rand = ThreadLocalRandom.current();
for (ServerName server : selectedServers) { for (ServerName server : selectedServers) {
String rsName = server.getAddress().toString(); String rsName = server.getAddress().toString();
try (RegionMover rm = try (RegionMover rm =
@ -64,7 +64,7 @@ public class GracefulRollingRestartRsAction extends RestartActionBaseAction {
} catch (Shell.ExitCodeException e) { } catch (Shell.ExitCodeException e) {
getLogger().info("Problem restarting but presume successful; code={}", e.getExitCode(), e); getLogger().info("Problem restarting but presume successful; code={}", e.getExitCode(), e);
} }
sleep(RandomUtils.nextInt(0, (int)sleepTime)); sleep(rand.nextInt((int)sleepTime));
} }
getLogger().info("Enabling balancer"); getLogger().info("Enabling balancer");
setBalancer(true, true); setBalancer(true, true);

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import java.util.List; import java.util.List;
import org.apache.commons.lang3.RandomUtils; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
@ -61,7 +61,7 @@ public class MergeRandomAdjacentRegionsOfTableAction extends Action {
return; return;
} }
int i = RandomUtils.nextInt(0, regions.size() - 1); int i = ThreadLocalRandom.current().nextInt(regions.size() - 1);
RegionInfo a = regions.get(i++); RegionInfo a = regions.get(i++);
RegionInfo b = regions.get(i); RegionInfo b = regions.get(i);
getLogger().debug("Merging " + a.getRegionNameAsString() + " and " + b.getRegionNameAsString()); getLogger().debug("Merging " + a.getRegionNameAsString() + " and " + b.getRegionNameAsString());

View File

@ -22,7 +22,7 @@ import java.io.IOException;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import org.apache.commons.lang3.RandomUtils; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
@ -93,9 +93,10 @@ public class MoveRegionsOfTableAction extends Action {
return serversList.toArray(new ServerName[0]); return serversList.toArray(new ServerName[0]);
} }
static void moveRegion(Admin admin, ServerName [] servers, RegionInfo regionInfo, Logger logger) { static void moveRegion(Admin admin, ServerName [] servers, RegionInfo regionInfo,
Logger logger) {
try { try {
ServerName destServerName = servers[RandomUtils.nextInt(0, servers.length)]; ServerName destServerName = servers[ThreadLocalRandom.current().nextInt(servers.length)];
logger.debug("Moving {} to {}", regionInfo.getRegionNameAsString(), destServerName); logger.debug("Moving {} to {}", regionInfo.getRegionNameAsString(), destServerName);
admin.move(regionInfo.getEncodedNameAsBytes(), destServerName); admin.move(regionInfo.getEncodedNameAsBytes(), destServerName);
} catch (Exception ex) { } catch (Exception ex) {

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.chaos.actions;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@ -39,12 +40,10 @@ public class RemoveColumnAction extends Action {
private final TableName tableName; private final TableName tableName;
private final Set<String> protectedColumns; private final Set<String> protectedColumns;
private Admin admin; private Admin admin;
private final Random random;
public RemoveColumnAction(TableName tableName, Set<String> protectedColumns) { public RemoveColumnAction(TableName tableName, Set<String> protectedColumns) {
this.tableName = tableName; this.tableName = tableName;
this.protectedColumns = protectedColumns; this.protectedColumns = protectedColumns;
random = new Random();
} }
@Override protected Logger getLogger() { @Override protected Logger getLogger() {
@ -61,15 +60,15 @@ public class RemoveColumnAction extends Action {
public void perform() throws Exception { public void perform() throws Exception {
TableDescriptor tableDescriptor = admin.getDescriptor(tableName); TableDescriptor tableDescriptor = admin.getDescriptor(tableName);
ColumnFamilyDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies(); ColumnFamilyDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
Random rand = ThreadLocalRandom.current();
if (columnDescriptors.length <= (protectedColumns == null ? 1 : protectedColumns.size())) { if (columnDescriptors.length <= (protectedColumns == null ? 1 : protectedColumns.size())) {
return; return;
} }
int index = rand.nextInt(columnDescriptors.length);
int index = random.nextInt(columnDescriptors.length);
while(protectedColumns != null && while(protectedColumns != null &&
protectedColumns.contains(columnDescriptors[index].getNameAsString())) { protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
index = random.nextInt(columnDescriptors.length); index = rand.nextInt(columnDescriptors.length);
} }
byte[] colDescName = columnDescriptors[index].getName(); byte[] colDescName = columnDescriptors[index].getName();
getLogger().debug("Performing action: Removing " + Bytes.toString(colDescName)+ " from " getLogger().debug("Performing action: Removing " + Bytes.toString(colDescName)+ " from "

View File

@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import java.util.List; import java.util.List;
import org.apache.commons.lang3.RandomUtils; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionLocator;
import org.slf4j.Logger; import org.slf4j.Logger;
@ -46,8 +46,9 @@ public class RestartRsHoldingTableAction extends RestartActionBaseAction {
public void perform() throws Exception { public void perform() throws Exception {
getLogger().info( getLogger().info(
"Performing action: Restart random RS holding table " + this.locator.getName()); "Performing action: Restart random RS holding table " + this.locator.getName());
List<HRegionLocation> locations = locator.getAllRegionLocations(); List<HRegionLocation> locations = locator.getAllRegionLocations();
restartRs(locations.get(RandomUtils.nextInt(0, locations.size())).getServerName(), sleepTime); restartRs(locations.get(ThreadLocalRandom.current().nextInt(locations.size()))
.getServerName(),
sleepTime);
} }
} }

View File

@ -25,7 +25,8 @@ import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Objects; import java.util.Objects;
import java.util.Queue; import java.util.Queue;
import org.apache.commons.lang3.RandomUtils; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.slf4j.Logger; import org.slf4j.Logger;
@ -70,10 +71,9 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
getLogger().info("Performing action: Rolling batch restarting {}% of region servers", getLogger().info("Performing action: Rolling batch restarting {}% of region servers",
(int)(ratio * 100)); (int)(ratio * 100));
List<ServerName> selectedServers = selectServers(); List<ServerName> selectedServers = selectServers();
Queue<ServerName> serversToBeKilled = new LinkedList<>(selectedServers); Queue<ServerName> serversToBeKilled = new LinkedList<>(selectedServers);
LinkedList<ServerName> deadServers = new LinkedList<>(); LinkedList<ServerName> deadServers = new LinkedList<>();
Random rand = ThreadLocalRandom.current();
// loop while there are servers to be killed or dead servers to be restarted // loop while there are servers to be killed or dead servers to be restarted
while ((!serversToBeKilled.isEmpty() || !deadServers.isEmpty()) && !context.isStopping()) { while ((!serversToBeKilled.isEmpty() || !deadServers.isEmpty()) && !context.isStopping()) {
@ -87,7 +87,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
action = KillOrStart.START; action = KillOrStart.START;
} else { } else {
// do a coin toss // do a coin toss
action = RandomUtils.nextBoolean() ? KillOrStart.KILL : KillOrStart.START; action = rand.nextBoolean() ? KillOrStart.KILL : KillOrStart.START;
} }
ServerName server; ServerName server;
@ -120,7 +120,7 @@ public class RollingBatchRestartRsAction extends BatchRestartRsAction {
break; break;
} }
sleep(RandomUtils.nextInt(0, (int)sleepTime)); sleep(rand.nextInt((int)sleepTime));
} }
} }

View File

@ -22,7 +22,8 @@ import java.io.IOException;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Queue; import java.util.Queue;
import org.apache.commons.lang3.RandomUtils; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Threads;
@ -66,10 +67,9 @@ public class RollingBatchSuspendResumeRsAction extends Action {
getLogger().info("Performing action: Rolling batch restarting {}% of region servers", getLogger().info("Performing action: Rolling batch restarting {}% of region servers",
(int) (ratio * 100)); (int) (ratio * 100));
List<ServerName> selectedServers = selectServers(); List<ServerName> selectedServers = selectServers();
Queue<ServerName> serversToBeSuspended = new LinkedList<>(selectedServers); Queue<ServerName> serversToBeSuspended = new LinkedList<>(selectedServers);
Queue<ServerName> suspendedServers = new LinkedList<>(); Queue<ServerName> suspendedServers = new LinkedList<>();
Random rand = ThreadLocalRandom.current();
// loop while there are servers to be suspended or suspended servers to be resumed // loop while there are servers to be suspended or suspended servers to be resumed
while ((!serversToBeSuspended.isEmpty() || !suspendedServers.isEmpty()) && !context while ((!serversToBeSuspended.isEmpty() || !suspendedServers.isEmpty()) && !context
.isStopping()) { .isStopping()) {
@ -84,7 +84,7 @@ public class RollingBatchSuspendResumeRsAction extends Action {
action = SuspendOrResume.RESUME; action = SuspendOrResume.RESUME;
} else { } else {
// do a coin toss // do a coin toss
action = RandomUtils.nextBoolean() ? SuspendOrResume.SUSPEND : SuspendOrResume.RESUME; action = rand.nextBoolean() ? SuspendOrResume.SUSPEND : SuspendOrResume.RESUME;
} }
ServerName server; ServerName server;

View File

@ -26,8 +26,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
public class SplitAllRegionOfTableAction extends Action { public class SplitAllRegionOfTableAction extends Action {
private static final Logger LOG = private static final Logger LOG = LoggerFactory.getLogger(SplitAllRegionOfTableAction.class);
LoggerFactory.getLogger(SplitAllRegionOfTableAction.class);
private static final int DEFAULT_MAX_SPLITS = 3; private static final int DEFAULT_MAX_SPLITS = 3;
private static final String MAX_SPLIT_KEY = "hbase.chaosmonkey.action.maxFullTableSplits"; private static final String MAX_SPLIT_KEY = "hbase.chaosmonkey.action.maxFullTableSplits";
@ -39,7 +38,6 @@ public class SplitAllRegionOfTableAction extends Action {
this.tableName = tableName; this.tableName = tableName;
} }
public void init(ActionContext context) throws IOException { public void init(ActionContext context) throws IOException {
super.init(context); super.init(context);
this.maxFullTableSplits = getConf().getInt(MAX_SPLIT_KEY, DEFAULT_MAX_SPLITS); this.maxFullTableSplits = getConf().getInt(MAX_SPLIT_KEY, DEFAULT_MAX_SPLITS);
@ -57,8 +55,6 @@ public class SplitAllRegionOfTableAction extends Action {
if (context.isStopping()) { if (context.isStopping()) {
return; return;
} }
// Don't always split. This should allow splitting of a full table later in the run // Don't always split. This should allow splitting of a full table later in the run
if (ThreadLocalRandom.current().nextDouble() if (ThreadLocalRandom.current().nextDouble()
< (((double) splits) / ((double) maxFullTableSplits)) / ((double) 2)) { < (((double) splits) / ((double) maxFullTableSplits)) / ((double) 2)) {

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.hbase.chaos.actions; package org.apache.hadoop.hbase.chaos.actions;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
@ -31,11 +31,9 @@ import org.slf4j.LoggerFactory;
public class TruncateTableAction extends Action { public class TruncateTableAction extends Action {
private static final Logger LOG = LoggerFactory.getLogger(TruncateTableAction.class); private static final Logger LOG = LoggerFactory.getLogger(TruncateTableAction.class);
private final TableName tableName; private final TableName tableName;
private final Random random;
public TruncateTableAction(String tableName) { public TruncateTableAction(String tableName) {
this.tableName = TableName.valueOf(tableName); this.tableName = TableName.valueOf(tableName);
this.random = new Random();
} }
@Override protected Logger getLogger() { @Override protected Logger getLogger() {
@ -52,7 +50,7 @@ public class TruncateTableAction extends Action {
return; return;
} }
boolean preserveSplits = random.nextBoolean(); boolean preserveSplits = ThreadLocalRandom.current().nextBoolean();
getLogger().info("Performing action: Truncate table {} preserve splits {}", getLogger().info("Performing action: Truncate table {} preserve splits {}",
tableName.getNameAsString(), preserveSplits); tableName.getNameAsString(), preserveSplits);
admin.truncateTable(tableName, preserveSplits); admin.truncateTable(tableName, preserveSplits);

View File

@ -22,8 +22,9 @@ import java.util.ArrayList;
import java.util.HashSet; import java.util.HashSet;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.Set; import java.util.Set;
import org.apache.commons.lang3.RandomUtils; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.junit.Assert; import org.junit.Assert;
@ -64,15 +65,15 @@ public class UnbalanceKillAndRebalanceAction extends Action {
ClusterMetrics status = this.cluster.getClusterMetrics(); ClusterMetrics status = this.cluster.getClusterMetrics();
List<ServerName> victimServers = new LinkedList<>(status.getLiveServerMetrics().keySet()); List<ServerName> victimServers = new LinkedList<>(status.getLiveServerMetrics().keySet());
Set<ServerName> killedServers = new HashSet<>(); Set<ServerName> killedServers = new HashSet<>();
int liveCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_LIVE * victimServers.size()); int liveCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_LIVE * victimServers.size());
int deadCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_DIE * victimServers.size()); int deadCount = (int)Math.ceil(FRC_SERVERS_THAT_HOARD_AND_DIE * victimServers.size());
Assert.assertTrue( Assert.assertTrue(
"There are not enough victim servers: " + victimServers.size(), "There are not enough victim servers: " + victimServers.size(),
liveCount + deadCount < victimServers.size()); liveCount + deadCount < victimServers.size());
Random rand = ThreadLocalRandom.current();
List<ServerName> targetServers = new ArrayList<>(liveCount); List<ServerName> targetServers = new ArrayList<>(liveCount);
for (int i = 0; i < liveCount + deadCount; ++i) { for (int i = 0; i < liveCount + deadCount; ++i) {
int victimIx = RandomUtils.nextInt(0, victimServers.size()); int victimIx = rand.nextInt(victimServers.size());
targetServers.add(victimServers.remove(victimIx)); targetServers.add(victimServers.remove(victimIx));
} }
unbalanceRegions(status, victimServers, targetServers, HOARD_FRC_OF_REGIONS); unbalanceRegions(status, victimServers, targetServers, HOARD_FRC_OF_REGIONS);

View File

@ -21,7 +21,8 @@ package org.apache.hadoop.hbase.chaos.actions;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import org.apache.commons.lang3.RandomUtils; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.slf4j.Logger; import org.slf4j.Logger;
@ -57,8 +58,9 @@ public class UnbalanceRegionsAction extends Action {
List<ServerName> victimServers = new LinkedList<>(status.getLiveServerMetrics().keySet()); List<ServerName> victimServers = new LinkedList<>(status.getLiveServerMetrics().keySet());
int targetServerCount = (int)Math.ceil(fractionOfServers * victimServers.size()); int targetServerCount = (int)Math.ceil(fractionOfServers * victimServers.size());
List<ServerName> targetServers = new ArrayList<>(targetServerCount); List<ServerName> targetServers = new ArrayList<>(targetServerCount);
Random rand = ThreadLocalRandom.current();
for (int i = 0; i < targetServerCount; ++i) { for (int i = 0; i < targetServerCount; ++i) {
int victimIx = RandomUtils.nextInt(0, victimServers.size()); int victimIx = rand.nextInt(victimServers.size());
targetServers.add(victimServers.remove(victimIx)); targetServers.add(victimServers.remove(victimIx));
} }
unbalanceRegions(status, victimServers, targetServers, fractionOfRegions); unbalanceRegions(status, victimServers, targetServers, fractionOfRegions);

View File

@ -26,13 +26,11 @@ import java.util.Objects;
import java.util.Properties; import java.util.Properties;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hbase.IntegrationTestingUtility; import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.chaos.policies.Policy; import org.apache.hadoop.hbase.chaos.policies.Policy;
import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
/** /**
@ -40,7 +38,6 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto
*/ */
public class PolicyBasedChaosMonkey extends ChaosMonkey { public class PolicyBasedChaosMonkey extends ChaosMonkey {
private static final Logger LOG = LoggerFactory.getLogger(PolicyBasedChaosMonkey.class);
private static final long ONE_SEC = 1000; private static final long ONE_SEC = 1000;
private static final long ONE_MIN = 60 * ONE_SEC; private static final long ONE_MIN = 60 * ONE_SEC;
@ -93,7 +90,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey {
/** Selects a random item from the given items */ /** Selects a random item from the given items */
public static <T> T selectRandomItem(T[] items) { public static <T> T selectRandomItem(T[] items) {
return items[RandomUtils.nextInt(0, items.length)]; return items[ThreadLocalRandom.current().nextInt(items.length)];
} }
/** Selects a random item from the given items with weights*/ /** Selects a random item from the given items with weights*/
@ -103,7 +100,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey {
totalWeight += pair.getSecond(); totalWeight += pair.getSecond();
} }
int cutoff = RandomUtils.nextInt(0, totalWeight); int cutoff = ThreadLocalRandom.current().nextInt(totalWeight);
int cummulative = 0; int cummulative = 0;
T item = null; T item = null;
@ -127,7 +124,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey {
List<T> originalItems = Arrays.asList(items); List<T> originalItems = Arrays.asList(items);
Collections.shuffle(originalItems); Collections.shuffle(originalItems);
int startIndex = RandomUtils.nextInt(0, items.length - selectedNumber); int startIndex = ThreadLocalRandom.current().nextInt(items.length - selectedNumber);
return originalItems.subList(startIndex, startIndex + selectedNumber); return originalItems.subList(startIndex, startIndex + selectedNumber);
} }

View File

@ -18,7 +18,7 @@
package org.apache.hadoop.hbase.chaos.policies; package org.apache.hadoop.hbase.chaos.policies;
import org.apache.commons.lang3.RandomUtils; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Threads;
@ -33,7 +33,7 @@ public abstract class PeriodicPolicy extends Policy {
@Override @Override
public void run() { public void run() {
// Add some jitter. // Add some jitter.
int jitter = RandomUtils.nextInt(0, (int) periodMs); int jitter = ThreadLocalRandom.current().nextInt((int)periodMs);
LOG.info("Sleeping for {} ms to add jitter", jitter); LOG.info("Sleeping for {} ms to add jitter", jitter);
Threads.sleep(jitter); Threads.sleep(jitter);
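The jitter above is a single bounded draw used as a sleep so concurrent periodic policies do not fire in lockstep. A tiny sketch of that start-up jitter, with Thread.sleep standing in for the HBase Threads.sleep helper:

    import java.util.concurrent.ThreadLocalRandom;

    public class JitterSketch {
      public static void main(String[] args) throws InterruptedException {
        long periodMs = 5_000;
        // Sleep a random fraction of the period before the periodic work starts.
        int jitter = ThreadLocalRandom.current().nextInt((int) periodMs);
        System.out.println("sleeping " + jitter + " ms of jitter");
        Thread.sleep(jitter);
        // ... periodic work would begin here ...
      }
    }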

View File

@ -30,13 +30,12 @@ import java.net.InetSocketAddress;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable; import java.util.concurrent.Callable;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.codec.Codec;
@ -88,7 +87,6 @@ public class IntegrationTestRpcClient {
} }
class Cluster { class Cluster {
Random random = new Random();
ReadWriteLock lock = new ReentrantReadWriteLock(); ReadWriteLock lock = new ReentrantReadWriteLock();
HashMap<InetSocketAddress, RpcServer> rpcServers = new HashMap<>(); HashMap<InetSocketAddress, RpcServer> rpcServers = new HashMap<>();
List<RpcServer> serverList = new ArrayList<>(); List<RpcServer> serverList = new ArrayList<>();
@ -134,7 +132,7 @@ public class IntegrationTestRpcClient {
return; return;
} }
int size = rpcServers.size(); int size = rpcServers.size();
int rand = random.nextInt(size); int rand = ThreadLocalRandom.current().nextInt(size);
rpcServer = serverList.remove(rand); rpcServer = serverList.remove(rand);
InetSocketAddress address = rpcServer.getListenerAddress(); InetSocketAddress address = rpcServer.getListenerAddress();
if (address == null) { if (address == null) {
@ -176,7 +174,7 @@ public class IntegrationTestRpcClient {
lock.readLock().lock(); lock.readLock().lock();
try { try {
int size = rpcServers.size(); int size = rpcServers.size();
int rand = random.nextInt(size); int rand = ThreadLocalRandom.current().nextInt(size);
return serverList.get(rand); return serverList.get(rand);
} finally { } finally {
lock.readLock().unlock(); lock.readLock().unlock();
@ -186,7 +184,6 @@ public class IntegrationTestRpcClient {
static class MiniChaosMonkey extends Thread { static class MiniChaosMonkey extends Thread {
AtomicBoolean running = new AtomicBoolean(true); AtomicBoolean running = new AtomicBoolean(true);
Random random = new Random();
AtomicReference<Exception> exception = new AtomicReference<>(null); AtomicReference<Exception> exception = new AtomicReference<>(null);
Cluster cluster; Cluster cluster;
@ -197,7 +194,7 @@ public class IntegrationTestRpcClient {
@Override @Override
public void run() { public void run() {
while (running.get()) { while (running.get()) {
if (random.nextBoolean()) { if (ThreadLocalRandom.current().nextBoolean()) {
//start a server //start a server
try { try {
cluster.startServer(); cluster.startServer();
@ -238,7 +235,6 @@ public class IntegrationTestRpcClient {
Cluster cluster; Cluster cluster;
String id; String id;
long numCalls = 0; long numCalls = 0;
Random random = new Random();
public SimpleClient(Cluster cluster, AbstractRpcClient<?> rpcClient, String id) { public SimpleClient(Cluster cluster, AbstractRpcClient<?> rpcClient, String id) {
this.cluster = cluster; this.cluster = cluster;
@ -250,7 +246,7 @@ public class IntegrationTestRpcClient {
@Override @Override
public void run() { public void run() {
while (running.get()) { while (running.get()) {
boolean isBigPayload = random.nextBoolean(); boolean isBigPayload = ThreadLocalRandom.current().nextBoolean();
String message = isBigPayload ? BIG_PAYLOAD : id + numCalls; String message = isBigPayload ? BIG_PAYLOAD : id + numCalls;
EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build(); EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build();
EchoResponseProto ret; EchoResponseProto ret;
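MiniChaosMonkey and SimpleClient are Thread subclasses, so replacing their per-instance Random fields with ThreadLocalRandom.current() calls inside run() keeps every worker on its own generator with no shared state. A stripped-down sketch of that shape, keeping only the coin-toss payload choice; the class and payload names are illustrative:

    import java.util.concurrent.ThreadLocalRandom;

    public class ClientThreadSketch extends Thread {
      @Override
      public void run() {
        for (int sent = 0; sent < 5; sent++) {
          // Each worker thread draws from its own generator; no field, no contention.
          boolean isBigPayload = ThreadLocalRandom.current().nextBoolean();
          System.out.println(getName() + " sending " + (isBigPayload ? "big" : "small") + " payload");
        }
      }

      public static void main(String[] args) throws InterruptedException {
        ClientThreadSketch a = new ClientThreadSketch();
        ClientThreadSketch b = new ClientThreadSketch();
        a.start();
        b.start();
        a.join();
        b.join();
      }
    }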

View File

@ -27,10 +27,9 @@ import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Optional; import java.util.Optional;
import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
@ -155,7 +154,6 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
public static class SlowMeCoproScanOperations implements RegionCoprocessor, RegionObserver { public static class SlowMeCoproScanOperations implements RegionCoprocessor, RegionObserver {
static final AtomicLong sleepTime = new AtomicLong(2000); static final AtomicLong sleepTime = new AtomicLong(2000);
Random r = new Random();
AtomicLong countOfNext = new AtomicLong(0); AtomicLong countOfNext = new AtomicLong(0);
AtomicLong countOfOpen = new AtomicLong(0); AtomicLong countOfOpen = new AtomicLong(0);
public SlowMeCoproScanOperations() {} public SlowMeCoproScanOperations() {}
@ -379,7 +377,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
taskId = taskId + iteration * numMapTasks; taskId = taskId + iteration * numMapTasks;
numMapTasks = numMapTasks * numIterations; numMapTasks = numMapTasks * numIterations;
long chainId = Math.abs(new Random().nextLong()); long chainId = Math.abs(ThreadLocalRandom.current().nextLong());
chainId = chainId - (chainId % numMapTasks) + taskId; // ensure that chainId is unique per task and across iterations chainId = chainId - (chainId % numMapTasks) + taskId; // ensure that chainId is unique per task and across iterations
LongWritable[] keys = new LongWritable[] {new LongWritable(chainId)}; LongWritable[] keys = new LongWritable[] {new LongWritable(chainId)};
@ -397,8 +395,6 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
public static class LinkedListCreationMapper public static class LinkedListCreationMapper
extends Mapper<LongWritable, LongWritable, ImmutableBytesWritable, KeyValue> { extends Mapper<LongWritable, LongWritable, ImmutableBytesWritable, KeyValue> {
private Random rand = new Random();
@Override @Override
protected void map(LongWritable key, LongWritable value, Context context) protected void map(LongWritable key, LongWritable value, Context context)
throws IOException, InterruptedException { throws IOException, InterruptedException {
@ -410,6 +406,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
long chainLength = context.getConfiguration().getLong(CHAIN_LENGTH_KEY, CHAIN_LENGTH); long chainLength = context.getConfiguration().getLong(CHAIN_LENGTH_KEY, CHAIN_LENGTH);
long nextRow = getNextRow(0, chainLength); long nextRow = getNextRow(0, chainLength);
byte[] valueBytes = new byte[50];
for (long i = 0; i < chainLength; i++) { for (long i = 0; i < chainLength; i++) {
byte[] rk = Bytes.toBytes(currentRow); byte[] rk = Bytes.toBytes(currentRow);
@ -419,9 +416,8 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
// What link in the chain this is. // What link in the chain this is.
KeyValue sortKv = new KeyValue(rk, SORT_FAM, chainIdArray, Bytes.toBytes(i)); KeyValue sortKv = new KeyValue(rk, SORT_FAM, chainIdArray, Bytes.toBytes(i));
// Added data so that large stores are created. // Added data so that large stores are created.
KeyValue dataKv = new KeyValue(rk, DATA_FAM, chainIdArray, Bytes.random(valueBytes);
Bytes.toBytes(RandomStringUtils.randomAlphabetic(50)) KeyValue dataKv = new KeyValue(rk, DATA_FAM, chainIdArray, valueBytes);
);
// Emit the key values. // Emit the key values.
context.write(new ImmutableBytesWritable(rk), linkKv); context.write(new ImmutableBytesWritable(rk), linkKv);
@ -435,7 +431,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
/** Returns a unique row id within this chain for this index */ /** Returns a unique row id within this chain for this index */
private long getNextRow(long index, long chainLength) { private long getNextRow(long index, long chainLength) {
long nextRow = Math.abs(rand.nextLong()); long nextRow = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE);
// use significant bits from the random number, but pad with index to ensure it is unique // use significant bits from the random number, but pad with index to ensure it is unique
// this also ensures that we do not reuse row = 0 // this also ensures that we do not reuse row = 0
// row collisions from multiple mappers are fine, since we guarantee unique chainIds // row collisions from multiple mappers are fine, since we guarantee unique chainIds
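getNextRow now uses the bounded ThreadLocalRandom.current().nextLong(Long.MAX_VALUE) rather than Math.abs of an unbounded nextLong(); the bounded draw is never negative, while Math.abs(Long.MIN_VALUE) remains negative. A small sketch contrasting the two:

    import java.util.concurrent.ThreadLocalRandom;

    public class BoundedNextLongSketch {
      public static void main(String[] args) {
        // The corner case the old Math.abs(rand.nextLong()) pattern could hit:
        System.out.println(Math.abs(Long.MIN_VALUE)); // prints -9223372036854775808

        // Bounded draw: uniform in [0, Long.MAX_VALUE), always non-negative.
        long nextRow = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE);
        System.out.println(nextRow >= 0); // always true
      }
    }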

View File

@ -22,18 +22,17 @@ import java.io.DataOutput;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException; import java.io.InterruptedIOException;
import java.security.SecureRandom;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.SortedSet; import java.util.SortedSet;
import java.util.TreeSet; import java.util.TreeSet;
import java.util.UUID; import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured; import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -344,7 +343,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
static class GeneratorRecordReader extends RecordReader<BytesWritable,NullWritable> { static class GeneratorRecordReader extends RecordReader<BytesWritable,NullWritable> {
private long count; private long count;
private long numNodes; private long numNodes;
private Random64 rand; // Use Random64 to avoid issue described in HBASE-21256.
private Random64 rand = new Random64();
@Override @Override
public void close() throws IOException { public void close() throws IOException {
@ -371,15 +371,12 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
public void initialize(InputSplit arg0, TaskAttemptContext context) public void initialize(InputSplit arg0, TaskAttemptContext context)
throws IOException, InterruptedException { throws IOException, InterruptedException {
numNodes = context.getConfiguration().getLong(GENERATOR_NUM_ROWS_PER_MAP_KEY, 25000000); numNodes = context.getConfiguration().getLong(GENERATOR_NUM_ROWS_PER_MAP_KEY, 25000000);
// Use Random64 to avoid issue described in HBASE-21256.
rand = new Random64();
} }
@Override @Override
public boolean nextKeyValue() throws IOException, InterruptedException { public boolean nextKeyValue() throws IOException, InterruptedException {
return count++ < numNodes; return count++ < numNodes;
} }
} }
@Override @Override
@ -457,6 +454,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
byte[] tinyValue = new byte[] { 't' }; byte[] tinyValue = new byte[] { 't' };
byte[] bigValue = null; byte[] bigValue = null;
Configuration conf; Configuration conf;
// Use Random64 to avoid issue described in HBASE-21256.
private Random64 rand = new Random64();
volatile boolean walkersStop; volatile boolean walkersStop;
int numWalkers; int numWalkers;
@ -494,7 +493,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
BIG_FAMILY_VALUE_SIZE_KEY, n, ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, limit); BIG_FAMILY_VALUE_SIZE_KEY, n, ConnectionConfiguration.MAX_KEYVALUE_SIZE_KEY, limit);
bigValue = new byte[n]; bigValue = new byte[n];
ThreadLocalRandom.current().nextBytes(bigValue); rand.nextBytes(bigValue);
LOG.info("Create a bigValue with " + n + " bytes."); LOG.info("Create a bigValue with " + n + " bytes.");
} }
@ -642,12 +641,10 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
ConcurrentWalker walker; ConcurrentWalker walker;
Configuration conf; Configuration conf;
Context context; Context context;
Random rand;
public ContinuousConcurrentWalker(Configuration conf, Context context) { public ContinuousConcurrentWalker(Configuration conf, Context context) {
this.conf = conf; this.conf = conf;
this.context = context; this.context = context;
rand = new Random();
} }
@Override @Override
@ -681,7 +678,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
if (walkersStop) { if (walkersStop) {
throw new InterruptedException(); throw new InterruptedException();
} }
return flushedLoops.get(rand.nextInt(flushedLoops.size())); return flushedLoops.get(ThreadLocalRandom.current().nextInt(flushedLoops.size()));
} }
} }
} }
@ -1761,7 +1758,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
if (cmd.hasOption('n')) { if (cmd.hasOption('n')) {
maxQueries = Long.parseLong(cmd.getOptionValue("n")); maxQueries = Long.parseLong(cmd.getOptionValue("n"));
} }
Random rand = new SecureRandom();
boolean isSpecificStart = cmd.hasOption('s'); boolean isSpecificStart = cmd.hasOption('s');
byte[] startKey = isSpecificStart ? Bytes.toBytesBinary(cmd.getOptionValue('s')) : null; byte[] startKey = isSpecificStart ? Bytes.toBytesBinary(cmd.getOptionValue('s')) : null;
@ -1776,7 +1772,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
while (numQueries < maxQueries && (numQueries == 0 || !isSpecificStart)) { while (numQueries < maxQueries && (numQueries == 0 || !isSpecificStart)) {
if (!isSpecificStart) { if (!isSpecificStart) {
startKey = new byte[ROWKEY_LENGTH]; startKey = new byte[ROWKEY_LENGTH];
rand.nextBytes(startKey); Bytes.random(startKey);
} }
CINode node = findStartNode(table, startKey); CINode node = findStartNode(table, startKey);
if (node == null && isSpecificStart) { if (node == null && isSpecificStart) {
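
A note on the two generators above: java.util.Random cannot be used for the linked-list node keys because its 48-bit seed means it can never produce all 2^64 long values, which is the collision problem described in HBASE-21256, so the per-task Random64 field stays; the walker's start keys, on the other hand, now go through the shared Bytes#random helper instead of a one-off SecureRandom. A minimal sketch of that split, assuming org.apache.hadoop.hbase.util.Random64 exposes nextLong() and that ROWKEY_LENGTH is 16 (an illustrative value, not taken from the patch):

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Random64;

public class RowKeySketch {
  private static final int ROWKEY_LENGTH = 16; // illustrative, not from the patch
  // Full 64-bit stream; java.util.Random would only ever cover a 2^48 subset.
  private final Random64 rand64 = new Random64();

  long nextNodeId() {
    return rand64.nextLong();
  }

  byte[] nextWalkStartKey() {
    byte[] key = new byte[ROWKEY_LENGTH];
    Bytes.random(key); // shared helper instead of an ad-hoc SecureRandom
    return key;
  }
}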

View File

@ -23,9 +23,9 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.regex.Matcher; import java.util.regex.Matcher;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -189,10 +189,7 @@ public void cleanUpCluster() throws Exception {
protected BufferedMutator mutator; protected BufferedMutator mutator;
protected Configuration conf; protected Configuration conf;
protected int numBackReferencesPerRow; protected int numBackReferencesPerRow;
protected String shortTaskId; protected String shortTaskId;
protected Random rand = new Random();
protected Counter rowsWritten, refsWritten; protected Counter rowsWritten, refsWritten;
@Override @Override
@ -229,8 +226,8 @@ public void cleanUpCluster() throws Exception {
String suffix = "/" + shortTaskId; String suffix = "/" + shortTaskId;
byte[] row = Bytes.add(new byte[8], Bytes.toBytes(suffix)); byte[] row = Bytes.add(new byte[8], Bytes.toBytes(suffix));
int BLOCK_SIZE = (int)(recordsToWrite / 100); int BLOCK_SIZE = (int)(recordsToWrite / 100);
Random rand = ThreadLocalRandom.current();
for (long i = 0; i < recordsToWrite;) { for (long i = 0; i < recordsToWrite;) {
long blockStart = i; long blockStart = i;
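
Several hunks in this commit use the binding trick seen here: rather than keeping a Random field per mapper, the current thread's generator is captured once into a java.util.Random local (ThreadLocalRandom extends Random), so the rest of the method keeps its Random-typed calls unchanged. A minimal sketch of the pattern, with the loop body reduced to a hypothetical nextInt call:

import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

public class ThreadLocalRandomBinding {
  void writeRecords(long recordsToWrite) {
    // No allocation and no seeding; one caveat is that setSeed() on this
    // instance throws UnsupportedOperationException.
    Random rand = ThreadLocalRandom.current();
    for (long i = 0; i < recordsToWrite; i++) {
      int refs = rand.nextInt(10); // stands in for the mapper's real per-row logic
      assert refs >= 0 && refs < 10;
    }
  }
}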

View File

@ -23,9 +23,9 @@ import java.util.List;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HRegionLocation;
@ -331,7 +331,7 @@ public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends Integr
@Override @Override
protected long getNextKeyToRead() { protected long getNextKeyToRead() {
// always read a random key, assuming that the writer has finished writing all keys // always read a random key, assuming that the writer has finished writing all keys
long key = startKey + Math.abs(RandomUtils.nextLong()) long key = startKey + ThreadLocalRandom.current().nextLong(Long.MAX_VALUE)
% (endKey - startKey); % (endKey - startKey);
return key; return key;
} }
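
The read key above still combines nextLong(Long.MAX_VALUE) with a modulo over (endKey - startKey). ThreadLocalRandom also offers origin/bound overloads that avoid both the modulo bias and the Math.abs dance; the sketch below shows that alternative only, it is not what the patch does:

import java.util.concurrent.ThreadLocalRandom;

public final class BoundedKeyPicker {
  private BoundedKeyPicker() {
  }

  // Uniform key in [startKey, endKey), assuming startKey < endKey.
  static long nextKeyToRead(long startKey, long endKey) {
    return ThreadLocalRandom.current().nextLong(startKey, endKey);
  }
}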

View File

@ -23,7 +23,8 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException; import java.io.IOException;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
@ -57,7 +58,6 @@ import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.ToolRunner;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
/** /**
@ -166,9 +166,10 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
InterruptedException { InterruptedException {
String suffix = "/" + shortTaskId; String suffix = "/" + shortTaskId;
int BLOCK_SIZE = (int) (recordsToWrite / 100); int BLOCK_SIZE = (int) (recordsToWrite / 100);
Random rand = ThreadLocalRandom.current();
for (long i = 0; i < recordsToWrite;) { for (long i = 0; i < recordsToWrite;) {
for (long idx = 0; idx < BLOCK_SIZE && i < recordsToWrite; idx++, i++) { for (long idx = 0; idx < BLOCK_SIZE && i < recordsToWrite; idx++, i++) {
int expIdx = rand.nextInt(BLOCK_SIZE) % VISIBILITY_EXPS_COUNT; int expIdx = rand.nextInt(VISIBILITY_EXPS_COUNT);
String exp = VISIBILITY_EXPS[expIdx]; String exp = VISIBILITY_EXPS[expIdx];
byte[] row = Bytes.add(Bytes.toBytes(i), Bytes.toBytes(suffix), Bytes.toBytes(exp)); byte[] row = Bytes.add(Bytes.toBytes(i), Bytes.toBytes(suffix), Bytes.toBytes(exp));
Put p = new Put(row); Put p = new Put(row);
@ -379,10 +380,9 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
return 0; return 0;
} }
@SuppressWarnings("unchecked")
@Override @Override
protected void processOptions(CommandLine cmd) { protected void processOptions(CommandLine cmd) {
List args = cmd.getArgList(); List<String> args = cmd.getArgList();
if (args.size() > 0) { if (args.size() > 0) {
printUsage(); printUsage();
throw new RuntimeException("No args expected."); throw new RuntimeException("No args expected.");

View File

@ -27,6 +27,7 @@ import java.util.Random;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
@ -48,7 +49,6 @@ import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
@Category(IntegrationTests.class) @Category(IntegrationTests.class)
@ -63,7 +63,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
private TableName tableName = TableName.valueOf(TABLE_NAME_DEFAULT); private TableName tableName = TableName.valueOf(TABLE_NAME_DEFAULT);
private byte[] familyName = Bytes.toBytes(COLUMN_FAMILY_DEFAULT); private byte[] familyName = Bytes.toBytes(COLUMN_FAMILY_DEFAULT);
private IntegrationTestingUtility util; private IntegrationTestingUtility util;
private Random random = new Random();
private Admin admin; private Admin admin;
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
@ -227,17 +226,18 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException { private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException {
LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000); LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000);
BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName); BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
Random rand = ThreadLocalRandom.current();
byte[] value = new byte[300]; byte[] value = new byte[300];
for (int x = 0; x < 5000; x++) { for (int x = 0; x < 5000; x++) {
Span span = TraceUtil.getGlobalTracer().spanBuilder("insertData").startSpan(); Span span = TraceUtil.getGlobalTracer().spanBuilder("insertData").startSpan();
try (Scope scope = span.makeCurrent()) { try (Scope scope = span.makeCurrent()) {
for (int i = 0; i < 5; i++) { for (int i = 0; i < 5; i++) {
long rk = random.nextLong(); long rk = rand.nextLong();
rowKeys.add(rk); rowKeys.add(rk);
Put p = new Put(Bytes.toBytes(rk)); Put p = new Put(Bytes.toBytes(rk));
for (int y = 0; y < 10; y++) { for (int y = 0; y < 10; y++) {
random.nextBytes(value); Bytes.random(value);
p.addColumn(familyName, Bytes.toBytes(random.nextLong()), value); p.addColumn(familyName, Bytes.toBytes(rand.nextLong()), value);
} }
ht.mutate(p); ht.mutate(p);
} }
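
This file follows the same normalization: the 300-byte value payload is filled in place with Bytes.random(byte[]) while row keys and qualifiers come from the thread-local generator. A minimal sketch of building one such Put, assuming a hypothetical column family name "f":

import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class TracePutSketch {
  private final byte[] familyName = Bytes.toBytes("f"); // hypothetical family

  Put randomPut() {
    Random rand = ThreadLocalRandom.current();
    byte[] value = new byte[300];
    Put p = new Put(Bytes.toBytes(rand.nextLong()));
    for (int y = 0; y < 10; y++) {
      Bytes.random(value); // refill the shared buffer; addColumn copies the bytes
      p.addColumn(familyName, Bytes.toBytes(rand.nextLong()), value);
    }
    return p;
  }
}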

View File

@ -45,6 +45,7 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.Future; import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured; import org.apache.hadoop.conf.Configured;
@ -1516,7 +1517,6 @@ public class PerformanceEvaluation extends Configured implements Tool {
static class AsyncRandomReadTest extends AsyncTableTest { static class AsyncRandomReadTest extends AsyncTableTest {
private final Consistency consistency; private final Consistency consistency;
private ArrayList<Get> gets; private ArrayList<Get> gets;
private Random rd = new Random();
AsyncRandomReadTest(AsyncConnection con, TestOptions options, Status status) { AsyncRandomReadTest(AsyncConnection con, TestOptions options, Status status) {
super(con, options, status); super(con, options, status);
@ -1530,7 +1530,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
@Override @Override
boolean testRow(final int i, final long startTime) throws IOException, InterruptedException { boolean testRow(final int i, final long startTime) throws IOException, InterruptedException {
if (opts.randomSleep > 0) { if (opts.randomSleep > 0) {
Thread.sleep(rd.nextInt(opts.randomSleep)); Thread.sleep(ThreadLocalRandom.current().nextInt(opts.randomSleep));
} }
Get get = new Get(getRandomRow(this.rand, opts.totalRows)); Get get = new Get(getRandomRow(this.rand, opts.totalRows));
for (int family = 0; family < opts.families; family++) { for (int family = 0; family < opts.families; family++) {
@ -1938,8 +1938,6 @@ public class PerformanceEvaluation extends Configured implements Tool {
static class RandomReadTest extends TableTest { static class RandomReadTest extends TableTest {
private final Consistency consistency; private final Consistency consistency;
private ArrayList<Get> gets; private ArrayList<Get> gets;
private Random rd = new Random();
private long numOfReplyFromReplica = 0;
RandomReadTest(Connection con, TestOptions options, Status status) { RandomReadTest(Connection con, TestOptions options, Status status) {
super(con, options, status); super(con, options, status);
@ -1953,7 +1951,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
@Override @Override
boolean testRow(final int i, final long startTime) throws IOException, InterruptedException { boolean testRow(final int i, final long startTime) throws IOException, InterruptedException {
if (opts.randomSleep > 0) { if (opts.randomSleep > 0) {
Thread.sleep(rd.nextInt(opts.randomSleep)); Thread.sleep(ThreadLocalRandom.current().nextInt(opts.randomSleep));
} }
Get get = new Get(getRandomRow(this.rand, opts.totalRows)); Get get = new Get(getRandomRow(this.rand, opts.totalRows));
for (int family = 0; family < opts.families; family++) { for (int family = 0; family < opts.families; family++) {

View File

@ -37,6 +37,8 @@ import java.util.LinkedList;
import java.util.NoSuchElementException; import java.util.NoSuchElementException;
import java.util.Queue; import java.util.Queue;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -153,11 +155,11 @@ public class TestPerformanceEvaluation {
opts.setNumClientThreads(2); opts.setNumClientThreads(2);
opts = PerformanceEvaluation.calculateRowsAndSize(opts); opts = PerformanceEvaluation.calculateRowsAndSize(opts);
assertEquals(1000, opts.getPerClientRunRows()); assertEquals(1000, opts.getPerClientRunRows());
Random random = new Random();
// assuming we will get one before this loop expires // assuming we will get one before this loop expires
boolean foundValue = false; boolean foundValue = false;
Random rand = ThreadLocalRandom.current();
for (int i = 0; i < 10000000; i++) { for (int i = 0; i < 10000000; i++) {
int randomRow = PerformanceEvaluation.generateRandomRow(random, opts.totalRows); int randomRow = PerformanceEvaluation.generateRandomRow(rand, opts.totalRows);
if (randomRow > 1000) { if (randomRow > 1000) {
foundValue = true; foundValue = true;
break; break;

View File

@ -41,6 +41,7 @@ import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.Stream; import java.util.stream.Stream;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -96,7 +97,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem; import org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
import org.apache.hadoop.hbase.security.SecurityConstants;
import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
@ -198,14 +198,13 @@ public class TestHFileOutputFormat2 {
int taskId = context.getTaskAttemptID().getTaskID().getId(); int taskId = context.getTaskAttemptID().getTaskID().getId();
assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
Random random = new Random();
byte[] key; byte[] key;
for (int j = 0; j < tables.length; ++j) { for (int j = 0; j < tables.length; ++j) {
for (int i = 0; i < ROWSPERSPLIT; i++) { for (int i = 0; i < ROWSPERSPLIT; i++) {
random.nextBytes(keyBytes); Bytes.random(keyBytes);
// Ensure that unique tasks generate unique keys // Ensure that unique tasks generate unique keys
keyBytes[keyLength - 1] = (byte) (taskId & 0xFF); keyBytes[keyLength - 1] = (byte) (taskId & 0xFF);
random.nextBytes(valBytes); Bytes.random(valBytes);
key = keyBytes; key = keyBytes;
if (multiTableMapper) { if (multiTableMapper) {
key = MultiTableHFileOutputFormat.createCompositeKey(tables[j].getName(), keyBytes); key = MultiTableHFileOutputFormat.createCompositeKey(tables[j].getName(), keyBytes);
@ -268,14 +267,13 @@ public class TestHFileOutputFormat2 {
int taskId = context.getTaskAttemptID().getTaskID().getId(); int taskId = context.getTaskAttemptID().getTaskID().getId();
assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
Random random = new Random();
byte[] key; byte[] key;
for (int j = 0; j < tables.length; ++j) { for (int j = 0; j < tables.length; ++j) {
for (int i = 0; i < ROWSPERSPLIT; i++) { for (int i = 0; i < ROWSPERSPLIT; i++) {
random.nextBytes(keyBytes); Bytes.random(keyBytes);
// Ensure that unique tasks generate unique keys // Ensure that unique tasks generate unique keys
keyBytes[keyLength - 1] = (byte) (taskId & 0xFF); keyBytes[keyLength - 1] = (byte) (taskId & 0xFF);
random.nextBytes(valBytes); Bytes.random(valBytes);
key = keyBytes; key = keyBytes;
if (multiTableMapper) { if (multiTableMapper) {
key = MultiTableHFileOutputFormat.createCompositeKey(tables[j].getName(), keyBytes); key = MultiTableHFileOutputFormat.createCompositeKey(tables[j].getName(), keyBytes);
@ -556,7 +554,7 @@ public class TestHFileOutputFormat2 {
} }
private byte [][] generateRandomStartKeys(int numKeys) { private byte [][] generateRandomStartKeys(int numKeys) {
Random random = new Random(); Random random = ThreadLocalRandom.current();
byte[][] ret = new byte[numKeys][]; byte[][] ret = new byte[numKeys][];
// first region start key is always empty // first region start key is always empty
ret[0] = HConstants.EMPTY_BYTE_ARRAY; ret[0] = HConstants.EMPTY_BYTE_ARRAY;
@ -568,7 +566,7 @@ public class TestHFileOutputFormat2 {
} }
private byte[][] generateRandomSplitKeys(int numKeys) { private byte[][] generateRandomSplitKeys(int numKeys) {
Random random = new Random(); Random random = ThreadLocalRandom.current();
byte[][] ret = new byte[numKeys][]; byte[][] ret = new byte[numKeys][];
for (int i = 0; i < numKeys; i++) { for (int i = 0; i < numKeys; i++) {
ret[i] = ret[i] =
@ -1222,13 +1220,10 @@ public class TestHFileOutputFormat2 {
int taskId = context.getTaskAttemptID().getTaskID().getId(); int taskId = context.getTaskAttemptID().getTaskID().getId();
assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!";
final byte [] qualifier = Bytes.toBytes("data"); final byte [] qualifier = Bytes.toBytes("data");
Random random = new Random();
for (int i = 0; i < numRows; i++) { for (int i = 0; i < numRows; i++) {
Bytes.putInt(keyBytes, 0, i); Bytes.putInt(keyBytes, 0, i);
random.nextBytes(valBytes); Bytes.random(valBytes);
ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes); ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);
for (byte[] family : families) { for (byte[] family : families) {
Cell kv = new KeyValue(keyBytes, family, qualifier, valBytes); Cell kv = new KeyValue(keyBytes, family, qualifier, valBytes);
writer.write(key, kv); writer.write(key, kv);

View File

@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException; import java.io.InterruptedIOException;
import java.lang.reflect.Constructor; import java.lang.reflect.Constructor;
import java.security.SecureRandom;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
@ -294,7 +293,7 @@ public class LoadTestTool extends AbstractHBaseTool {
} }
if (cipher != null) { if (cipher != null) {
byte[] keyBytes = new byte[cipher.getKeyLength()]; byte[] keyBytes = new byte[cipher.getKeyLength()];
new SecureRandom().nextBytes(keyBytes); Bytes.secureRandom(keyBytes);
columnDescBuilder.setEncryptionType(cipher.getName()); columnDescBuilder.setEncryptionType(cipher.getName());
columnDescBuilder.setEncryptionKey( columnDescBuilder.setEncryptionKey(
EncryptionUtil.wrapKey(conf, EncryptionUtil.wrapKey(conf,

View File

@ -21,6 +21,8 @@ import static org.junit.Assert.assertEquals;
import java.util.Arrays; import java.util.Arrays;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
@ -71,7 +73,7 @@ public class TestFastLongHistogram {
// assumes the uniform distribution // assumes the uniform distribution
FastLongHistogram hist = new FastLongHistogram(100, 0, 100); FastLongHistogram hist = new FastLongHistogram(100, 0, 100);
Random rand = new Random(); Random rand = ThreadLocalRandom.current();
for (int n = 0; n < 10; n++) { for (int n = 0; n < 10; n++) {
for (int i = 0; i < 900; i++) { for (int i = 0; i < 900; i++) {

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.procedure2.store; package org.apache.hadoop.hbase.procedure2.store;
import java.io.IOException; import java.io.IOException;
import java.util.Random;
import java.util.concurrent.Callable; import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
@ -31,6 +30,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
@ -106,7 +106,7 @@ public abstract class ProcedureStorePerformanceEvaluation<T extends ProcedureSto
syncType) : "sync argument can only accept one of these three values: hsync, hflush, nosync"; syncType) : "sync argument can only accept one of these three values: hsync, hflush, nosync";
stateSize = getOptionAsInt(cmd, STATE_SIZE_OPTION.getOpt(), DEFAULT_STATE_SIZE); stateSize = getOptionAsInt(cmd, STATE_SIZE_OPTION.getOpt(), DEFAULT_STATE_SIZE);
SERIALIZED_STATE = new byte[stateSize]; SERIALIZED_STATE = new byte[stateSize];
new Random(12345).nextBytes(SERIALIZED_STATE); Bytes.random(SERIALIZED_STATE);
} }
private void setUpProcedureStore() throws IOException { private void setUpProcedureStore() throws IOException {

View File

@ -22,8 +22,8 @@ import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -136,14 +136,13 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool {
* value denotes delete state. * value denotes delete state.
*/ */
private List<Integer> shuffleProcWriteSequence() { private List<Integer> shuffleProcWriteSequence() {
Random rand = new Random();
List<Integer> procStatesSequence = new ArrayList<>(); List<Integer> procStatesSequence = new ArrayList<>();
Set<Integer> toBeDeletedProcs = new HashSet<>(); Set<Integer> toBeDeletedProcs = new HashSet<>();
// Add n + 1 entries of the proc id for insert + updates. If proc is chosen for delete, add // Add n + 1 entries of the proc id for insert + updates. If proc is chosen for delete, add
// extra entry which is marked -ve in the loop after shuffle. // extra entry which is marked -ve in the loop after shuffle.
for (int procId = 1; procId <= numProcs; ++procId) { for (int procId = 1; procId <= numProcs; ++procId) {
procStatesSequence.addAll(Collections.nCopies(updatesPerProc + 1, procId)); procStatesSequence.addAll(Collections.nCopies(updatesPerProc + 1, procId));
if (rand.nextFloat() < deleteProcsFraction) { if (ThreadLocalRandom.current().nextFloat() < deleteProcsFraction) {
procStatesSequence.add(procId); procStatesSequence.add(procId);
toBeDeletedProcs.add(procId); toBeDeletedProcs.add(procId);
} }

View File

@ -21,7 +21,8 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -153,13 +154,12 @@ public class TestProcedureStoreTracker {
final ProcedureStoreTracker tracker = new ProcedureStoreTracker(); final ProcedureStoreTracker tracker = new ProcedureStoreTracker();
Random rand = new Random(1);
for (int i = 0; i < NRUNS; ++i) { for (int i = 0; i < NRUNS; ++i) {
assertTrue(tracker.isEmpty()); assertTrue(tracker.isEmpty());
int count = 0; int count = 0;
while (count < NPROCEDURES) { while (count < NPROCEDURES) {
long procId = rand.nextLong(); long procId = ThreadLocalRandom.current().nextLong();
if (procId < 1) { if (procId < 1) {
continue; continue;
} }

View File

@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import java.io.IOException; import java.io.IOException;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -97,12 +98,12 @@ public class TestStressWALProcedureStore {
public void testInsertUpdateDelete() throws Exception { public void testInsertUpdateDelete() throws Exception {
final long LAST_PROC_ID = 19999; final long LAST_PROC_ID = 19999;
final Thread[] thread = new Thread[PROCEDURE_STORE_SLOTS]; final Thread[] thread = new Thread[PROCEDURE_STORE_SLOTS];
final AtomicLong procCounter = new AtomicLong((long)Math.round(Math.random() * 100)); final Random rand = ThreadLocalRandom.current();
final AtomicLong procCounter = new AtomicLong(rand.nextInt(100));
for (int i = 0; i < thread.length; ++i) { for (int i = 0; i < thread.length; ++i) {
thread[i] = new Thread() { thread[i] = new Thread() {
@Override @Override
public void run() { public void run() {
Random rand = new Random();
TestProcedure proc; TestProcedure proc;
do { do {
// After HBASE-15579 there may be gap in the procId sequence, trying to simulate that. // After HBASE-15579 there may be gap in the procId sequence, trying to simulate that.

View File

@ -59,7 +59,7 @@ public class TestZKReplicationPeerStorage {
HBaseClassTestRule.forClass(TestZKReplicationPeerStorage.class); HBaseClassTestRule.forClass(TestZKReplicationPeerStorage.class);
private static final HBaseZKTestingUtility UTIL = new HBaseZKTestingUtility(); private static final HBaseZKTestingUtility UTIL = new HBaseZKTestingUtility();
private static final Random RNG = new Random(); // Seed may be set with Random#setSeed
private static ZKReplicationPeerStorage STORAGE; private static ZKReplicationPeerStorage STORAGE;
@BeforeClass @BeforeClass
@ -96,12 +96,12 @@ public class TestZKReplicationPeerStorage {
} }
private ReplicationPeerConfig getConfig(int seed) { private ReplicationPeerConfig getConfig(int seed) {
Random rand = new Random(seed); RNG.setSeed(seed);
return ReplicationPeerConfig.newBuilder().setClusterKey(Long.toHexString(rand.nextLong())) return ReplicationPeerConfig.newBuilder().setClusterKey(Long.toHexString(RNG.nextLong()))
.setReplicationEndpointImpl(Long.toHexString(rand.nextLong())) .setReplicationEndpointImpl(Long.toHexString(RNG.nextLong()))
.setNamespaces(randNamespaces(rand)).setExcludeNamespaces(randNamespaces(rand)) .setNamespaces(randNamespaces(RNG)).setExcludeNamespaces(randNamespaces(RNG))
.setTableCFsMap(randTableCFs(rand)).setReplicateAllUserTables(rand.nextBoolean()) .setTableCFsMap(randTableCFs(RNG)).setReplicateAllUserTables(RNG.nextBoolean())
.setBandwidth(rand.nextInt(1000)).build(); .setBandwidth(RNG.nextInt(1000)).build();
} }
private void assertSetEquals(Set<String> expected, Set<String> actual) { private void assertSetEquals(Set<String> expected, Set<String> actual) {
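
This test is one of the places that intentionally keeps a plain Random: the peer config must be reproducible from a seed, and the instance returned by ThreadLocalRandom.current() does not support setSeed() (it throws UnsupportedOperationException), so a single static generator is re-seeded before each use. A minimal sketch of that seeded-reuse pattern, with a plain String standing in for ReplicationPeerConfig:

import java.util.Random;

public class SeededConfigSketch {
  // Shared instance; re-seeding before use makes the output a pure function of the seed.
  private static final Random RNG = new Random();

  static String getClusterKey(int seed) {
    RNG.setSeed(seed);
    return Long.toHexString(RNG.nextLong());
  }
}

If the seeded generator could be hit concurrently, the setSeed/next pair would need to be synchronized; the compaction-policy change later in this commit does exactly that.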

View File

@ -29,6 +29,8 @@ import java.util.ArrayList;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException; import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller; import javax.xml.bind.Marshaller;
@ -94,7 +96,7 @@ public class TestScannerResource {
static int insertData(Configuration conf, TableName tableName, String column, double prob) static int insertData(Configuration conf, TableName tableName, String column, double prob)
throws IOException { throws IOException {
Random rng = new Random(); Random rng = ThreadLocalRandom.current();
byte[] k = new byte[3]; byte[] k = new byte[3];
byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column));
List<Put> puts = new ArrayList<>(); List<Put> puts = new ArrayList<>();

View File

@ -23,14 +23,15 @@ import static org.junit.Assert.assertTrue;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.security.SecureRandom;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.TreeSet; import java.util.TreeSet;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
@ -52,7 +53,6 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.mockito.Mockito; import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock; import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer; import org.mockito.stubbing.Answer;
import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
@ -61,7 +61,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
*/ */
public class RSGroupableBalancerTestBase extends BalancerTestBase{ public class RSGroupableBalancerTestBase extends BalancerTestBase{
static SecureRandom rand = new SecureRandom();
static String[] groups = new String[] {RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4"}; static String[] groups = new String[] {RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4"};
static TableName table0 = TableName.valueOf("dt0"); static TableName table0 = TableName.valueOf("dt0");
static TableName[] tables = static TableName[] tables =
@ -305,10 +304,10 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{
protected List<RegionInfo> randomRegions(int numRegions) { protected List<RegionInfo> randomRegions(int numRegions) {
List<RegionInfo> regions = new ArrayList<>(numRegions); List<RegionInfo> regions = new ArrayList<>(numRegions);
byte[] start = new byte[16]; byte[] start = new byte[16];
Bytes.random(start);
byte[] end = new byte[16]; byte[] end = new byte[16];
rand.nextBytes(start); Bytes.random(end);
rand.nextBytes(end); int regionIdx = ThreadLocalRandom.current().nextInt(tables.length);
int regionIdx = rand.nextInt(tables.length);
for (int i = 0; i < numRegions; i++) { for (int i = 0; i < numRegions; i++) {
Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(start, 0, numRegions << 1);
Bytes.putInt(end, 0, (numRegions << 1) + 1); Bytes.putInt(end, 0, (numRegions << 1) + 1);
@ -351,6 +350,7 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{
protected static List<ServerName> generateServers(int numServers) { protected static List<ServerName> generateServers(int numServers) {
List<ServerName> servers = new ArrayList<>(numServers); List<ServerName> servers = new ArrayList<>(numServers);
Random rand = ThreadLocalRandom.current();
for (int i = 0; i < numServers; i++) { for (int i = 0; i < numServers; i++) {
String host = "server" + rand.nextInt(100000); String host = "server" + rand.nextInt(100000);
int port = rand.nextInt(60000); int port = rand.nextInt(60000);
@ -378,6 +378,7 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{
groupMap.put(grpName, RSGroupInfo); groupMap.put(grpName, RSGroupInfo);
index++; index++;
} }
Random rand = ThreadLocalRandom.current();
while (index < servers.size()) { while (index < servers.size()) {
int grpIndex = rand.nextInt(groups.length); int grpIndex = rand.nextInt(groups.length);
groupMap.get(groups[grpIndex]).addServer( groupMap.get(groups[grpIndex]).addServer(
@ -394,6 +395,7 @@ public class RSGroupableBalancerTestBase extends BalancerTestBase{
*/ */
protected static List<TableDescriptor> constructTableDesc(boolean hasBogusTable) { protected static List<TableDescriptor> constructTableDesc(boolean hasBogusTable) {
List<TableDescriptor> tds = Lists.newArrayList(); List<TableDescriptor> tds = Lists.newArrayList();
Random rand = ThreadLocalRandom.current();
int index = rand.nextInt(groups.length); int index = rand.nextInt(groups.length);
for (int i = 0; i < tables.length; i++) { for (int i = 0; i < tables.length; i++) {
TableDescriptor htd = TableDescriptorBuilder.newBuilder(tables[i]).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(tables[i]).build();

View File

@ -24,10 +24,9 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.LongAdder; import java.util.concurrent.atomic.LongAdder;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.mutable.MutableInt; import org.apache.commons.lang3.mutable.MutableInt;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -267,7 +266,7 @@ public class ZkSplitLogWorkerCoordination extends ZKListener implements
// after a successful submit, sleep a little bit to allow other RSs to grab the rest tasks // after a successful submit, sleep a little bit to allow other RSs to grab the rest tasks
try { try {
int sleepTime = RandomUtils.nextInt(0, 500) + 500; int sleepTime = ThreadLocalRandom.current().nextInt(500) + 500;
Thread.sleep(sleepTime); Thread.sleep(sleepTime);
} catch (InterruptedException e) { } catch (InterruptedException e) {
LOG.warn("Interrupted while yielding for other region servers", e); LOG.warn("Interrupted while yielding for other region servers", e);

View File

@ -19,13 +19,13 @@
package org.apache.hadoop.hbase.io.hfile; package org.apache.hadoop.hbase.io.hfile;
import java.util.Map; import java.util.Map;
import java.util.Random;
import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.Future; import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -71,8 +71,6 @@ public final class PrefetchExecutor {
}); });
} }
private static final Random RNG = new Random();
// TODO: We want HFile, which is where the blockcache lives, to handle // TODO: We want HFile, which is where the blockcache lives, to handle
// prefetching of file blocks but the Store level is where path convention // prefetching of file blocks but the Store level is where path convention
// knowledge should be contained // knowledge should be contained
@ -93,7 +91,8 @@ public final class PrefetchExecutor {
long delay; long delay;
if (prefetchDelayMillis > 0) { if (prefetchDelayMillis > 0) {
delay = (long)((prefetchDelayMillis * (1.0f - (prefetchDelayVariation/2))) + delay = (long)((prefetchDelayMillis * (1.0f - (prefetchDelayVariation/2))) +
(prefetchDelayMillis * (prefetchDelayVariation/2) * RNG.nextFloat())); (prefetchDelayMillis * (prefetchDelayVariation/2) *
ThreadLocalRandom.current().nextFloat()));
} else { } else {
delay = 0; delay = 0;
} }
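
The computation above jitters the prefetch start time around prefetchDelayMillis: the result falls in [delayMillis * (1 - variation/2), delayMillis), i.e. somewhat earlier than the configured delay but never later. A small self-contained sketch of the same formula, using 1000 ms and 0.2 as assumed example values (giving a delay in [900, 1000) ms):

import java.util.concurrent.ThreadLocalRandom;

public final class PrefetchDelaySketch {
  private PrefetchDelaySketch() {
  }

  static long jitteredDelay(long delayMillis, float variation) {
    // fixed part plus a random part scaled by the variation
    return (long) ((delayMillis * (1.0f - variation / 2))
        + (delayMillis * (variation / 2) * ThreadLocalRandom.current().nextFloat()));
  }

  public static void main(String[] args) {
    System.out.println(jitteredDelay(1000L, 0.2f)); // prints a value in [900, 1000)
  }
}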

View File

@ -30,6 +30,8 @@ import java.util.Random;
import java.util.Scanner; import java.util.Scanner;
import java.util.Set; import java.util.Set;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -518,7 +520,7 @@ public class RegionPlacementMaintainer {
public RandomizedMatrix(int rows, int cols) { public RandomizedMatrix(int rows, int cols) {
this.rows = rows; this.rows = rows;
this.cols = cols; this.cols = cols;
Random random = new Random(); Random random = ThreadLocalRandom.current();
rowTransform = new int[rows]; rowTransform = new int[rows];
rowInverse = new int[rows]; rowInverse = new int[rows];
for (int i = 0; i < rows; i++) { for (int i = 0; i < rows; i++) {

View File

@ -18,7 +18,7 @@
*/ */
package org.apache.hadoop.hbase.regionserver; package org.apache.hadoop.hbase.regionserver;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@ -53,7 +53,6 @@ public class AdaptiveMemStoreCompactionStrategy extends MemStoreCompactionStrate
private double compactionThreshold; private double compactionThreshold;
private double initialCompactionProbability; private double initialCompactionProbability;
private double compactionProbability; private double compactionProbability;
private Random rand = new Random();
private double numCellsInVersionedList = 0; private double numCellsInVersionedList = 0;
private boolean compacted = false; private boolean compacted = false;
@ -66,9 +65,10 @@ public class AdaptiveMemStoreCompactionStrategy extends MemStoreCompactionStrate
resetStats(); resetStats();
} }
@Override public Action getAction(VersionedSegmentsList versionedList) { @Override
public Action getAction(VersionedSegmentsList versionedList) {
if (versionedList.getEstimatedUniquesFrac() < 1.0 - compactionThreshold) { if (versionedList.getEstimatedUniquesFrac() < 1.0 - compactionThreshold) {
double r = rand.nextDouble(); double r = ThreadLocalRandom.current().nextDouble();
if(r < compactionProbability) { if(r < compactionProbability) {
numCellsInVersionedList = versionedList.getNumOfCells(); numCellsInVersionedList = versionedList.getNumOfCells();
compacted = true; compacted = true;

View File

@ -52,13 +52,13 @@ import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import javax.management.MalformedObjectNameException; import javax.management.MalformedObjectNameException;
import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServlet;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.SystemUtils; import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -1964,14 +1964,14 @@ public class HRegionServer extends Thread implements
if (r.shouldFlush(whyFlush)) { if (r.shouldFlush(whyFlush)) {
FlushRequester requester = server.getFlushRequester(); FlushRequester requester = server.getFlushRequester();
if (requester != null) { if (requester != null) {
long randomDelay = RandomUtils.nextLong(0, rangeOfDelayMs) + MIN_DELAY_TIME; long delay = ThreadLocalRandom.current().nextLong(rangeOfDelayMs) + MIN_DELAY_TIME;
//Throttle the flushes by putting a delay. If we don't throttle, and there //Throttle the flushes by putting a delay. If we don't throttle, and there
//is a balanced write-load on the regions in a table, we might end up //is a balanced write-load on the regions in a table, we might end up
//overwhelming the filesystem with too many flushes at once. //overwhelming the filesystem with too many flushes at once.
if (requester.requestDelayedFlush(r, randomDelay)) { if (requester.requestDelayedFlush(r, delay)) {
LOG.info("{} requesting flush of {} because {} after random delay {} ms", LOG.info("{} requesting flush of {} because {} after random delay {} ms",
getName(), r.getRegionInfo().getRegionNameAsString(), whyFlush.toString(), getName(), r.getRegionInfo().getRegionNameAsString(), whyFlush.toString(),
randomDelay); delay);
} }
} }
} }

View File

@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.regionserver.StoreUtils;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
@ -35,6 +34,8 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
private static final Logger LOG = LoggerFactory.getLogger(SortedCompactionPolicy.class); private static final Logger LOG = LoggerFactory.getLogger(SortedCompactionPolicy.class);
private static final Random RNG = new Random();
public SortedCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) { public SortedCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo) {
super(conf, storeConfigInfo); super(conf, storeConfigInfo);
} }
@ -109,11 +110,6 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
public abstract boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact) public abstract boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
throws IOException; throws IOException;
/**
* Used calculation jitter
*/
private final Random random = new Random();
/** /**
* @param filesToCompact * @param filesToCompact
* @return When to run next major compaction * @return When to run next major compaction
@ -137,14 +133,12 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
// deterministic jitter avoids a major compaction storm on restart // deterministic jitter avoids a major compaction storm on restart
OptionalInt seed = StoreUtils.getDeterministicRandomSeed(filesToCompact); OptionalInt seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
if (seed.isPresent()) { if (seed.isPresent()) {
// Synchronized to ensure one user of random instance at a time.
double rnd;
synchronized (this) {
this.random.setSeed(seed.getAsInt());
rnd = this.random.nextDouble();
}
long jitter = Math.round(period * jitterPct); long jitter = Math.round(period * jitterPct);
return period + jitter - Math.round(2L * jitter * rnd); // Synchronized to ensure one user of random instance at a time.
synchronized (RNG) {
RNG.setSeed(seed.getAsInt());
return period + jitter - Math.round(2L * jitter * RNG.nextDouble());
}
} else { } else {
return 0L; return 0L;
} }
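
The Random here cannot simply become a ThreadLocalRandom: the jitter must be deterministic per store, seeded from the store files, so that every restart computes the same next major compaction time instead of triggering a compaction storm, and that requires a seedable generator guarded by a lock. A minimal sketch of the computation with the seed and period passed in directly:

import java.util.Random;

public final class CompactionJitterSketch {
  // Shared seedable generator; synchronized so only one caller mutates the seed at a time.
  private static final Random RNG = new Random();

  static long nextMajorCompactTime(long period, double jitterPct, int seed) {
    long jitter = Math.round(period * jitterPct);
    synchronized (RNG) {
      RNG.setSeed(seed);
      // Deterministic result in (period - jitter, period + jitter]
      return period + jitter - Math.round(2L * jitter * RNG.nextDouble());
    }
  }
}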

View File

@ -23,9 +23,7 @@ import static org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.WAL_TRA
import java.io.IOException; import java.io.IOException;
import java.io.OutputStream; import java.io.OutputStream;
import java.security.Key; import java.security.Key;
import java.security.SecureRandom;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -110,11 +108,8 @@ public abstract class AbstractProtobufLogWriter {
throw new RuntimeException("Cipher '" + cipherName + "' is not available"); throw new RuntimeException("Cipher '" + cipherName + "' is not available");
} }
// Generate an encryption key for this WAL // Generate a random encryption key for this WAL
SecureRandom rng = new SecureRandom(); Key key = cipher.getRandomKey();
byte[] keyBytes = new byte[cipher.getKeyLength()];
rng.nextBytes(keyBytes);
Key key = new SecretKeySpec(keyBytes, cipher.getName());
builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(EncryptionUtil.wrapKey(conf, builder.setEncryptionKey(UnsafeByteOperations.unsafeWrap(EncryptionUtil.wrapKey(conf,
conf.get(HConstants.CRYPTO_WAL_KEY_NAME_CONF_KEY, conf.get(HConstants.CRYPTO_WAL_KEY_NAME_CONF_KEY,
conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY,

View File

@ -22,7 +22,6 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.OutputStream; import java.io.OutputStream;
import java.security.SecureRandom;
import org.apache.commons.io.IOUtils; import org.apache.commons.io.IOUtils;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
@ -151,7 +150,7 @@ public class SecureWALCellCodec extends WALCellCodec {
@Override @Override
protected byte[] initialValue() { protected byte[] initialValue() {
byte[] iv = new byte[encryptor.getIvLength()]; byte[] iv = new byte[encryptor.getIvLength()];
new SecureRandom().nextBytes(iv); Bytes.secureRandom(iv);
return iv; return iv;
} }
}; };

View File

@ -15,7 +15,6 @@ import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.io.InterruptedIOException; import java.io.InterruptedIOException;
import java.math.BigInteger; import java.math.BigInteger;
import java.security.SecureRandom;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Deque; import java.util.Deque;
import java.util.HashMap; import java.util.HashMap;
@ -26,6 +25,7 @@ import java.util.Map.Entry;
import java.util.concurrent.Callable; import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future; import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -333,7 +333,7 @@ public class HFileReplicator implements Closeable {
int RANDOM_RADIX = 32; int RANDOM_RADIX = 32;
String doubleUnderScore = UNDERSCORE + UNDERSCORE; String doubleUnderScore = UNDERSCORE + UNDERSCORE;
String randomDir = user.getShortName() + doubleUnderScore + tblName + doubleUnderScore String randomDir = user.getShortName() + doubleUnderScore + tblName + doubleUnderScore
+ (new BigInteger(RANDOM_WIDTH, new SecureRandom()).toString(RANDOM_RADIX)); + (new BigInteger(RANDOM_WIDTH, ThreadLocalRandom.current()).toString(RANDOM_RADIX));
return createStagingDir(baseDir, user, randomDir); return createStagingDir(baseDir, user, randomDir);
} }
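
SecureRandom is unnecessary for naming a temporary staging directory, and the BigInteger(int numBits, Random) constructor accepts any java.util.Random, so the thread-local generator can be passed straight in. A minimal sketch of producing the random radix-32 suffix, with the bit width an assumed value rather than one read from this file:

import java.math.BigInteger;
import java.util.concurrent.ThreadLocalRandom;

public final class StagingDirNameSketch {
  private static final int RANDOM_WIDTH = 320; // assumed width in bits
  private static final int RANDOM_RADIX = 32;

  static String randomSuffix() {
    // Uniformly random non-negative integer of RANDOM_WIDTH bits, rendered in base 32.
    return new BigInteger(RANDOM_WIDTH, ThreadLocalRandom.current()).toString(RANDOM_RADIX);
  }
}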

View File

@ -34,7 +34,6 @@ import java.util.HashSet;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.TreeSet; import java.util.TreeSet;
import java.util.concurrent.Callable; import java.util.concurrent.Callable;
@ -100,7 +99,6 @@ import org.apache.zookeeper.client.ConnectStringParser;
import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
/** /**
@ -604,14 +602,13 @@ public class CanaryTool implements Tool, Canary {
if (rowToCheck.length == 0) { if (rowToCheck.length == 0) {
rowToCheck = new byte[]{0x0}; rowToCheck = new byte[]{0x0};
} }
int writeValueSize = int writeValueSize = connection.getConfiguration()
connection.getConfiguration().getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10); .getInt(HConstants.HBASE_CANARY_WRITE_VALUE_SIZE_KEY, 10);
for (ColumnFamilyDescriptor column : tableDesc.getColumnFamilies()) { for (ColumnFamilyDescriptor column : tableDesc.getColumnFamilies()) {
Put put = new Put(rowToCheck); Put put = new Put(rowToCheck);
byte[] value = new byte[writeValueSize]; byte[] value = new byte[writeValueSize];
Bytes.random(value); Bytes.random(value);
put.addColumn(column.getName(), HConstants.EMPTY_BYTE_ARRAY, value); put.addColumn(column.getName(), HConstants.EMPTY_BYTE_ARRAY, value);
LOG.debug("Writing to {} {} {} {}", LOG.debug("Writing to {} {} {} {}",
tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(), tableDesc.getTableName(), region.getRegionNameAsString(), column.getNameAsString(),
Bytes.toStringBinary(rowToCheck)); Bytes.toStringBinary(rowToCheck));
@ -1832,7 +1829,6 @@ public class CanaryTool implements Tool, Canary {
RegionServerStdOutSink regionServerSink) { RegionServerStdOutSink regionServerSink) {
List<RegionServerTask> tasks = new ArrayList<>(); List<RegionServerTask> tasks = new ArrayList<>();
Map<String, AtomicLong> successMap = new HashMap<>(); Map<String, AtomicLong> successMap = new HashMap<>();
Random rand = new Random();
for (Map.Entry<String, List<RegionInfo>> entry : rsAndRMap.entrySet()) { for (Map.Entry<String, List<RegionInfo>> entry : rsAndRMap.entrySet()) {
String serverName = entry.getKey(); String serverName = entry.getKey();
AtomicLong successes = new AtomicLong(0); AtomicLong successes = new AtomicLong(0);
@ -1849,7 +1845,8 @@ public class CanaryTool implements Tool, Canary {
} }
} else { } else {
// random select a region if flag not set // random select a region if flag not set
RegionInfo region = entry.getValue().get(rand.nextInt(entry.getValue().size())); RegionInfo region = entry.getValue()
.get(ThreadLocalRandom.current().nextInt(entry.getValue().size()));
tasks.add(new RegionServerTask(this.connection, tasks.add(new RegionServerTask(this.connection,
serverName, serverName,
region, region,

View File

@ -135,7 +135,7 @@ public class EncryptionTest {
byte[] iv = null; byte[] iv = null;
if (context.getCipher().getIvLength() > 0) { if (context.getCipher().getIvLength() > 0) {
iv = new byte[context.getCipher().getIvLength()]; iv = new byte[context.getCipher().getIvLength()];
Bytes.random(iv); Bytes.secureRandom(iv);
} }
byte[] plaintext = new byte[1024]; byte[] plaintext = new byte[1024];
Bytes.random(plaintext); Bytes.random(plaintext);
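
The encryption self-test now draws its IV from Bytes.secureRandom, the SecureRandom-backed helper this commit standardizes on for key and IV material (see the LoadTestTool and WAL codec hunks above), while the throwaway plaintext keeps Bytes.random. A minimal sketch of that split, assuming the two helpers as used above:

import org.apache.hadoop.hbase.util.Bytes;

public final class IvAndPayloadSketch {
  private IvAndPayloadSketch() {
  }

  static byte[][] ivAndPlaintext(int ivLength) {
    byte[] iv = new byte[ivLength];
    Bytes.secureRandom(iv); // IV participates in the cryptographic operation
    byte[] plaintext = new byte[1024];
    Bytes.random(plaintext); // throwaway test payload only
    return new byte[][] { iv, plaintext };
  }
}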

View File

@ -23,6 +23,8 @@ import java.util.Collection;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.ClusterMetrics.Option;
@ -164,10 +166,10 @@ public class HBaseFsckRepair {
Table meta = conn.getTable(TableName.META_TABLE_NAME); Table meta = conn.getTable(TableName.META_TABLE_NAME);
Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime()); Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
if (numReplicas > 1) { if (numReplicas > 1) {
Random r = new Random(); Random rand = ThreadLocalRandom.current();
ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]); ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]);
for (int i = 1; i < numReplicas; i++) { for (int i = 1; i < numReplicas; i++) {
ServerName sn = serversArr[r.nextInt(serversArr.length)]; ServerName sn = serversArr[rand.nextInt(serversArr.length)];
// the column added here is just to make sure the master is able to // the column added here is just to make sure the master is able to
// see the additional replicas when it is asked to assign. The // see the additional replicas when it is asked to assign. The
// final value of these columns will be different and will be updated // final value of these columns will be different and will be updated
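Both the CanaryTool and HBaseFsckRepair hunks above replace a throwaway new Random() with ThreadLocalRandom.current(), which is fetched rather than allocated at the point of use and is safe to call from any thread. A sketch of the pick-a-random-element idiom the two hunks share; the helper name and the sample data are hypothetical:

import java.util.concurrent.ThreadLocalRandom;

public class PickRandom {
  // Select one element without allocating a Random for a single call.
  static <T> T pick(T[] candidates) {
    return candidates[ThreadLocalRandom.current().nextInt(candidates.length)];
  }

  public static void main(String[] args) {
    String[] servers = { "rs1,16020,1", "rs2,16020,1", "rs3,16020,1" }; // stand-ins
    System.out.println(pick(servers));
  }
}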
View File
@ -19,10 +19,10 @@ package org.apache.hadoop.hbase;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.concurrent.BlockingQueue; import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
@ -137,7 +137,6 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool {
* Thread that does random full-row writes into a table. * Thread that does random full-row writes into a table.
*/ */
public static class AtomicityWriter extends RepeatingTestThread { public static class AtomicityWriter extends RepeatingTestThread {
Random rand = new Random();
byte data[] = new byte[10]; byte data[] = new byte[10];
byte[][] targetRows; byte[][] targetRows;
byte[][] targetFamilies; byte[][] targetFamilies;
@ -157,10 +156,9 @@ public class AcidGuaranteesTestTool extends AbstractHBaseTool {
@Override @Override
public void doAnAction() throws Exception { public void doAnAction() throws Exception {
// Pick a random row to write into // Pick a random row to write into
byte[] targetRow = targetRows[rand.nextInt(targetRows.length)]; byte[] targetRow = targetRows[ThreadLocalRandom.current().nextInt(targetRows.length)];
Put p = new Put(targetRow); Put p = new Put(targetRow);
rand.nextBytes(data); Bytes.random(data);
for (byte[] family : targetFamilies) { for (byte[] family : targetFamilies) {
for (int i = 0; i < NUM_COLS_TO_CHECK; i++) { for (int i = 0; i < NUM_COLS_TO_CHECK; i++) {
byte qualifier[] = Bytes.toBytes("col" + i); byte qualifier[] = Bytes.toBytes("col" + i);
View File
@ -48,6 +48,7 @@ import java.util.Properties;
import java.util.Random; import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.TreeSet; import java.util.TreeSet;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BooleanSupplier; import java.util.function.BooleanSupplier;
@ -2419,10 +2420,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows) public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
throws IOException { throws IOException {
Random r = new Random();
byte[] row = new byte[rowSize]; byte[] row = new byte[rowSize];
for (int i = 0; i < totalRows; i++) { for (int i = 0; i < totalRows; i++) {
r.nextBytes(row); Bytes.random(row);
Put put = new Put(row); Put put = new Put(row);
put.addColumn(f, new byte[]{0}, new byte[]{0}); put.addColumn(f, new byte[]{0}, new byte[]{0});
t.put(put); t.put(put);
@ -3295,7 +3295,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
// There are chances that before we get the region for the table from an RS the region may // There are chances that before we get the region for the table from an RS the region may
// be going for CLOSE. This may be because online schema change is enabled // be going for CLOSE. This may be because online schema change is enabled
if (regCount > 0) { if (regCount > 0) {
idx = random.nextInt(regCount); idx = ThreadLocalRandom.current().nextInt(regCount);
// if we have just tried this region, there is no need to try again // if we have just tried this region, there is no need to try again
if (attempted.contains(idx)) { if (attempted.contains(idx)) {
continue; continue;
@ -3894,7 +3894,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions + numRowsPerFlush + " rows per flush, maxVersions=" + maxVersions +
"\n"); "\n");
final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L);
final int numCF = families.size(); final int numCF = families.size();
final byte[][] cfBytes = new byte[numCF][]; final byte[][] cfBytes = new byte[numCF][];
{ {
@ -3922,6 +3921,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
BufferedMutator mutator = getConnection().getBufferedMutator(tableName); BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
final Random rand = ThreadLocalRandom.current();
for (int iFlush = 0; iFlush < numFlushes; ++iFlush) { for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) { for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
final byte[] row = Bytes.toBytes(String.format(keyFormat, final byte[] row = Bytes.toBytes(String.format(keyFormat,
@ -3967,8 +3967,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
public static int randomFreePort() { public static int randomFreePort() {
return HBaseCommonTestingUtility.randomFreePort(); return HBaseCommonTestingUtility.randomFreePort();
} }
public static String randomMultiCastAddress() { public static String randomMultiCastAddress() {
return "226.1.1." + random.nextInt(254); return "226.1.1." + ThreadLocalRandom.current().nextInt(254);
} }
public static void waitForHostPort(String host, int port) public static void waitForHostPort(String host, int port)
View File
@ -19,8 +19,7 @@
package org.apache.hadoop.hbase; package org.apache.hadoop.hbase;
import java.io.IOException; import java.io.IOException;
import java.security.SecureRandom; import java.util.concurrent.ThreadLocalRandom;
import java.util.Random;
import org.apache.commons.math3.random.RandomData; import org.apache.commons.math3.random.RandomData;
import org.apache.commons.math3.random.RandomDataImpl; import org.apache.commons.math3.random.RandomDataImpl;
@ -337,7 +336,6 @@ public class HFilePerformanceEvaluation {
static class SequentialWriteBenchmark extends RowOrientedBenchmark { static class SequentialWriteBenchmark extends RowOrientedBenchmark {
protected HFile.Writer writer; protected HFile.Writer writer;
private Random random = new Random();
private byte[] bytes = new byte[ROW_LENGTH]; private byte[] bytes = new byte[ROW_LENGTH];
public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf, public SequentialWriteBenchmark(Configuration conf, FileSystem fs, Path mf,
@ -354,7 +352,7 @@ public class HFilePerformanceEvaluation {
if (cipher == "aes") { if (cipher == "aes") {
byte[] cipherKey = new byte[AES.KEY_LENGTH]; byte[] cipherKey = new byte[AES.KEY_LENGTH];
new SecureRandom().nextBytes(cipherKey); Bytes.secureRandom(cipherKey);
builder.withEncryptionContext(Encryption.newContext(conf) builder.withEncryptionContext(Encryption.newContext(conf)
.setCipher(Encryption.getCipher(conf, cipher)) .setCipher(Encryption.getCipher(conf, cipher))
.setKey(cipherKey)); .setKey(cipherKey));
@ -376,7 +374,7 @@ public class HFilePerformanceEvaluation {
} }
private byte[] generateValue() { private byte[] generateValue() {
random.nextBytes(bytes); Bytes.random(bytes);
return bytes; return bytes;
} }
@ -447,8 +445,6 @@ public class HFilePerformanceEvaluation {
static class UniformRandomReadBenchmark extends ReadBenchmark { static class UniformRandomReadBenchmark extends ReadBenchmark {
private Random random = new Random();
public UniformRandomReadBenchmark(Configuration conf, FileSystem fs, public UniformRandomReadBenchmark(Configuration conf, FileSystem fs,
Path mf, int totalRows) { Path mf, int totalRows) {
super(conf, fs, mf, totalRows); super(conf, fs, mf, totalRows);
@ -469,12 +465,11 @@ public class HFilePerformanceEvaluation {
} }
private byte [] getRandomRow() { private byte [] getRandomRow() {
return format(random.nextInt(totalRows)); return format(ThreadLocalRandom.current().nextInt(totalRows));
} }
} }
static class UniformRandomSmallScan extends ReadBenchmark { static class UniformRandomSmallScan extends ReadBenchmark {
private Random random = new Random();
public UniformRandomSmallScan(Configuration conf, FileSystem fs, public UniformRandomSmallScan(Configuration conf, FileSystem fs,
Path mf, int totalRows) { Path mf, int totalRows) {
@ -507,7 +502,7 @@ public class HFilePerformanceEvaluation {
} }
private byte [] getRandomRow() { private byte [] getRandomRow() {
return format(random.nextInt(totalRows)); return format(ThreadLocalRandom.current().nextInt(totalRows));
} }
} }
View File
@ -428,7 +428,7 @@ public class TestHBaseTestingUtility {
when(portChecker.available(anyInt())).thenReturn(true); when(portChecker.available(anyInt())).thenReturn(true);
HBaseTestingUtility.PortAllocator portAllocator = HBaseTestingUtility.PortAllocator portAllocator =
new HBaseTestingUtility.PortAllocator(random, portChecker); new HBaseTestingUtility.PortAllocator(portChecker);
int port1 = portAllocator.randomFreePort(); int port1 = portAllocator.randomFreePort();
int port2 = portAllocator.randomFreePort(); int port2 = portAllocator.randomFreePort();
View File
@ -38,6 +38,7 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Random; import java.util.Random;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
@ -89,7 +90,6 @@ public class TestMetaTableAccessor {
private static final Logger LOG = LoggerFactory.getLogger(TestMetaTableAccessor.class); private static final Logger LOG = LoggerFactory.getLogger(TestMetaTableAccessor.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static Connection connection; private static Connection connection;
private Random random = new Random();
@Rule @Rule
public TestName name = new TestName(); public TestName name = new TestName();
@ -440,9 +440,11 @@ public class TestMetaTableAccessor {
@Test @Test
public void testMetaLocationsForRegionReplicas() throws IOException { public void testMetaLocationsForRegionReplicas() throws IOException {
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong()); Random rand = ThreadLocalRandom.current();
ServerName serverName1 = ServerName.valueOf("bar", 60010, random.nextLong());
ServerName serverName100 = ServerName.valueOf("baz", 60010, random.nextLong()); ServerName serverName0 = ServerName.valueOf("foo", 60010, rand.nextLong());
ServerName serverName1 = ServerName.valueOf("bar", 60010, rand.nextLong());
ServerName serverName100 = ServerName.valueOf("baz", 60010, rand.nextLong());
long regionId = EnvironmentEdgeManager.currentTime(); long regionId = EnvironmentEdgeManager.currentTime();
RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
@ -467,9 +469,9 @@ public class TestMetaTableAccessor {
.setReplicaId(100) .setReplicaId(100)
.build(); .build();
long seqNum0 = random.nextLong(); long seqNum0 = rand.nextLong();
long seqNum1 = random.nextLong(); long seqNum1 = rand.nextLong();
long seqNum100 = random.nextLong(); long seqNum100 = rand.nextLong();
try (Table meta = MetaTableAccessor.getMetaHTable(connection)) { try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
MetaTableAccessor.updateRegionLocation(connection, primary, serverName0, seqNum0, MetaTableAccessor.updateRegionLocation(connection, primary, serverName0, seqNum0,
@ -555,7 +557,8 @@ public class TestMetaTableAccessor {
@Test @Test
public void testMetaLocationForRegionReplicasIsAddedAtRegionSplit() throws IOException { public void testMetaLocationForRegionReplicasIsAddedAtRegionSplit() throws IOException {
long regionId = EnvironmentEdgeManager.currentTime(); long regionId = EnvironmentEdgeManager.currentTime();
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong()); ServerName serverName0 = ServerName.valueOf("foo", 60010,
ThreadLocalRandom.current().nextLong());
RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW) .setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW) .setEndKey(HConstants.EMPTY_END_ROW)
@ -595,7 +598,8 @@ public class TestMetaTableAccessor {
@Test @Test
public void testMetaLocationForRegionReplicasIsAddedAtRegionMerge() throws IOException { public void testMetaLocationForRegionReplicasIsAddedAtRegionMerge() throws IOException {
long regionId = EnvironmentEdgeManager.currentTime(); long regionId = EnvironmentEdgeManager.currentTime();
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong()); ServerName serverName0 = ServerName.valueOf("foo", 60010,
ThreadLocalRandom.current().nextLong());
RegionInfo parentA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) RegionInfo parentA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(Bytes.toBytes("a")) .setStartKey(Bytes.toBytes("a"))
@ -882,7 +886,8 @@ public class TestMetaTableAccessor {
@Test @Test
public void testEmptyMetaDaughterLocationDuringSplit() throws IOException { public void testEmptyMetaDaughterLocationDuringSplit() throws IOException {
long regionId = EnvironmentEdgeManager.currentTime(); long regionId = EnvironmentEdgeManager.currentTime();
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong()); ServerName serverName0 = ServerName.valueOf("foo", 60010,
ThreadLocalRandom.current().nextLong());
RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo")) RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo"))
.setStartKey(HConstants.EMPTY_START_ROW) .setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW) .setEndKey(HConstants.EMPTY_END_ROW)
View File
@ -30,7 +30,7 @@ import java.util.EnumSet;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -643,9 +643,7 @@ public class TestAdmin2 extends TestAdminBase {
@Test @Test
public void testAbortProcedureFail() throws Exception { public void testAbortProcedureFail() throws Exception {
Random randomGenerator = new Random(); long procId = ThreadLocalRandom.current().nextLong();
long procId = randomGenerator.nextLong();
boolean abortResult = ADMIN.abortProcedure(procId, true); boolean abortResult = ADMIN.abortProcedure(procId, true);
assertFalse(abortResult); assertFalse(abortResult);
} }
View File
@ -32,7 +32,6 @@ import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.IntStream; import java.util.stream.IntStream;
@ -47,7 +46,6 @@ import org.junit.BeforeClass;
import org.junit.ClassRule; import org.junit.ClassRule;
import org.junit.Test; import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
import org.apache.hbase.thirdparty.io.netty.util.Timeout; import org.apache.hbase.thirdparty.io.netty.util.Timeout;
@ -80,7 +78,7 @@ public class TestAsyncBufferMutator {
TEST_UTIL.createTable(TABLE_NAME, CF); TEST_UTIL.createTable(TABLE_NAME, CF);
TEST_UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, CF); TEST_UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, CF);
CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get(); CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
ThreadLocalRandom.current().nextBytes(VALUE); Bytes.random(VALUE);
} }
@AfterClass @AfterClass
View File
@ -197,11 +197,11 @@ public class TestAsyncNonMetaRegionLocator {
assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName, assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName,
getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, locateType, false).get()); getDefaultRegionLocation(TABLE_NAME, EMPTY_START_ROW, locateType, false).get());
} }
byte[] randKey = new byte[ThreadLocalRandom.current().nextInt(128)]; byte[] key = new byte[ThreadLocalRandom.current().nextInt(128)];
ThreadLocalRandom.current().nextBytes(randKey); Bytes.random(key);
for (RegionLocateType locateType : RegionLocateType.values()) { for (RegionLocateType locateType : RegionLocateType.values()) {
assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName, assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName,
getDefaultRegionLocation(TABLE_NAME, randKey, locateType, false).get()); getDefaultRegionLocation(TABLE_NAME, key, locateType, false).get());
} }
} }
View File
@ -24,7 +24,8 @@ import static org.junit.Assert.assertTrue;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
@ -116,8 +117,7 @@ public class TestAsyncProcedureAdminApi extends TestAsyncAdminBase {
@Test @Test
public void abortProcedure() throws Exception { public void abortProcedure() throws Exception {
Random randomGenerator = new Random(); long procId = ThreadLocalRandom.current().nextLong();
long procId = randomGenerator.nextLong();
boolean abortResult = admin.abortProcedure(procId, true).get(); boolean abortResult = admin.abortProcedure(procId, true).get();
assertFalse(abortResult); assertFalse(abortResult);
} }
View File
@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.IntStream; import java.util.stream.IntStream;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
@ -69,7 +68,7 @@ public class TestAsyncTableBatchRetryImmediately {
UTIL.startMiniCluster(1); UTIL.startMiniCluster(1);
Table table = UTIL.createTable(TABLE_NAME, FAMILY); Table table = UTIL.createTable(TABLE_NAME, FAMILY);
UTIL.waitTableAvailable(TABLE_NAME); UTIL.waitTableAvailable(TABLE_NAME);
ThreadLocalRandom.current().nextBytes(VALUE_PREFIX); Bytes.random(VALUE_PREFIX);
for (int i = 0; i < COUNT; i++) { for (int i = 0; i < COUNT; i++) {
table.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, table.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL,
Bytes.add(VALUE_PREFIX, Bytes.toBytes(i)))); Bytes.add(VALUE_PREFIX, Bytes.toBytes(i))));
View File
@ -27,11 +27,11 @@ import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.Future; import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors; import java.util.stream.Collectors;
@ -146,7 +146,7 @@ public class TestAsyncTableGetMultiThreaded {
return null; return null;
}))); })));
LOG.info("====== Scheduled {} read threads ======", numThreads); LOG.info("====== Scheduled {} read threads ======", numThreads);
Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123)); Collections.shuffle(Arrays.asList(SPLIT_KEYS), ThreadLocalRandom.current());
Admin admin = TEST_UTIL.getAdmin(); Admin admin = TEST_UTIL.getAdmin();
for (byte[] splitPoint : SPLIT_KEYS) { for (byte[] splitPoint : SPLIT_KEYS) {
int oldRegionCount = admin.getRegions(TABLE_NAME).size(); int oldRegionCount = admin.getRegions(TABLE_NAME).size();
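ThreadLocalRandom extends java.util.Random, so it drops straight into Random-taking APIs such as Collections.shuffle, as the hunk above does in place of the old new Random(123). A small sketch of the same call shape, with stand-in split keys:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class ShuffleSketch {
  public static void main(String[] args) {
    List<String> splitKeys = Arrays.asList("111", "222", "333"); // stand-in split keys
    Collections.shuffle(splitKeys, ThreadLocalRandom.current()); // accepts any Random
    System.out.println(splitKeys);
  }
}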
View File
@ -92,7 +92,6 @@ public class TestFromClientSide3 {
= new HBaseTestingUtility(); = new HBaseTestingUtility();
private static final int WAITTABLE_MILLIS = 10000; private static final int WAITTABLE_MILLIS = 10000;
private static byte[] FAMILY = Bytes.toBytes("testFamily"); private static byte[] FAMILY = Bytes.toBytes("testFamily");
private static Random random = new Random();
private static int SLAVES = 3; private static int SLAVES = 3;
private static final byte[] ROW = Bytes.toBytes("testRow"); private static final byte[] ROW = Bytes.toBytes("testRow");
private static final byte[] ANOTHERROW = Bytes.toBytes("anotherrow"); private static final byte[] ANOTHERROW = Bytes.toBytes("anotherrow");
@ -144,9 +143,10 @@ public class TestFromClientSide3 {
private void randomCFPuts(Table table, byte[] row, byte[] family, int nPuts) private void randomCFPuts(Table table, byte[] row, byte[] family, int nPuts)
throws Exception { throws Exception {
Put put = new Put(row); Put put = new Put(row);
Random rand = ThreadLocalRandom.current();
for (int i = 0; i < nPuts; i++) { for (int i = 0; i < nPuts; i++) {
byte[] qualifier = Bytes.toBytes(random.nextInt()); byte[] qualifier = Bytes.toBytes(rand.nextInt());
byte[] value = Bytes.toBytes(random.nextInt()); byte[] value = Bytes.toBytes(rand.nextInt());
put.addColumn(family, qualifier, value); put.addColumn(family, qualifier, value);
} }
table.put(put); table.put(put);
@ -286,7 +286,7 @@ public class TestFromClientSide3 {
ClusterConnection connection = (ClusterConnection) TEST_UTIL.getConnection(); ClusterConnection connection = (ClusterConnection) TEST_UTIL.getConnection();
// Create 3 store files. // Create 3 store files.
byte[] row = Bytes.toBytes(random.nextInt()); byte[] row = Bytes.toBytes(ThreadLocalRandom.current().nextInt());
performMultiplePutAndFlush((HBaseAdmin) admin, table, row, FAMILY, 3, 100); performMultiplePutAndFlush((HBaseAdmin) admin, table, row, FAMILY, 3, 100);
try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
View File
@ -21,7 +21,6 @@ import static junit.framework.TestCase.assertEquals;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellBuilderType;
@ -153,7 +152,7 @@ public class TestMultiRespectsLimits {
// however the block being reference will be larger than MAX_SIZE. // however the block being reference will be larger than MAX_SIZE.
// This should cause the regionserver to try and send a result immediately. // This should cause the regionserver to try and send a result immediately.
byte[] value = new byte[MAX_SIZE - 100]; byte[] value = new byte[MAX_SIZE - 100];
ThreadLocalRandom.current().nextBytes(value); Bytes.random(value);
for (byte[] col:cols) { for (byte[] col:cols) {
Put p = new Put(row); Put p = new Put(row);
View File
@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;
import static org.apache.hadoop.hbase.ipc.RpcServer.MAX_REQUEST_SIZE; import static org.apache.hadoop.hbase.ipc.RpcServer.MAX_REQUEST_SIZE;
import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertThrows;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
@ -33,7 +32,6 @@ import org.junit.BeforeClass;
import org.junit.ClassRule; import org.junit.ClassRule;
import org.junit.Test; import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
@Category({ MediumTests.class, ClientTests.class }) @Category({ MediumTests.class, ClientTests.class })
@ -68,7 +66,7 @@ public class TestRequestTooBigException {
@Test @Test
public void testHbasePutDeleteCell() throws Exception { public void testHbasePutDeleteCell() throws Exception {
byte[] value = new byte[1024]; byte[] value = new byte[1024];
ThreadLocalRandom.current().nextBytes(value); Bytes.random(value);
for (int m = 0; m < 100; m++) { for (int m = 0; m < 100; m++) {
Put p = new Put(Bytes.toBytes("bigrow-" + m)); Put p = new Put(Bytes.toBytes("bigrow-" + m));
// max request is 10K, big request = 100 * 1K // max request is 10K, big request = 100 * 1K
View File
@ -43,8 +43,6 @@ import org.junit.Rule;
import org.junit.Test; import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
import org.junit.rules.TestName; import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** /**
* Run tests related to {@link TimestampsFilter} using HBase client APIs. * Run tests related to {@link TimestampsFilter} using HBase client APIs.
@ -58,7 +56,6 @@ public class TestTimestampsFilter {
public static final HBaseClassTestRule CLASS_RULE = public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestTimestampsFilter.class); HBaseClassTestRule.forClass(TestTimestampsFilter.class);
private static final Logger LOG = LoggerFactory.getLogger(TestTimestampsFilter.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@Rule @Rule
View File
@ -28,7 +28,7 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when; import static org.mockito.Mockito.when;
import java.util.Random; import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Abortable;
@ -97,7 +97,7 @@ public class TestEntityLocks {
admin = getAdmin(); admin = getAdmin();
lockReqArgCaptor = ArgumentCaptor.forClass(LockRequest.class); lockReqArgCaptor = ArgumentCaptor.forClass(LockRequest.class);
lockHeartbeatReqArgCaptor = ArgumentCaptor.forClass(LockHeartbeatRequest.class); lockHeartbeatReqArgCaptor = ArgumentCaptor.forClass(LockHeartbeatRequest.class);
procId = new Random().nextLong(); procId = ThreadLocalRandom.current().nextLong();
} }
private boolean waitLockTimeOut(EntityLock lock, long maxWaitTimeMillis) { private boolean waitLockTimeOut(EntityLock lock, long maxWaitTimeMillis) {
View File
@ -34,6 +34,8 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Random; import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import javax.management.MBeanAttributeInfo; import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo; import javax.management.MBeanInfo;
import javax.management.MBeanServerConnection; import javax.management.MBeanServerConnection;
@ -99,7 +101,7 @@ public class TestMetaTableMetrics {
UTIL.getConfiguration().set("hbase.coprocessor.region.classes", UTIL.getConfiguration().set("hbase.coprocessor.region.classes",
MetaTableMetrics.class.getName()); MetaTableMetrics.class.getName());
conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, JMXListener.class.getName()); conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, JMXListener.class.getName());
Random rand = new Random(); Random rand = ThreadLocalRandom.current();
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
do { do {
int sign = i % 2 == 0 ? 1 : -1; int sign = i % 2 == 0 ? 1 : -1;
View File
@ -20,8 +20,10 @@ package org.apache.hadoop.hbase.io.compress;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import java.security.SecureRandom;
import java.util.List; import java.util.List;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
@ -44,7 +46,6 @@ public class HFileTestBase {
protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
protected static final Logger LOG = LoggerFactory.getLogger(HFileTestBase.class); protected static final Logger LOG = LoggerFactory.getLogger(HFileTestBase.class);
protected static final SecureRandom RNG = new SecureRandom();
protected static FileSystem FS; protected static FileSystem FS;
public static void setUpBeforeClass() throws Exception { public static void setUpBeforeClass() throws Exception {
@ -105,13 +106,14 @@ public class HFileTestBase {
assertEquals("Did not read back as many KVs as written", i, testKvs.size()); assertEquals("Did not read back as many KVs as written", i, testKvs.size());
// Test random seeks with pread // Test random seeks with pread
Random rand = ThreadLocalRandom.current();
LOG.info("Random seeking with " + fileContext); LOG.info("Random seeking with " + fileContext);
reader = HFile.createReader(FS, path, cacheConf, true, conf); reader = HFile.createReader(FS, path, cacheConf, true, conf);
try { try {
scanner = reader.getScanner(conf, false, true); scanner = reader.getScanner(conf, false, true);
assertTrue("Initial seekTo failed", scanner.seekTo()); assertTrue("Initial seekTo failed", scanner.seekTo());
for (i = 0; i < 100; i++) { for (i = 0; i < 100; i++) {
KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size())); KeyValue kv = testKvs.get(rand.nextInt(testKvs.size()));
assertEquals("Unable to find KV as expected: " + kv, 0, scanner.seekTo(kv)); assertEquals("Unable to find KV as expected: " + kv, 0, scanner.seekTo(kv));
} }
} finally { } finally {
View File
@ -25,6 +25,8 @@ import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
@ -252,7 +254,7 @@ public class TestChangingEncoding {
@Test @Test
public void testCrazyRandomChanges() throws Exception { public void testCrazyRandomChanges() throws Exception {
prepareTest("RandomChanges"); prepareTest("RandomChanges");
Random rand = new Random(2934298742974297L); Random rand = ThreadLocalRandom.current();
for (int i = 0; i < 10; ++i) { for (int i = 0; i < 10; ++i) {
int encodingOrdinal = rand.nextInt(DataBlockEncoding.values().length); int encodingOrdinal = rand.nextInt(DataBlockEncoding.values().length);
DataBlockEncoding encoding = DataBlockEncoding.values()[encodingOrdinal]; DataBlockEncoding encoding = DataBlockEncoding.values()[encodingOrdinal];
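A caveat that applies to this hunk and to the dropped seeds in HBaseTestingUtility and TestDataBlockEncoders: ThreadLocalRandom cannot be re-seeded (its setSeed throws UnsupportedOperationException), so replacing a fixed-seed Random gives up exact run-to-run reproducibility. Where a deterministic sequence is genuinely required, an explicitly seeded Random remains the right choice; a sketch contrasting the two:

import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

public class SeedingSketch {
  public static void main(String[] args) {
    Random reproducible = new Random(2934298742974297L);          // same sequence every run
    System.out.println(reproducible.nextInt(100));
    System.out.println(ThreadLocalRandom.current().nextInt(100)); // different every run, not seedable
  }
}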
View File
@ -30,6 +30,7 @@ import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.List; import java.util.List;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.ArrayBackedTag;
@ -87,7 +88,6 @@ public class TestDataBlockEncoders {
private final Configuration conf = HBaseConfiguration.create(); private final Configuration conf = HBaseConfiguration.create();
private final RedundantKVGenerator generator = new RedundantKVGenerator(); private final RedundantKVGenerator generator = new RedundantKVGenerator();
private final Random randomizer = new Random(42L);
private final boolean includesMemstoreTS; private final boolean includesMemstoreTS;
private final boolean includesTags; private final boolean includesTags;
private final boolean useOffheapData; private final boolean useOffheapData;
@ -217,13 +217,14 @@ public class TestDataBlockEncoders {
LOG.info("Testing it!"); LOG.info("Testing it!");
// test it! // test it!
// try a few random seeks // try a few random seeks
Random rand = ThreadLocalRandom.current();
for (boolean seekBefore : new boolean[] { false, true }) { for (boolean seekBefore : new boolean[] { false, true }) {
for (int i = 0; i < NUM_RANDOM_SEEKS; ++i) { for (int i = 0; i < NUM_RANDOM_SEEKS; ++i) {
int keyValueId; int keyValueId;
if (!seekBefore) { if (!seekBefore) {
keyValueId = randomizer.nextInt(sampleKv.size()); keyValueId = rand.nextInt(sampleKv.size());
} else { } else {
keyValueId = randomizer.nextInt(sampleKv.size() - 1) + 1; keyValueId = rand.nextInt(sampleKv.size() - 1) + 1;
} }
KeyValue keyValue = sampleKv.get(keyValueId); KeyValue keyValue = sampleKv.get(keyValueId);
View File
@ -30,8 +30,8 @@ import java.util.Arrays;
import java.util.HashSet; import java.util.HashSet;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.MultithreadedTestUtil;
@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.ChecksumType;
public class CacheTestUtils { public class CacheTestUtils {
@ -282,11 +283,11 @@ public class CacheTestUtils {
public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks) { public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks) {
HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks]; HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks];
Random rand = new Random(); Random rand = ThreadLocalRandom.current();
HashSet<String> usedStrings = new HashSet<>(); HashSet<String> usedStrings = new HashSet<>();
for (int i = 0; i < numBlocks; i++) { for (int i = 0; i < numBlocks; i++) {
ByteBuffer cachedBuffer = ByteBuffer.allocate(blockSize); ByteBuffer cachedBuffer = ByteBuffer.allocate(blockSize);
rand.nextBytes(cachedBuffer.array()); Bytes.random(cachedBuffer.array());
cachedBuffer.rewind(); cachedBuffer.rewind();
int onDiskSizeWithoutHeader = blockSize; int onDiskSizeWithoutHeader = blockSize;
int uncompressedSizeWithoutHeader = blockSize; int uncompressedSizeWithoutHeader = blockSize;
View File
@ -39,6 +39,8 @@ import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.Objects; import java.util.Objects;
import java.util.Random; import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
@ -280,16 +282,15 @@ public class TestHFile {
StoreFileWriter sfw = StoreFileWriter sfw =
new StoreFileWriter.Builder(conf, fs).withOutputDir(storeFileParentDir) new StoreFileWriter.Builder(conf, fs).withOutputDir(storeFileParentDir)
.withFileContext(meta).build(); .withFileContext(meta).build();
final int rowLen = 32; final int rowLen = 32;
Random RNG = new Random(); Random rand = ThreadLocalRandom.current();
for (int i = 0; i < 1000; ++i) { for (int i = 0; i < 1000; ++i) {
byte[] k = RandomKeyValueUtil.randomOrderedKey(RNG, i); byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i);
byte[] v = RandomKeyValueUtil.randomValue(RNG); byte[] v = RandomKeyValueUtil.randomValue(rand);
int cfLen = RNG.nextInt(k.length - rowLen + 1); int cfLen = rand.nextInt(k.length - rowLen + 1);
KeyValue kv = KeyValue kv =
new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen, new KeyValue(k, 0, rowLen, k, rowLen, cfLen, k, rowLen + cfLen,
k.length - rowLen - cfLen, RNG.nextLong(), generateKeyType(RNG), v, 0, v.length); k.length - rowLen - cfLen, rand.nextLong(), generateKeyType(rand), v, 0, v.length);
sfw.append(kv); sfw.append(kv);
} }
Some files were not shown because too many files have changed in this diff.