HBASE-10699 Set capacity on ArrayList where possible and use isEmpty instead of size() == 0
Signed-off-by: Michael Stack <stack@apache.org>
parent c74cf12925
commit 55a1aa1e73
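Before the per-file hunks, a minimal standalone sketch (not part of the commit; class and method names are illustrative only) of the two idioms this change applies across the codebase: pre-sizing an ArrayList when the final element count is known up front, and preferring isEmpty() over size() == 0.

import java.util.ArrayList;
import java.util.List;

public class CapacityIdioms {

  // Default construction starts with a small backing array that may be
  // re-allocated and copied several times as elements are added.
  static List<String> copyDefault(String[] src) {
    List<String> out = new ArrayList<String>();
    for (String s : src) {
      out.add(s);
    }
    return out;
  }

  // Pre-sizing allocates the backing array once, since the final element
  // count (src.length) is known before the loop runs.
  static List<String> copyPresized(String[] src) {
    List<String> out = new ArrayList<String>(src.length);
    for (String s : src) {
      out.add(s);
    }
    return out;
  }

  // isEmpty() states the intent directly; for some Collection
  // implementations it is also cheaper than computing size().
  static boolean hasWork(List<String> tasks) {
    return !tasks.isEmpty(); // preferred over tasks.size() != 0
  }
}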
@@ -53,7 +53,7 @@ final class StabilityOptions {
   }

   public static String[][] filterOptions(String[][] options) {
-    List<String[]> optionsList = new ArrayList<String[]>();
+    List<String[]> optionsList = new ArrayList<String[]>(options.length);
     for (int i = 0; i < options.length; i++) {
       if (!options[i][0].equalsIgnoreCase(UNSTABLE_OPTION)
           && !options[i][0].equalsIgnoreCase(EVOLVING_OPTION)

@@ -1387,7 +1387,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
    * @return The list of co-processors classNames
    */
   public List<String> getCoprocessors() {
-    List<String> result = new ArrayList<String>();
+    List<String> result = new ArrayList<String>(this.values.entrySet().size());
     Matcher keyMatcher;
     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));

@@ -1897,7 +1897,7 @@ public class MetaTableAccessor {
    */
   public static void updateReplicationPositions(Connection connection, String peerId,
       Map<String, Long> positions) throws IOException {
-    List<Put> puts = new ArrayList<>();
+    List<Put> puts = new ArrayList<>(positions.entrySet().size());
     for (Map.Entry<String, Long> entry : positions.entrySet()) {
       long value = Math.abs(entry.getValue());
       Put put = new Put(Bytes.toBytes(entry.getKey()));

@@ -2169,7 +2169,7 @@ public class MetaTableAccessor {
       Result result;
       while ((result = scanner.next()) != null) {
         String key = Bytes.toString(result.getRow());
-        List<Long> list = new ArrayList<>();
+        List<Long> list = new ArrayList<>(result.rawCells().length);
         for (Cell cell : result.rawCells()) {
           list.add(Bytes.toLong(cell.getQualifierArray(), cell.getQualifierOffset(),
             cell.getQualifierLength()));
@@ -123,7 +123,7 @@ public class Append extends Mutation {
     byte [] family = CellUtil.cloneFamily(cell);
     List<Cell> list = this.familyMap.get(family);
     if (list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     }
     // find where the new entry should be placed in the List
     list.add(cell);

@@ -287,8 +287,8 @@ class AsyncProcess {
         }
         loc = locs.getDefaultRegionLocation();
       } catch (IOException ex) {
-        locationErrors = new ArrayList<Exception>();
-        locationErrorRows = new ArrayList<Integer>();
+        locationErrors = new ArrayList<Exception>(1);
+        locationErrorRows = new ArrayList<Integer>(1);
         LOG.error("Failed to get region location ", ex);
         // This action failed before creating ars. Retain it, but do not add to submit list.
         // We will then add it to ars in an already-failed state.

@@ -463,7 +463,7 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
       if (loc == null || loc.getServerName() == null) {
         if (isReplica) {
           if (unknownReplicaActions == null) {
-            unknownReplicaActions = new ArrayList<Action>();
+            unknownReplicaActions = new ArrayList<Action>(1);
           }
           unknownReplicaActions.add(action);
         } else {
@@ -172,7 +172,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     byte [] family = CellUtil.cloneFamily(kv);
     List<Cell> list = familyMap.get(family);
     if (list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     }
     list.add(kv);
     familyMap.put(family, list);

@@ -209,7 +209,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     }
     List<Cell> list = familyMap.get(family);
     if(list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     } else if(!list.isEmpty()) {
       list.clear();
     }

@@ -229,7 +229,7 @@ public class Delete extends Mutation implements Comparable<Row> {
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
     List<Cell> list = familyMap.get(family);
     if(list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     }
     list.add(new KeyValue(row, family, null, timestamp,
       KeyValue.Type.DeleteFamilyVersion));

@@ -262,7 +262,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     }
     List<Cell> list = familyMap.get(family);
     if (list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     }
     list.add(new KeyValue(this.row, family, qualifier, timestamp,
       KeyValue.Type.DeleteColumn));

@@ -297,7 +297,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     }
     List<Cell> list = familyMap.get(family);
     if(list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     }
     KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.Delete);
     list.add(kv);
@@ -400,7 +400,7 @@ public class Get extends Query
   @Override
   public Map<String, Object> getFingerprint() {
     Map<String, Object> map = new HashMap<String, Object>();
-    List<String> families = new ArrayList<String>();
+    List<String> families = new ArrayList<String>(this.familyMap.entrySet().size());
     map.put("families", families);
     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
       this.familyMap.entrySet()) {

@@ -428,7 +428,7 @@ public class Get extends Query
     map.put("row", Bytes.toStringBinary(this.row));
     map.put("maxVersions", this.maxVersions);
     map.put("cacheBlocks", this.cacheBlocks);
-    List<Long> timeRange = new ArrayList<Long>();
+    List<Long> timeRange = new ArrayList<Long>(2);
     timeRange.add(this.tr.getMin());
     timeRange.add(this.tr.getMax());
     map.put("timeRange", timeRange);
@@ -3865,7 +3865,7 @@ public class HBaseAdmin implements Admin {

   @Override
   public void drainRegionServers(List<ServerName> servers) throws IOException {
-    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>();
+    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>(servers.size());
     for (ServerName server : servers) {
       // Parse to ServerName to do simple validation.
       ServerName.parseServerName(server.toString());

@@ -3902,7 +3902,7 @@ public class HBaseAdmin implements Admin {

   @Override
   public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>();
+    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>(servers.size());
     for (ServerName server : servers) {
       pbServers.add(ProtobufUtil.toServerName(server));
     }
@@ -159,7 +159,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
   @Override
   public Map<String, Object> getFingerprint() {
     Map<String, Object> map = new HashMap<String, Object>();
-    List<String> families = new ArrayList<String>();
+    List<String> families = new ArrayList<String>(this.familyMap.entrySet().size());
     // ideally, we would also include table information, but that information
     // is not stored in each Operation instance.
     map.put("families", families);

@@ -227,7 +227,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
       stringMap.put("vlen", c.getValueLength());
       List<Tag> tags = CellUtil.getTags(c);
       if (tags != null) {
-        List<String> tagsString = new ArrayList<String>();
+        List<String> tagsString = new ArrayList<String>(tags.size());
         for (Tag t : tags) {
           tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(TagUtil.cloneValue(t)));
         }

@@ -908,7 +908,7 @@ public class Scan extends Query {
     map.put("maxResultSize", this.maxResultSize);
     map.put("cacheBlocks", this.cacheBlocks);
     map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
-    List<Long> timeRange = new ArrayList<Long>();
+    List<Long> timeRange = new ArrayList<Long>(2);
     timeRange.add(this.tr.getMin());
     timeRange.add(this.tr.getMax());
     map.put("timeRange", timeRange);
@@ -68,7 +68,7 @@ public final class ReplicationSerDeHelper {
     if (tableCfs == null) {
       return null;
     }
-    List<ReplicationProtos.TableCF> tableCFList = new ArrayList<>();
+    List<ReplicationProtos.TableCF> tableCFList = new ArrayList<>(tableCfs.entrySet().size());
     ReplicationProtos.TableCF.Builder tableCFBuilder = ReplicationProtos.TableCF.newBuilder();
     for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
       tableCFBuilder.clear();

@@ -100,10 +100,11 @@ public final class ReplicationSerDeHelper {
     if (tableCFsConfig == null || tableCFsConfig.trim().length() == 0) {
       return null;
     }
-    List<ReplicationProtos.TableCF> tableCFList = new ArrayList<>();
     ReplicationProtos.TableCF.Builder tableCFBuilder = ReplicationProtos.TableCF.newBuilder();

     String[] tables = tableCFsConfig.split(";");
+    List<ReplicationProtos.TableCF> tableCFList = new ArrayList<>(tables.length);
+
     for (String tab : tables) {
       // 1 ignore empty table config
       tab = tab.trim();
@@ -177,7 +177,7 @@ public abstract class CompareFilter extends FilterBase {
         " can only be used with EQUAL and NOT_EQUAL");
       }
     }
-    ArrayList<Object> arguments = new ArrayList<Object>();
+    ArrayList<Object> arguments = new ArrayList<Object>(2);
     arguments.add(compareOp);
     arguments.add(comparator);
     return arguments;

@@ -157,7 +157,7 @@ public class TimestampsFilter extends FilterBase {
   }

   public static Filter createFilterFromArguments(ArrayList<byte []> filterArguments) {
-    ArrayList<Long> timestamps = new ArrayList<Long>();
+    ArrayList<Long> timestamps = new ArrayList<Long>(filterArguments.size());
     for (int i = 0; i<filterArguments.size(); i++) {
       long timestamp = ParseFilter.convertByteArrayToLong(filterArguments.get(i));
       timestamps.add(timestamp);
@@ -128,7 +128,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re

       ZKUtil.createWithParents(this.zookeeper, this.peersZNode);

-      List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
+      List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>(2);
       ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(getPeerNode(id),
         ReplicationSerDeHelper.toByteArray(peerConfig));
       // b/w PeerWatcher and ReplicationZookeeper#add method to create the

@@ -327,8 +327,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     if (debugEnabled) {
       LOG.debug("Adding hfile references " + pairs + " in queue " + peerZnode);
     }
-    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
+
     int size = pairs.size();
+    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>(size);
+
     for (int i = 0; i < size; i++) {
       listOfOps.add(ZKUtilOp.createAndFailSilent(
         ZKUtil.joinZNode(peerZnode, pairs.get(i).getSecond().getName()),

@@ -352,8 +354,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     if (debugEnabled) {
       LOG.debug("Removing hfile references " + files + " from queue " + peerZnode);
     }
-    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
+
     int size = files.size();
+    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>(size);
+
     for (int i = 0; i < size; i++) {
       listOfOps.add(ZKUtilOp.deleteNodeFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i))));
     }
@@ -1875,7 +1875,7 @@ public final class ProtobufUtil {
    */
   static List<HRegionInfo> getRegionInfos(final GetOnlineRegionResponse proto) {
     if (proto == null) return null;
-    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>(proto.getRegionInfoList().size());
     for (RegionInfo regionInfo: proto.getRegionInfoList()) {
       regionInfos.add(HRegionInfo.convert(regionInfo));
     }

@@ -2691,7 +2691,7 @@ public final class ProtobufUtil {

   public static List<ReplicationLoadSource> toReplicationLoadSourceList(
       List<ClusterStatusProtos.ReplicationLoadSource> clsList) {
-    ArrayList<ReplicationLoadSource> rlsList = new ArrayList<ReplicationLoadSource>();
+    ArrayList<ReplicationLoadSource> rlsList = new ArrayList<ReplicationLoadSource>(clsList.size());
     for (ClusterStatusProtos.ReplicationLoadSource cls : clsList) {
       rlsList.add(toReplicationLoadSource(cls));
     }

@@ -232,7 +232,7 @@ public final class ResponseConverter {
   public static List<RegionOpeningState> getRegionOpeningStateList(
       final OpenRegionResponse proto) {
     if (proto == null) return null;
-    List<RegionOpeningState> regionOpeningStates = new ArrayList<RegionOpeningState>();
+    List<RegionOpeningState> regionOpeningStates = new ArrayList<RegionOpeningState>(proto.getOpeningStateCount());
     for (int i = 0; i < proto.getOpeningStateCount(); i++) {
       regionOpeningStates.add(RegionOpeningState.valueOf(
         proto.getOpeningState(i).name()));
@@ -68,7 +68,7 @@ public class Writables {
    * @throws IOException e
    */
   public static byte [] getBytes(final Writable... ws) throws IOException {
-    List<byte []> bytes = new ArrayList<byte []>();
+    List<byte []> bytes = new ArrayList<byte []>(ws.length);
     int size = 0;
     for (Writable w: ws) {
       byte [] b = getBytes(w);

@@ -106,7 +106,7 @@ public class MetaTableLocator {
   public List<Pair<HRegionInfo, ServerName>> getMetaRegionsAndLocations(ZooKeeperWatcher zkw,
       int replicaId) {
     ServerName serverName = getMetaRegionLocation(zkw, replicaId);
-    List<Pair<HRegionInfo, ServerName>> list = new ArrayList<Pair<HRegionInfo, ServerName>>();
+    List<Pair<HRegionInfo, ServerName>> list = new ArrayList<Pair<HRegionInfo, ServerName>>(1);
     list.add(new Pair<HRegionInfo, ServerName>(RegionReplicaUtil.getRegionInfoForReplica(
       HRegionInfo.FIRST_META_REGIONINFO, replicaId), serverName));
     return list;
@@ -682,7 +682,7 @@ public class TestAsyncProcess {
     ClusterConnection hc = createHConnection();
     MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));

     ap.submit(null, DUMMY_TABLE, puts, false, null, false);

@@ -701,7 +701,7 @@ public class TestAsyncProcess {
     };
     MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));

     final AsyncRequestFuture ars = ap.submit(null, DUMMY_TABLE, puts, false, cb, false);

@@ -718,7 +718,7 @@ public class TestAsyncProcess {
         SimpleRequestController.class.getName());
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     SimpleRequestController controller = (SimpleRequestController) ap.requestController;
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));

     for (int i = 0; i != controller.maxConcurrentTasksPerRegion; ++i) {

@@ -747,7 +747,7 @@ public class TestAsyncProcess {
     SimpleRequestController controller = (SimpleRequestController) ap.requestController;
     controller.taskCounterPerServer.put(sn2, new AtomicInteger(controller.maxConcurrentTasksPerServer));

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(4);
     puts.add(createPut(1, true));
     puts.add(createPut(3, true)); // <== this one won't be taken, the rs is busy
     puts.add(createPut(1, true)); // <== this one will make it, the region is already in

@@ -769,7 +769,7 @@ public class TestAsyncProcess {
   public void testFail() throws Exception {
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false);

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     Put p = createPut(1, false);
     puts.add(p);

@@ -817,7 +817,7 @@ public class TestAsyncProcess {
       }
     };

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     Put p = createPut(1, true);
     puts.add(p);

@@ -843,7 +843,7 @@ public class TestAsyncProcess {
   public void testFailAndSuccess() throws Exception {
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false);

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(3);
     puts.add(createPut(1, false));
     puts.add(createPut(1, true));
     puts.add(createPut(1, true));

@@ -870,7 +870,7 @@ public class TestAsyncProcess {
   public void testFlush() throws Exception {
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false);

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(3);
     puts.add(createPut(1, false));
     puts.add(createPut(1, true));
     puts.add(createPut(1, true));

@@ -955,7 +955,7 @@ public class TestAsyncProcess {
       }
     };

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));

     t.start();

@@ -1171,7 +1171,7 @@ public class TestAsyncProcess {
     HTable ht = new HTable(conn, mutator);
     ht.multiAp = new MyAsyncProcess(conn, CONF, false);

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(7);
     puts.add(createPut(1, true));
     puts.add(createPut(1, true));
     puts.add(createPut(1, true));

@@ -1517,7 +1517,7 @@ public class TestAsyncProcess {
   }

   private static List<Get> makeTimelineGets(byte[]... rows) {
-    List<Get> result = new ArrayList<Get>();
+    List<Get> result = new ArrayList<Get>(rows.length);
     for (byte[] row : rows) {
       Get get = new Get(row);
       get.setConsistency(Consistency.TIMELINE);

@@ -1611,7 +1611,7 @@ public class TestAsyncProcess {
         new LinkedBlockingQueue<Runnable>(200));
     AsyncProcess ap = new AsyncProcessForThrowableCheck(hc, CONF);

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));
     AsyncProcessTask task = AsyncProcessTask.newBuilder()
         .setPool(myPool)
@@ -48,7 +48,7 @@ public class TestKeyOnlyFilter {

   @Parameters
   public static Collection<Object[]> parameters() {
-    List<Object[]> paramList = new ArrayList<Object[]>();
+    List<Object[]> paramList = new ArrayList<Object[]>(2);
     {
       paramList.add(new Object[] { false });
       paramList.add(new Object[] { true });

@@ -39,8 +39,9 @@ public class TestHBaseRpcControllerImpl {

   @Test
   public void testListOfCellScannerables() throws IOException {
-    List<CellScannable> cells = new ArrayList<CellScannable>();
     final int count = 10;
+    List<CellScannable> cells = new ArrayList<CellScannable>(count);
+
     for (int i = 0; i < count; i++) {
       cells.add(createCell(i));
     }
@@ -348,7 +348,7 @@ public class ChoreService implements ChoreServicer {
   }

   private void cancelAllChores(final boolean mayInterruptIfRunning) {
-    ArrayList<ScheduledChore> choresToCancel = new ArrayList<ScheduledChore>();
+    ArrayList<ScheduledChore> choresToCancel = new ArrayList<ScheduledChore>(scheduledChores.keySet().size());
     // Build list of chores to cancel so we can iterate through a set that won't change
     // as chores are cancelled. If we tried to cancel each chore while iterating through
     // keySet the results would be undefined because the keySet would be changing

@@ -1184,7 +1184,7 @@ public class KeyValue implements ExtendedCell {
     stringMap.put("vlen", getValueLength());
     List<Tag> tags = getTags();
     if (tags != null) {
-      List<String> tagsString = new ArrayList<String>();
+      List<String> tagsString = new ArrayList<String>(tags.size());
       for (Tag t : tags) {
         tagsString.add(t.toString());
       }

@@ -116,7 +116,7 @@ public abstract class AbstractHBaseTool implements Tool, Configurable {
     }

     CommandLine cmd;
-    List<String> argsList = new ArrayList<>();
+    List<String> argsList = new ArrayList<>(args.length);
     for (String arg : args) {
       argsList.add(arg);
     }
@@ -180,7 +180,7 @@ public class ResourceChecker {
    * - logs them.
    */
   public void start() {
-    if (ras.size() == 0) {
+    if (ras.isEmpty()) {
      LOG.info("No resource analyzer");
      return;
    }

@@ -197,7 +197,7 @@ public class ResourceChecker {
    * - logs them.
    */
   public void end() {
-    if (ras.size() == 0) {
+    if (ras.isEmpty()) {
      LOG.info("No resource analyzer");
      return;
    }
@@ -133,13 +133,13 @@ public class ClassLoaderTestHelper {

     // compile it by JavaCompiler
     JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
-    ArrayList<String> srcFileNames = new ArrayList<String>();
+    ArrayList<String> srcFileNames = new ArrayList<String>(1);
     srcFileNames.add(sourceCodeFile.toString());
     StandardJavaFileManager fm = compiler.getStandardFileManager(null, null,
       null);
     Iterable<? extends JavaFileObject> cu =
       fm.getJavaFileObjects(sourceCodeFile);
-    List<String> options = new ArrayList<String>();
+    List<String> options = new ArrayList<String>(2);
     options.add("-classpath");
     // only add hbase classes to classpath. This is a little bit tricky: assume
     // the classpath is {hbaseSrc}/target/classes.

@@ -498,7 +498,7 @@ public class TestBytes extends TestCase {
   }

   public void testToFromHex() {
-    List<String> testStrings = new ArrayList<String>();
+    List<String> testStrings = new ArrayList<String>(8);
     testStrings.addAll(Arrays.asList(new String[] {
       "",
       "00",

@@ -517,7 +517,7 @@ public class TestBytes extends TestCase {
       Assert.assertTrue(testString.equalsIgnoreCase(result));
     }

-    List<byte[]> testByteData = new ArrayList<byte[]>();
+    List<byte[]> testByteData = new ArrayList<byte[]>(5);
     testByteData.addAll(Arrays.asList(new byte[][] {
       new byte[0],
       new byte[1],
@@ -587,7 +587,7 @@ public class AggregationClient implements Closeable {
       S sumVal = null, sumSqVal = null;

       public synchronized Pair<List<S>, Long> getStdParams() {
-        List<S> l = new ArrayList<S>();
+        List<S> l = new ArrayList<S>(2);
         l.add(sumVal);
         l.add(sumSqVal);
         Pair<List<S>, Long> p = new Pair<List<S>, Long>(l, rowCountVal);

@@ -704,7 +704,7 @@ public class AggregationClient implements Closeable {
       S sumVal = null, sumWeights = null;

       public synchronized Pair<NavigableMap<byte[], List<S>>, List<S>> getMedianParams() {
-        List<S> l = new ArrayList<S>();
+        List<S> l = new ArrayList<S>(2);
         l.add(sumVal);
         l.add(sumWeights);
         Pair<NavigableMap<byte[], List<S>>, List<S>> p =
@@ -353,7 +353,7 @@ public class TestRowProcessorEndpoint {
       Scan scan = new Scan(row, row);
       scan.addColumn(FAM, COUNTER);
       doScan(region, scan, kvs);
-      counter = kvs.size() == 0 ? 0 :
+      counter = kvs.isEmpty() ? 0 :
         Bytes.toInt(CellUtil.cloneValue(kvs.iterator().next()));

       // Assert counter value

@@ -497,7 +497,7 @@ public class TestRowProcessorEndpoint {

     @Override
     public Collection<byte[]> getRowsToLock() {
-      List<byte[]> rows = new ArrayList<byte[]>();
+      List<byte[]> rows = new ArrayList<byte[]>(2);
       rows.add(row1);
       rows.add(row2);
       return rows;

@@ -538,7 +538,7 @@ public class TestRowProcessorEndpoint {
       swapped = !swapped;

       // Add and delete keyvalues
-      List<List<Cell>> kvs = new ArrayList<List<Cell>>();
+      List<List<Cell>> kvs = new ArrayList<List<Cell>>(2);
       kvs.add(kvs1);
       kvs.add(kvs2);
       byte[][] rows = new byte[][]{row1, row2};
@@ -133,7 +133,7 @@ public class SecureBulkLoadEndpointClient {
       }

       List<ClientProtos.BulkLoadHFileRequest.FamilyPath> protoFamilyPaths =
-          new ArrayList<ClientProtos.BulkLoadHFileRequest.FamilyPath>();
+          new ArrayList<ClientProtos.BulkLoadHFileRequest.FamilyPath>(familyPaths.size());
       for(Pair<byte[], String> el: familyPaths) {
         protoFamilyPaths.add(ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder()
           .setFamily(ByteStringer.wrap(el.getFirst()))
@@ -154,7 +154,7 @@ public class DemoClient {
     //
     // Create the demo table with two column families, entry: and unused:
     //
-    ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>();
+    ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>(2);
     ColumnDescriptor col;
     col = new ColumnDescriptor();
     col.name = ByteBuffer.wrap(bytes("entry:"));

@@ -194,7 +194,7 @@ public class DemoClient {

     ArrayList<Mutation> mutations;
     // non-utf8 is fine for data
-    mutations = new ArrayList<Mutation>();
+    mutations = new ArrayList<Mutation>(1);
     mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")),
       ByteBuffer.wrap(invalid), writeToWal));
     client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("foo")),

@@ -202,13 +202,13 @@ public class DemoClient {

     // this row name is valid utf8
-    mutations = new ArrayList<Mutation>();
+    mutations = new ArrayList<Mutation>(1);
     mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(valid), writeToWal));
     client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, dummyAttributes);

     // non-utf8 is now allowed in row names because HBase stores values as binary

-    mutations = new ArrayList<Mutation>();
+    mutations = new ArrayList<Mutation>(1);
     mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal));
     client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(invalid), mutations, dummyAttributes);

@@ -238,7 +238,7 @@ public class DemoClient {
     nf.setGroupingUsed(false);
     byte[] row = bytes(nf.format(i));

-    mutations = new ArrayList<Mutation>();
+    mutations = new ArrayList<Mutation>(1);
     mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("unused:")), ByteBuffer.wrap(bytes("DELETE_ME")), writeToWal));
     client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes);
     printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes));

@@ -251,14 +251,14 @@ public class DemoClient {
       // no-op
     }

-    mutations = new ArrayList<Mutation>();
+    mutations = new ArrayList<Mutation>(2);
     mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:num")), ByteBuffer.wrap(bytes("0")), writeToWal));
     mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(bytes("FOO")), writeToWal));
     client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes);
     printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes));

     Mutation m;
-    mutations = new ArrayList<Mutation>();
+    mutations = new ArrayList<Mutation>(2);
     m = new Mutation();
     m.column = ByteBuffer.wrap(bytes("entry:foo"));
     m.isDelete = true;
@@ -151,7 +151,7 @@ public class HttpDoAsClient {
     //
     // Create the demo table with two column families, entry: and unused:
     //
-    ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>();
+    ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>(2);
     ColumnDescriptor col;
     col = new ColumnDescriptor();
     col.name = ByteBuffer.wrap(bytes("entry:"));

@@ -126,7 +126,7 @@ public class DemoClient {
     columnValue.setFamily("family1".getBytes());
     columnValue.setQualifier("qualifier1".getBytes());
     columnValue.setValue("value1".getBytes());
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(columnValue);
     put.setColumnValues(columnValues);
@@ -134,7 +134,7 @@ public class MetricSampleQuantiles {

     // Base case: no samples
     int start = 0;
-    if (samples.size() == 0) {
+    if (samples.isEmpty()) {
       SampleItem newItem = new SampleItem(buffer[0], 1, 0);
       samples.add(newItem);
       start++;

@@ -203,7 +203,7 @@ public class MetricSampleQuantiles {
    * @return Estimated value at that quantile.
    */
   private long query(double quantile) throws IOException {
-    if (samples.size() == 0) {
+    if (samples.isEmpty()) {
       throw new IOException("No samples present");
     }
@@ -924,7 +924,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
     String numThreadKey = String.format(NUM_THREADS_KEY, this.getClass().getSimpleName());
     numThreads = util.getConfiguration().getInt(numThreadKey, DEFAULT_NUM_THREADS);

-    ArrayList<Worker> workers = new ArrayList<>();
+    ArrayList<Worker> workers = new ArrayList<>(numThreads);
     for (int i = 0; i < numThreads; i++) {
       checkException(workers);
       Worker worker = new Worker();

@@ -225,7 +225,7 @@ public class IntegrationTestIngest extends IntegrationTestBase {

   protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey,
       long numKeys) {
-    List<String> args = new ArrayList<String>();
+    List<String> args = new ArrayList<String>(11);
     args.add("-tn");
     args.add(getTablename().getNameAsString());
     args.add("-families");
@@ -45,28 +45,28 @@ public class IntegrationTestIngestWithVisibilityLabels extends IntegrationTestIn
   private static final List<List<String>> AUTHS = new ArrayList<List<String>>();

   static {
-    ArrayList<String> tmp = new ArrayList<String>();
+    ArrayList<String> tmp = new ArrayList<String>(2);
     tmp.add("secret");
     tmp.add("confidential");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(1);
     tmp.add("topsecret");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(2);
     tmp.add("confidential");
     tmp.add("private");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(1);
     tmp.add("public");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(2);
     tmp.add("topsecret");
     tmp.add("private");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(1);
     tmp.add("confidential");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(2);
     tmp.add("topsecret");
     tmp.add("private");
     AUTHS.add(tmp);

@@ -392,7 +392,7 @@ public class IntegrationTestRpcClient {
       cluster.startServer();
     }

-    ArrayList<SimpleClient> clients = new ArrayList<>();
+    ArrayList<SimpleClient> clients = new ArrayList<>(30);

     // all threads should share the same rpc client
     AbstractRpcClient<?> rpcClient = createRpcClient(conf, isSyncClient);
@@ -1181,7 +1181,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
           // useless for debugging.
           context.getCounter("undef", keyString).increment(1);
         }
-      } else if (defCount > 0 && refs.size() == 0) {
+      } else if (defCount > 0 && refs.isEmpty()) {
         // node is defined but not referenced
         context.write(key, UNREF);
         context.getCounter(Counts.UNREFERENCED).increment(1);

@@ -43,7 +43,7 @@ public class TestRowDataExerciseFInts extends BaseTestRowData{

   static List<ByteRange> rows;
   static{
-    List<String> rowStrings = new ArrayList<String>();
+    List<String> rowStrings = new ArrayList<String>(16);
     rowStrings.add("com.edsBlog/directoryAa/pageAaa");
     rowStrings.add("com.edsBlog/directoryAa/pageBbb");
     rowStrings.add("com.edsBlog/directoryAa/pageCcc");
@@ -46,7 +46,7 @@ public class TestRowDataTrivialWithTags extends BaseTestRowData{

   static List<KeyValue> d = Lists.newArrayList();
   static {
-    List<Tag> tagList = new ArrayList<Tag>();
+    List<Tag> tagList = new ArrayList<Tag>(2);
     Tag t = new ArrayBackedTag((byte) 1, "visisbility");
     tagList.add(t);
     t = new ArrayBackedTag((byte) 2, "ACL");

@@ -41,7 +41,7 @@ public class TestRowDataUrls extends BaseTestRowData{

   static List<ByteRange> rows;
   static{
-    List<String> rowStrings = new ArrayList<String>();
+    List<String> rowStrings = new ArrayList<String>(16);
     rowStrings.add("com.edsBlog/directoryAa/pageAaa");
     rowStrings.add("com.edsBlog/directoryAa/pageBbb");
     rowStrings.add("com.edsBlog/directoryAa/pageCcc");
@@ -27,7 +27,7 @@ public class TestTimestampDataBasic implements TestTimestampData {

   @Override
   public List<Long> getInputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(5);
     d.add(5L);
     d.add(3L);
     d.add(0L);

@@ -43,7 +43,7 @@ public class TestTimestampDataBasic implements TestTimestampData {

   @Override
   public List<Long> getOutputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(4);
     d.add(0L);
     d.add(1L);
     d.add(3L);

@@ -29,7 +29,7 @@ public class TestTimestampDataNumbers implements TestTimestampData {

   @Override
   public List<Long> getInputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(5);
     d.add(5L << shift);
     d.add(3L << shift);
     d.add(7L << shift);

@@ -45,7 +45,7 @@ public class TestTimestampDataNumbers implements TestTimestampData {

   @Override
   public List<Long> getOutputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(4);
     d.add(1L << shift);
     d.add(3L << shift);
     d.add(5L << shift);

@@ -29,7 +29,7 @@ public class TestTimestampDataRepeats implements TestTimestampData {

   @Override
   public List<Long> getInputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(5);
     d.add(t);
     d.add(t);
     d.add(t);
@@ -163,7 +163,7 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool {
       files.add(new Path(cmd.getOptionValue("f")));
     }

-    if (files.size() == 0 || cmd.hasOption("h")) {
+    if (files.isEmpty() || cmd.hasOption("h")) {
       HelpFormatter formatter = new HelpFormatter();
       formatter.printHelp("ProcedureWALPrettyPrinter ", options, true);
       return(-1);

@@ -105,7 +105,7 @@ public class MultiRowResource extends ResourceBase implements Constants {
         }
       }

-      if (model.getRows().size() == 0) {
+      if (model.getRows().isEmpty()) {
        //If no rows found.
        servlet.getMetrics().incrementFailedGetRequests(1);
        return Response.status(Response.Status.NOT_FOUND)
@@ -175,7 +175,7 @@ public class RemoteHTable implements Table {
   protected Result[] buildResultFromModel(final CellSetModel model) {
     List<Result> results = new ArrayList<Result>();
     for (RowModel row: model.getRows()) {
-      List<Cell> kvs = new ArrayList<Cell>();
+      List<Cell> kvs = new ArrayList<Cell>(row.getCells().size());
       for (CellModel cell: row.getCells()) {
         byte[][] split = KeyValue.parseColumn(cell.getColumn());
         byte[] column = split[0];

@@ -87,7 +87,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan
     NamespaceDescriptor nd = admin.getNamespaceDescriptor(namespaceName);

     // For properly formed JSON, if no properties, field has to be null (not just no elements).
-    if(nd.getConfiguration().size() == 0){ return; }
+    if(nd.getConfiguration().isEmpty()){ return; }

     properties = new HashMap<String,String>();
     properties.putAll(nd.getConfiguration());

@@ -66,7 +66,7 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler {
    */
   public NamespacesModel(Admin admin) throws IOException {
     NamespaceDescriptor[] nds = admin.listNamespaceDescriptors();
-    namespaces = new ArrayList<String>();
+    namespaces = new ArrayList<String>(nds.length);
     for (NamespaceDescriptor nd : nds) {
       namespaces.add(nd.getName());
     }
@@ -384,7 +384,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
       filter = new FamilyFilter(CompareOp.valueOf(op), comparator.build());
       break;
     case FilterList: {
-      List<Filter> list = new ArrayList<Filter>();
+      List<Filter> list = new ArrayList<Filter>(filters.size());
       for (FilterModel model: filters) {
        list.add(model.build());
      }

@@ -82,7 +82,7 @@ public class TestMultiRowResource {

   @Parameterized.Parameters
   public static Collection<Object[]> data() {
-    List<Object[]> params = new ArrayList<Object[]>();
+    List<Object[]> params = new ArrayList<Object[]>(2);
     params.add(new Object[] {Boolean.TRUE});
     params.add(new Object[] {Boolean.FALSE});
     return params;

@@ -957,7 +957,7 @@ public class TestScannersWithFilters {
     // Test getting a single row, single key using Row, Qualifier, and Value
     // regular expression and substring filters
     // Use must pass all
-    List<Filter> filters = new ArrayList<Filter>();
+    List<Filter> filters = new ArrayList<Filter>(3);
     filters.add(new RowFilter(CompareOp.EQUAL,
       new RegexStringComparator(".+-2")));
     filters.add(new QualifierFilter(CompareOp.EQUAL,
@@ -94,7 +94,7 @@ public class TestScannersWithLabels {
     byte[] k = new byte[3];
     byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));

-    List<Put> puts = new ArrayList<>();
+    List<Put> puts = new ArrayList<>(9);
     for (int i = 0; i < 9; i++) {
       Put put = new Put(Bytes.toBytes("row" + i));
       put.setDurability(Durability.SKIP_WAL);

@@ -73,7 +73,7 @@ public class TestSchemaResource {

   @Parameterized.Parameters
   public static Collection<Object[]> data() {
-    List<Object[]> params = new ArrayList<Object[]>();
+    List<Object[]> params = new ArrayList<Object[]>(2);
     params.add(new Object[] {Boolean.TRUE});
     params.add(new Object[] {Boolean.FALSE});
     return params;
@@ -262,7 +262,7 @@ public class TestRemoteTable {

   @Test
   public void testMultiGet() throws Exception {
-    ArrayList<Get> gets = new ArrayList<Get>();
+    ArrayList<Get> gets = new ArrayList<Get>(2);
     gets.add(new Get(ROW_1));
     gets.add(new Get(ROW_2));
     Result[] results = remoteTable.get(gets);

@@ -272,7 +272,7 @@ public class TestRemoteTable {
     assertEquals(2, results[1].size());

     //Test Versions
-    gets = new ArrayList<Get>();
+    gets = new ArrayList<Get>(2);
     Get g = new Get(ROW_1);
     g.setMaxVersions(3);
     gets.add(g);

@@ -284,13 +284,13 @@ public class TestRemoteTable {
     assertEquals(3, results[1].size());

     //404
-    gets = new ArrayList<Get>();
+    gets = new ArrayList<Get>(1);
     gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
     results = remoteTable.get(gets);
     assertNotNull(results);
     assertEquals(0, results.length);

-    gets = new ArrayList<Get>();
+    gets = new ArrayList<Get>(3);
     gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
     gets.add(new Get(ROW_1));
     gets.add(new Get(ROW_2));

@@ -314,7 +314,7 @@ public class TestRemoteTable {

     // multiput

-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(3);
     put = new Put(ROW_3);
     put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
     puts.add(put);

@@ -408,7 +408,7 @@ public class TestRemoteTable {
    */
   @Test
   public void testScanner() throws IOException {
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(4);
     Put put = new Put(ROW_1);
     put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
     puts.add(put);

@@ -499,7 +499,7 @@ public class TestRemoteTable {
    */
   @Test
   public void testIteratorScaner() throws IOException {
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(4);
     Put put = new Put(ROW_1);
     put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
     puts.add(put);
@@ -309,7 +309,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene

     List<TableName> specialTables;
     if(!master.isInitialized()) {
-      specialTables = new ArrayList<TableName>();
+      specialTables = new ArrayList<TableName>(4);
       specialTables.add(AccessControlLists.ACL_TABLE_NAME);
       specialTables.add(TableName.META_TABLE_NAME);
       specialTables.add(TableName.NAMESPACE_TABLE_NAME);

@@ -589,7 +589,7 @@ public abstract class TestRSGroupsBase {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return cluster.getClusterStatus().getRegionsInTransition().size() == 0;
+        return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
       }
     });
     Set<HostAndPort> newServers = Sets.newHashSet();

@@ -606,7 +606,7 @@ public abstract class TestRSGroupsBase {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return cluster.getClusterStatus().getRegionsInTransition().size() == 0;
+        return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
       }
     });
@@ -229,7 +229,7 @@ public class HFileArchiver {
     }

     // short circuit if we don't have any files to delete
-    if (compactedFiles.size() == 0) {
+    if (compactedFiles.isEmpty()) {
       LOG.debug("No store files to dispose, done!");
       return;
     }

@@ -311,7 +311,7 @@ public class HFileArchiver {
   private static List<File> resolveAndArchive(FileSystem fs, Path baseArchiveDir,
       Collection<File> toArchive, long start) throws IOException {
     // short circuit if no files to move
-    if (toArchive.size() == 0) return Collections.emptyList();
+    if (toArchive.isEmpty()) return Collections.emptyList();

     if (LOG.isTraceEnabled()) LOG.trace("moving files to the archive directory: " + baseArchiveDir);

@@ -181,7 +181,7 @@ public class ForeignException extends IOException {
    * the sender).
    */
   private static StackTraceElement[] toStackTrace(List<StackTraceElementMessage> traceList) {
-    if (traceList == null || traceList.size() == 0) {
+    if (traceList == null || traceList.isEmpty()) {
       return new StackTraceElement[0]; // empty array
     }
     StackTraceElement[] trace = new StackTraceElement[traceList.size()];
@@ -562,7 +562,7 @@ public class FavoredNodeAssignmentHelper {

     // Is the rack valid? Do we recognize it?
     if (rack == null || getServersFromRack(rack) == null ||
-        getServersFromRack(rack).size() == 0) {
+        getServersFromRack(rack).isEmpty()) {
       return null;
     }

@@ -577,7 +577,7 @@ public class FavoredNodeAssignmentHelper {
         serversToChooseFrom.remove(StartcodeAgnosticServerName.valueOf(sn));
       }
       // Do we have any servers left to choose from?
-      if (serversToChooseFrom.size() == 0) {
+      if (serversToChooseFrom.isEmpty()) {
         return null;
       }
     }

@@ -55,7 +55,7 @@ public class FavoredNodesPlan {
    * @param servers
    */
   public void updateFavoredNodesMap(HRegionInfo region, List<ServerName> servers) {
-    if (region == null || servers == null || servers.size() == 0) {
+    if (region == null || servers == null || servers.isEmpty()) {
       return;
     }
     this.favoredNodesMap.put(region.getRegionNameAsString(), servers);
@@ -360,12 +360,12 @@ public class HttpServer implements FilterContainer {
         }
       }

-      if (endpoints.size() == 0 && connector == null) {
+      if (endpoints.isEmpty() && connector == null) {
         throw new HadoopIllegalArgumentException("No endpoints specified");
       }

       if (hostName == null) {
-        hostName = endpoints.size() == 0 ? connector.getHost() : endpoints.get(
+        hostName = endpoints.isEmpty() ? connector.getHost() : endpoints.get(
           0).getHost();
       }

@@ -1179,7 +1179,7 @@ public class HttpServer implements FilterContainer {
    */
   @Override
   public String toString() {
-    if (listeners.size() == 0) {
+    if (listeners.isEmpty()) {
       return "Inactive HttpServer";
     } else {
       StringBuilder sb = new StringBuilder("HttpServer (")

@@ -807,7 +807,7 @@ public class TableMapReduceUtil {
       throw new IllegalArgumentException("Must provide a configuration object.");
     }
     Set<String> paths = new HashSet<String>(conf.getStringCollection("tmpjars"));
-    if (paths.size() == 0) {
+    if (paths.isEmpty()) {
       throw new IllegalArgumentException("Configuration contains no tmpjars.");
     }
     StringBuilder sb = new StringBuilder();
@@ -112,7 +112,7 @@ public class CatalogJanitor extends ScheduledChore {
         && !this.services.isInMaintenanceMode()
         && am != null
         && am.isFailoverCleanupDone()
-        && am.getRegionStates().getRegionsInTransition().size() == 0) {
+        && am.getRegionStates().getRegionsInTransition().isEmpty()) {
       scan();
     } else {
       LOG.warn("CatalogJanitor disabled! Not running scan.");

@@ -2902,7 +2902,7 @@ public class HMaster extends HRegionServer implements MasterServices {
       final String namespace, final String regex, final List<TableName> tableNameList,
       final boolean includeSysTables)
   throws IOException {
-    if (tableNameList == null || tableNameList.size() == 0) {
+    if (tableNameList == null || tableNameList.isEmpty()) {
       // request for all TableDescriptors
       Collection<HTableDescriptor> allHtds;
       if (namespace != null && namespace.length() > 0) {

@@ -808,7 +808,7 @@ public class RegionStates {
     TableName table = hri.getTable();
     Map<String, RegionState> indexMap = regionStatesTableIndex.get(table);
     indexMap.remove(encodedName);
-    if (indexMap.size() == 0)
+    if (indexMap.isEmpty())
       regionStatesTableIndex.remove(table);
     lastAssignments.remove(encodedName);
     ServerName sn = regionAssignments.remove(hri);
@@ -554,7 +554,7 @@ public class ServerManager {

     try {
       List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode);
-      if (servers == null || servers.size() == 0 || (servers.size() == 1
+      if (servers == null || servers.isEmpty() || (servers.size() == 1
           && servers.contains(sn.toString()))) {
         LOG.info("ZK shows there is only the master self online, exiting now");
         // Master could have lost some ZK events, no need to wait more.

@@ -762,7 +762,7 @@ public class SplitLogManager {
           EnvironmentEdgeManager.currentTime()
               - getSplitLogManagerCoordination().getLastRecoveryTime();
       if (!failedRecoveringRegionDeletions.isEmpty()
-          || (tot == 0 && tasks.size() == 0 && (timeInterval > checkRecoveringTimeThreshold))) {
+          || (tot == 0 && tasks.isEmpty() && (timeInterval > checkRecoveringTimeThreshold))) {
         // inside the function there have more checks before GC anything
         if (!failedRecoveringRegionDeletions.isEmpty()) {
           List<Pair<Set<ServerName>, Boolean>> previouslyFailedDeletions =

@@ -484,7 +484,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
       setLoad(serverLoadList, i, balanceInfo.getNumRegionsAdded());
       if (balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() == max) {
         HRegionInfo hriToPlan;
-        if (balanceInfo.getHriList().size() == 0) {
+        if (balanceInfo.getHriList().isEmpty()) {
           LOG.debug("During balanceOverall, we found " + serverload.getServerName()
             + " has no HRegionInfo, no operation needed");
           continue;
@@ -423,7 +423,7 @@ public class CompactionTool extends Configured implements Tool {
       return 1;
     }

-    if (toCompactDirs.size() == 0) {
+    if (toCompactDirs.isEmpty()) {
       printUsage("No directories to compact specified.");
       return 1;
     }

@@ -3698,7 +3698,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           boolean valueIsNull = comparator.getValue() == null || comparator.getValue().length == 0;
           boolean matches = false;
           long cellTs = 0;
-          if (result.size() == 0 && valueIsNull) {
+          if (result.isEmpty() && valueIsNull) {
             matches = true;
           } else if (result.size() > 0 && result.get(0).getValueLength() == 0 && valueIsNull) {
             matches = true;

@@ -522,7 +522,7 @@ public class HStore implements Store {
   }

   private List<StoreFile> openStoreFiles(Collection<StoreFileInfo> files) throws IOException {
-    if (files == null || files.size() == 0) {
+    if (files == null || files.isEmpty()) {
       return new ArrayList<StoreFile>();
     }
     // initialize the thread pool for opening store files in parallel..
@@ -114,7 +114,7 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy {
         }
       }
     }
-    if (bestSelection.size() == 0 && mightBeStuck) {
+    if (bestSelection.isEmpty() && mightBeStuck) {
       LOG.debug("Exploring compaction algorithm has selected " + smallest.size()
         + " files of size "+ smallestSize + " because the store might be stuck");
       return new ArrayList<StoreFile>(smallest);

@@ -136,7 +136,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
       return selectNewStripesCompaction(si);
     }

-    boolean canDropDeletesNoL0 = l0Files.size() == 0;
+    boolean canDropDeletesNoL0 = l0Files.isEmpty();
     if (shouldCompactL0) {
       if (!canDropDeletesNoL0) {
         // If we need to compact L0, see if we can add something to it, and drop deletes.

@@ -371,7 +371,7 @@ public class LegacyScanQueryMatcher extends ScanQueryMatcher {
     int maxVersions = Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions());
     boolean hasNullColumn;
     ColumnTracker columnTracker;
-    if (columns == null || columns.size() == 0) {
+    if (columns == null || columns.isEmpty()) {
       // there is always a null column in the wildcard column query.
       hasNullColumn = true;
       // use a specialized scan for wildcard column tracker.
@@ -198,7 +198,7 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher {
         : Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions());
     boolean hasNullColumn;
     ColumnTracker columnTracker;
-    if (columns == null || columns.size() == 0) {
+    if (columns == null || columns.isEmpty()) {
       // there is always a null column in the wildcard column query.
       hasNullColumn = true;
       // use a specialized scan for wildcard column tracker.

@@ -94,7 +94,7 @@ public class WALEditsReplaySink {
    * @throws IOException on IO failure
    */
   public void replayEntries(List<Pair<HRegionLocation, Entry>> entries) throws IOException {
-    if (entries.size() == 0) {
+    if (entries.isEmpty()) {
       return;
     }

@@ -75,7 +75,7 @@ public class TableCFsUpdater extends ReplicationStateZKBase {
     String peerNode = getPeerNode(peerId);
     ReplicationPeerConfig rpc = getReplicationPeerConig(peerNode);
     // We only need to copy data from tableCFs node to rpc Node the first time hmaster start.
-    if (rpc.getTableCFsMap() == null || rpc.getTableCFsMap().size() == 0) {
+    if (rpc.getTableCFsMap() == null || rpc.getTableCFsMap().isEmpty()) {
       // we copy TableCFs node into PeerNode
       LOG.info("copy tableCFs into peerNode:" + peerId);
       ReplicationProtos.TableCF[] tableCFs =
@ -184,7 +184,7 @@ public class ReplicationSink {
|
|||
CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(),
|
||||
cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(),
|
||||
cell.getRowLength());
|
||||
List<UUID> clusterIds = new ArrayList<UUID>();
|
||||
List<UUID> clusterIds = new ArrayList<UUID>(entry.getKey().getClusterIdsList().size());
|
||||
for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
|
||||
clusterIds.add(toUUID(clusterId));
|
||||
}
|
||||
|
@@ -275,7 +275,7 @@ public class ReplicationSink {

   private void addFamilyAndItsHFilePathToTableInMap(byte[] family, String pathToHfileFromNS,
       List<Pair<byte[], List<String>>> familyHFilePathsList) {
-    List<String> hfilePaths = new ArrayList<String>();
+    List<String> hfilePaths = new ArrayList<String>(1);
     hfilePaths.add(pathToHfileFromNS);
     familyHFilePathsList.add(new Pair<byte[], List<String>>(family, hfilePaths));
   }
@@ -283,7 +283,7 @@ public class ReplicationSink {
   private void addNewTableEntryInMap(
       final Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap, byte[] family,
       String pathToHfileFromNS, String tableName) {
-    List<String> hfilePaths = new ArrayList<String>();
+    List<String> hfilePaths = new ArrayList<String>(1);
     hfilePaths.add(pathToHfileFromNS);
     Pair<byte[], List<String>> newFamilyHFilePathsPair =
         new Pair<byte[], List<String>>(family, hfilePaths);
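Note: both helpers build a list that starts with exactly one HFile path, so `new ArrayList<String>(1)` trims the default ten-slot backing array down to one. `Collections.singletonList` would be cheaper still, but it is immutable, so it only fits when nothing appends to the list afterwards; here the surrounding code can add further paths for the same family, which is presumably why a growable list was kept. A small sketch of the trade-off (the path strings are made up):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class OneElementListDemo {
  public static void main(String[] args) {
    // Growable one-slot list: exceeding the initial capacity later is legal.
    List<String> hfilePaths = new ArrayList<>(1);
    hfilePaths.add("/staging/f1/hfile-0");
    hfilePaths.add("/staging/f1/hfile-1");

    // Immutable alternative: fine only when the list will never change.
    List<String> fixed = Collections.singletonList("/staging/f1/hfile-0");
    try {
      fixed.add("/staging/f1/hfile-1");
    } catch (UnsupportedOperationException e) {
      System.out.println("singletonList rejects mutation");
    }
  }
}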
@@ -614,7 +614,7 @@ public class ReplicationSource extends Thread
     //We take the snapshot now so that we are protected against races
     //where a new file gets enqueued while the current file is being processed
     //(and where we just finished reading the current file).
-    if (!this.replicationQueueInfo.isQueueRecovered() && queue.size() == 0) {
+    if (!this.replicationQueueInfo.isQueueRecovered() && queue.isEmpty()) {
       currentWALisBeingWrittenTo = true;
     }
     // Open a reader on it
@@ -1075,7 +1075,7 @@ public class ReplicationSource extends Thread
    */
   private boolean isCurrentLogEmpty() {
     return (this.repLogReader.getPosition() == 0 &&
-        !this.replicationQueueInfo.isQueueRecovered() && queue.size() == 0);
+        !this.replicationQueueInfo.isQueueRecovered() && queue.isEmpty());
   }

   /**
@@ -780,7 +780,7 @@ public class ReplicationSourceManager implements ReplicationListener {
     @Override
     public void run() {
       List<String> currentReplicators = replicationQueues.getListOfReplicators();
-      if (currentReplicators == null || currentReplicators.size() == 0) {
+      if (currentReplicators == null || currentReplicators.isEmpty()) {
         return;
       }
       List<String> otherRegionServers = replicationTracker.getListOfRegionServers();
@@ -553,7 +553,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
       Byte deleteTagsFormat) throws IOException {
     if ((deleteTagsFormat != null && deleteTagsFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT)
         && (putTagsFormat == null || putTagsFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT)) {
-      if (putVisTags.size() == 0) {
+      if (putVisTags.isEmpty()) {
         // Early out if there are no tags in the cell
         return false;
       }
@@ -399,7 +399,7 @@ public class RestoreSnapshotHelper {
    */
   private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<HRegionInfo> regions)
       throws IOException {
-    if (regions == null || regions.size() == 0) return;
+    if (regions == null || regions.isEmpty()) return;
     ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
       @Override
       public void editRegion(final HRegionInfo hri) throws IOException {
@@ -414,7 +414,7 @@ public class RestoreSnapshotHelper {
   private void restoreHdfsRegions(final ThreadPoolExecutor exec,
       final Map<String, SnapshotRegionManifest> regionManifests,
       final List<HRegionInfo> regions) throws IOException {
-    if (regions == null || regions.size() == 0) return;
+    if (regions == null || regions.isEmpty()) return;
     ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
       @Override
       public void editRegion(final HRegionInfo hri) throws IOException {
@@ -429,7 +429,7 @@ public class RestoreSnapshotHelper {
   private void restoreHdfsMobRegions(final ThreadPoolExecutor exec,
       final Map<String, SnapshotRegionManifest> regionManifests,
       final List<HRegionInfo> regions) throws IOException {
-    if (regions == null || regions.size() == 0) return;
+    if (regions == null || regions.isEmpty()) return;
     ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
       @Override
       public void editRegion(final HRegionInfo hri) throws IOException {
@@ -562,7 +562,7 @@ public class RestoreSnapshotHelper {
   private HRegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec,
       final Map<String, SnapshotRegionManifest> regionManifests,
       final List<HRegionInfo> regions) throws IOException {
-    if (regions == null || regions.size() == 0) return null;
+    if (regions == null || regions.isEmpty()) return null;

     final Map<String, HRegionInfo> snapshotRegions =
         new HashMap<String, HRegionInfo>(regions.size());
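Note: one subtlety in `cloneHdfsRegions`: the `HashMap` constructor argument is an initial capacity (bucket count), not an expected size. With the default load factor of 0.75, a map built with `new HashMap<>(n)` still rehashes once if all n entries are inserted. Sizing for zero rehashes would look like the following sketch (illustrative only, not a suggested change to the patch):

import java.util.HashMap;
import java.util.Map;

public class MapCapacityDemo {
  public static void main(String[] args) {
    int expected = 100;

    // Capacity rounds up to 128; resize threshold is 0.75 * 128 = 96,
    // so inserting 100 entries still triggers one rehash.
    Map<String, String> tight = new HashMap<>(expected);

    // Capacity chosen so that expected entries stay under the threshold:
    // no rehash during the fill.
    Map<String, String> roomy = new HashMap<>((int) (expected / 0.75f) + 1);

    for (int i = 0; i < expected; i++) {
      tight.put("region-" + i, "manifest-" + i);
      roomy.put("region-" + i, "manifest-" + i);
    }
  }
}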
@@ -686,7 +686,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
     List<SnapshotDescription> snapshotList = getSnapshotList(conf);

-    if (snapshotList.size() == 0) {
+    if (snapshotList.isEmpty()) {
       return Collections.emptyMap();
     }

@@ -425,7 +425,7 @@ public final class SnapshotManifest {
    * This is an helper to get a map with the region encoded name
    */
   public Map<String, SnapshotRegionManifest> getRegionManifestsMap() {
-    if (regionManifests == null || regionManifests.size() == 0) return null;
+    if (regionManifests == null || regionManifests.isEmpty()) return null;

     HashMap<String, SnapshotRegionManifest> regionsMap =
         new HashMap<String, SnapshotRegionManifest>(regionManifests.size());
@@ -112,7 +112,7 @@ public final class SnapshotReferenceUtil {
       throws IOException {
     SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
     List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
-    if (regionManifests == null || regionManifests.size() == 0) {
+    if (regionManifests == null || regionManifests.isEmpty()) {
       LOG.debug("No manifest files present: " + snapshotDir);
       return;
     }
@@ -184,7 +184,7 @@ public final class SnapshotReferenceUtil {

     final Path snapshotDir = manifest.getSnapshotDir();
     List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
-    if (regionManifests == null || regionManifests.size() == 0) {
+    if (regionManifests == null || regionManifests.isEmpty()) {
       LOG.debug("No manifest files present: " + snapshotDir);
       return;
     }
@@ -205,7 +205,7 @@ public final class SnapshotReferenceUtil {
     final Path snapshotDir = manifest.getSnapshotDir();

     List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
-    if (regionManifests == null || regionManifests.size() == 0) {
+    if (regionManifests == null || regionManifests.isEmpty()) {
       LOG.debug("No manifest files present: " + snapshotDir);
       return;
     }
@@ -1318,7 +1318,7 @@ public final class Canary implements Tool {
           "option, tablenames:" + foundTableNames.toString());
       this.errorCode = USAGE_EXIT_CODE;
     }
-    return foundTableNames.size() == 0;
+    return foundTableNames.isEmpty();
   }

   private void monitorRegionServers(Map<String, List<HRegionInfo>> rsAndRMap) {
@@ -1331,7 +1331,7 @@ public class HBaseFsck extends Configured implements Closeable {
   public void fixOrphanTables() throws IOException {
     if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) {

-      List<TableName> tmpList = new ArrayList<TableName>();
+      List<TableName> tmpList = new ArrayList<TableName>(orphanTableDirs.keySet().size());
       tmpList.addAll(orphanTableDirs.keySet());
       HTableDescriptor[] htds = getHTableDescriptors(tmpList);
       Iterator<Entry<TableName, Set<String>>> iter =
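Note: `fixOrphanTables` sizes `tmpList` from the key set before bulk-adding it (and `keySet().size()` is simply the map's size). Since sizing plus `addAll` is exactly what the `ArrayList` copy constructor does internally, an equivalent one-liner exists; a sketch under made-up names:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class CopyConstructorDemo {
  public static void main(String[] args) {
    Set<String> orphanTableKeys = new HashSet<>();
    orphanTableKeys.add("t1");
    orphanTableKeys.add("t2");

    // Two-step form, as in the patch: size, then bulk-add.
    List<String> tmpList = new ArrayList<>(orphanTableKeys.size());
    tmpList.addAll(orphanTableKeys);

    // Equivalent single step: the copy constructor sizes the backing
    // array from the source collection before copying it.
    List<String> copied = new ArrayList<>(orphanTableKeys);

    System.out.println(tmpList.size() == copied.size());
  }
}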
@@ -2531,7 +2531,7 @@ public class HBaseFsck extends Configured implements Closeable {
       // the region chain in META
       //if (hbi.foundRegionDir == null) continue;
       //if (hbi.deployedOn.size() != 1) continue;
-      if (hbi.deployedOn.size() == 0) continue;
+      if (hbi.deployedOn.isEmpty()) continue;

       // We should be safe here
       TableName tableName = hbi.metaEntry.getTable();
@@ -3089,7 +3089,7 @@ public class HBaseFsck extends Configured implements Closeable {
     byte[] prevKey = null;
     byte[] problemKey = null;

-    if (splits.size() == 0) {
+    if (splits.isEmpty()) {
       // no region for this table
       handler.handleHoleInRegionChain(HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
     }
@@ -3145,7 +3145,7 @@ public class HBaseFsck extends Configured implements Closeable {
         }
       }

-      } else if (ranges.size() == 0) {
+      } else if (ranges.isEmpty()) {
         if (problemKey != null) {
           LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
         }
@@ -3377,7 +3377,7 @@ public class HBaseFsck extends Configured implements Closeable {
       }
       if (servers.size() != 1) {
         noProblem = false;
-        if (servers.size() == 0) {
+        if (servers.isEmpty()) {
           assignMetaReplica(i);
         } else if (servers.size() > 1) {
           errors
@@ -4466,7 +4466,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * Empty list means all tables are included.
    */
   boolean isTableIncluded(TableName table) {
-    return (tablesIncluded.size() == 0) || tablesIncluded.contains(table);
+    return (tablesIncluded.isEmpty()) || tablesIncluded.contains(table);
   }

   public void includeTable(TableName table) {
@@ -118,7 +118,7 @@ public class IdLock {

   /** For testing */
   void assertMapEmpty() {
-    assert map.size() == 0;
+    assert map.isEmpty();
   }

   @VisibleForTesting
@@ -463,17 +463,17 @@ public class RegionMover extends AbstractHBaseTool {
       boolean ack, List<HRegionInfo> movedRegions) throws Exception {
     List<HRegionInfo> regionsToMove = new ArrayList<HRegionInfo>();// FindBugs: DLS_DEAD_LOCAL_STORE
     regionsToMove = getRegions(this.conf, server);
-    if (regionsToMove.size() == 0) {
+    if (regionsToMove.isEmpty()) {
       LOG.info("No Regions to move....Quitting now");
       return;
-    } else if (regionServers.size() == 0) {
+    } else if (regionServers.isEmpty()) {
       LOG.warn("No Regions were moved - no servers available");
       throw new Exception("No online region servers");
     }
     while (true) {
       regionsToMove = getRegions(this.conf, server);
       regionsToMove.removeAll(movedRegions);
-      if (regionsToMove.size() == 0) {
+      if (regionsToMove.isEmpty()) {
         break;
       }
       int counter = 0;
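Note: the FindBugs marker in this hunk (`DLS_DEAD_LOCAL_STORE`) flags that the initial `new ArrayList<HRegionInfo>()` is a dead store: it is overwritten by `getRegions(this.conf, server)` on the very next line, so no capacity choice there can matter. A sketch of the simpler shape the warning points toward (assuming, as the hunk suggests, that `getRegions` already returns a list):

import java.util.ArrayList;
import java.util.List;

public class DeadStoreDemo {
  static List<String> getRegions() {
    List<String> regions = new ArrayList<>(2);
    regions.add("region-a");
    regions.add("region-b");
    return regions;
  }

  public static void main(String[] args) {
    // Dead store: this allocation is discarded immediately.
    List<String> regionsToMove = new ArrayList<>();
    regionsToMove = getRegions();

    // Equivalent, without the wasted allocation.
    List<String> direct = getRegions();

    System.out.println(regionsToMove.size() + " " + direct.size());
  }
}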
@@ -823,7 +823,7 @@ public class RegionMover extends AbstractHBaseTool {
   private ArrayList<String> getServers(Admin admin) throws IOException {
     ArrayList<ServerName> serverInfo =
         new ArrayList<ServerName>(admin.getClusterStatus().getServers());
-    ArrayList<String> regionServers = new ArrayList<String>();
+    ArrayList<String> regionServers = new ArrayList<String>(serverInfo.size());
     for (ServerName server : serverInfo) {
       regionServers.add(server.getServerName());
     }
@@ -174,7 +174,7 @@ public class HFileCorruptionChecker {

     List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
     // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
-    if (hfs.size() == 0 && !fs.exists(cfDir)) {
+    if (hfs.isEmpty() && !fs.exists(cfDir)) {
       LOG.warn("Colfam Directory " + cfDir +
           " does not exist. Likely due to concurrent split/compaction. Skipping.");
       missing.add(cfDir);
@@ -207,7 +207,7 @@ public class HFileCorruptionChecker {

     List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
     // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
-    if (hfs.size() == 0 && !fs.exists(cfDir)) {
+    if (hfs.isEmpty() && !fs.exists(cfDir)) {
       LOG.warn("Mob colfam Directory " + cfDir +
           " does not exist. Likely the table is deleted. Skipping.");
       missedMobFiles.add(cfDir);
@@ -311,7 +311,7 @@ public class HFileCorruptionChecker {

     List<FileStatus> cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs));
     // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
-    if (cfs.size() == 0 && !fs.exists(regionDir)) {
+    if (cfs.isEmpty() && !fs.exists(regionDir)) {
       LOG.warn("Region Directory " + regionDir +
           " does not exist. Likely due to concurrent split/compaction. Skipping.");
       missing.add(regionDir);
@@ -343,7 +343,7 @@ public class HFileCorruptionChecker {
     }

     // Parallelize check at the region dir level
-    List<RegionDirChecker> rdcs = new ArrayList<RegionDirChecker>();
+    List<RegionDirChecker> rdcs = new ArrayList<RegionDirChecker>(rds.size() + 1);
     List<Future<Void>> rdFutures;

     for (FileStatus rdFs : rds) {
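Note: the `rds.size() + 1` capacity deserves a remark. The loop adds one checker per region directory, and the extra slot presumably covers one additional checker appended outside the loop (likely the mob region directory, going by the neighbouring mob-handling hunks in this class; that detail is inferred, not visible in this hunk). Sized that way, the list never reallocates. A toy sketch of the pattern, with illustrative names only:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PlusOneCapacityDemo {
  public static void main(String[] args) {
    List<String> regionDirs = Arrays.asList("r1", "r2", "r3");

    // One slot per region dir, plus one for the extra checker.
    List<String> checkers = new ArrayList<>(regionDirs.size() + 1);
    for (String dir : regionDirs) {
      checkers.add("checker-" + dir);
    }
    checkers.add("checker-mobdir");  // fills the +1 slot

    System.out.println(checkers);
  }
}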
@@ -541,7 +541,7 @@ public class HFileCorruptionChecker {
       out.print(" " + mq);
     }

-    String initialState = (corrupted.size() == 0) ? "OK" : "CORRUPTED";
+    String initialState = (corrupted.isEmpty()) ? "OK" : "CORRUPTED";
     String fixedState = (corrupted.size() == quarantined.size()) ? "OK"
         : "CORRUPTED";

@@ -560,7 +560,7 @@ public class HFileCorruptionChecker {
     for (Path mq : missedMobFiles) {
       out.print(" " + mq);
     }
-    String initialMobState = (corruptedMobFiles.size() == 0) ? "OK" : "CORRUPTED";
+    String initialMobState = (corruptedMobFiles.isEmpty()) ? "OK" : "CORRUPTED";
     String fixedMobState = (corruptedMobFiles.size() == quarantinedMobFiles.size()) ? "OK"
         : "CORRUPTED";

@@ -115,7 +115,7 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
     if (wal == null) {
       return Collections.emptyList();
     }
-    List<WAL> wals = new ArrayList<WAL>();
+    List<WAL> wals = new ArrayList<WAL>(1);
     wals.add(wal);
     return wals;
   }
@@ -67,7 +67,7 @@ class DisabledWALProvider implements WALProvider {

   @Override
   public List<WAL> getWALs() throws IOException {
-    List<WAL> wals = new ArrayList<WAL>();
+    List<WAL> wals = new ArrayList<WAL>(1);
     wals.add(disabled);
     return wals;
   }
@@ -229,7 +229,7 @@ public class WALKey implements SequenceId, Comparable<WALKey> {
   public WALKey(final byte[] encodedRegionName, final TableName tablename,
       long logSeqNum,
       final long now, UUID clusterId) {
-    List<UUID> clusterIds = new ArrayList<UUID>();
+    List<UUID> clusterIds = new ArrayList<UUID>(1);
     clusterIds.add(clusterId);
     init(encodedRegionName, tablename, logSeqNum, now, clusterIds,
         HConstants.NO_NONCE, HConstants.NO_NONCE, null, null);
@@ -293,7 +293,7 @@ public class WALPrettyPrinter {
           actions.add(op);
         }
       }
-      if (actions.size() == 0)
+      if (actions.isEmpty())
         continue;
       txn.put("actions", actions);
       if (outputJSON) {
@@ -381,7 +381,7 @@ public class WALPrettyPrinter {
     try {
       CommandLine cmd = parser.parse(options, args);
       files = cmd.getArgList();
-      if (files.size() == 0 || cmd.hasOption("h")) {
+      if (files.isEmpty() || cmd.hasOption("h")) {
         HelpFormatter formatter = new HelpFormatter();
         formatter.printHelp("WAL <filename...>", options, true);
         System.exit(-1);
@@ -350,7 +350,7 @@ public class MiniZooKeeperCluster {
     LOG.info("Kill the current active ZK servers in the cluster " +
         "on client port: " + clientPort);

-    if (standaloneServerFactoryList.size() == 0) {
+    if (standaloneServerFactoryList.isEmpty()) {
       // there is no backup servers;
       return -1;
     }
@@ -681,7 +681,7 @@ public class TestPartialResultsFromClientSide {
       LOG.info("Actual count: " + result.size());
     }

-    if (expKvList.size() == 0) return;
+    if (expKvList.isEmpty()) return;

     int i = 0;
     for (Cell kv : result.rawCells()) {
Some files were not shown because too many files have changed in this diff.