HBASE-19195 error-prone fixes for client, mr, and server
parent d20ab88592
commit 0d4f33ca27
@@ -90,10 +90,12 @@ public class FilterListWithAND extends FilterListBase {
    * code of current sub-filter.
    */
   private ReturnCode mergeReturnCode(ReturnCode rc, ReturnCode localRC) {
-    if (rc == ReturnCode.SEEK_NEXT_USING_HINT || localRC == ReturnCode.SEEK_NEXT_USING_HINT) {
+    if (rc == ReturnCode.SEEK_NEXT_USING_HINT) {
       return ReturnCode.SEEK_NEXT_USING_HINT;
     }
     switch (localRC) {
+      case SEEK_NEXT_USING_HINT:
+        return ReturnCode.SEEK_NEXT_USING_HINT;
       case INCLUDE:
         return rc;
       case INCLUDE_AND_NEXT_COL:
@@ -268,8 +268,9 @@ public abstract class TableInputFormatBase
     }

     //The default value of "hbase.mapreduce.input.autobalance" is false.
-    if (context.getConfiguration().getBoolean(MAPREDUCE_INPUT_AUTOBALANCE, false) != false) {
-      long maxAveRegionSize = context.getConfiguration().getInt(MAX_AVERAGE_REGION_SIZE, 8*1073741824);
+    if (context.getConfiguration().getBoolean(MAPREDUCE_INPUT_AUTOBALANCE, false)) {
+      long maxAveRegionSize = context.getConfiguration()
+          .getLong(MAX_AVERAGE_REGION_SIZE, 8L*1073741824); //8GB
       return calculateAutoBalancedSplits(splits, maxAveRegionSize);
     }
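The important part of this hunk is the default value. 8*1073741824 is evaluated in int arithmetic and wraps to 0 before the configuration lookup ever sees it, and an int cannot hold 8 GB in any case, so the lookup switches to getLong with a long literal. A standalone sketch of the overflow (not HBase code, names are illustrative):

public class OverflowDemo {
  public static void main(String[] args) {
    // int arithmetic wraps around: 8 * 2^30 = 2^33, which does not fit in 32 bits
    int wrapped = 8 * 1073741824;
    System.out.println(wrapped);   // prints 0 (the low 32 bits of 2^33)

    // promoting the first operand to long keeps the full value
    long correct = 8L * 1073741824;
    System.out.println(correct);   // prints 8589934592 (8 GB)
  }
}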
@@ -1068,9 +1068,13 @@ public class PerformanceEvaluation extends Configured implements Tool {
     }

     int getValueLength(final Random r) {
-      if (this.opts.isValueRandom()) return Math.abs(r.nextInt() % opts.valueSize);
-      else if (this.opts.isValueZipf()) return Math.abs(this.zipf.nextInt());
-      else return opts.valueSize;
+      if (this.opts.isValueRandom()) {
+        return r.nextInt(opts.valueSize);
+      } else if (this.opts.isValueZipf()) {
+        return Math.abs(this.zipf.nextInt());
+      } else {
+        return opts.valueSize;
+      }
     }

     void updateValueSize(final Result [] rs) throws IOException {
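Math.abs(r.nextInt() % opts.valueSize) is a known pitfall: Math.abs(Integer.MIN_VALUE) is still negative, so the old expression could return a negative length, and the modulo also biases the distribution slightly. Random.nextInt(bound) returns a uniform value in [0, bound). A standalone sketch (illustrative only):

import java.util.Random;

public class BoundedRandomDemo {
  public static void main(String[] args) {
    // Math.abs(Integer.MIN_VALUE) is still Integer.MIN_VALUE, so the old idiom can go negative
    System.out.println(Math.abs(Integer.MIN_VALUE));        // -2147483648
    System.out.println(Math.abs(Integer.MIN_VALUE) % 100);  // -48

    // nextInt(bound) returns a uniform value in [0, bound) and can never be negative
    Random r = new Random();
    int len = r.nextInt(100);
    System.out.println(len >= 0 && len < 100);               // true
  }
}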
@@ -38,7 +38,7 @@ public class TestSplitTable {
   public TestName name = new TestName();

   @Test
-  @SuppressWarnings("deprecation")
+  @SuppressWarnings({"deprecation", "SelfComparison"})
   public void testSplitTableCompareTo() {
     TableSplit aTableSplit = new TableSplit(Bytes.toBytes("tableA"),
         Bytes.toBytes("aaa"), Bytes.toBytes("ddd"), "locationA");
@@ -49,9 +49,9 @@ public class TestSplitTable {
     TableSplit cTableSplit = new TableSplit(Bytes.toBytes("tableA"),
         Bytes.toBytes("lll"), Bytes.toBytes("zzz"), "locationA");

-    assertTrue(aTableSplit.compareTo(aTableSplit) == 0);
-    assertTrue(bTableSplit.compareTo(bTableSplit) == 0);
-    assertTrue(cTableSplit.compareTo(cTableSplit) == 0);
+    assertEquals(0, aTableSplit.compareTo(aTableSplit));
+    assertEquals(0, bTableSplit.compareTo(bTableSplit));
+    assertEquals(0, cTableSplit.compareTo(cTableSplit));

     assertTrue(aTableSplit.compareTo(bTableSplit) < 0);
     assertTrue(bTableSplit.compareTo(aTableSplit) > 0);
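The widened @SuppressWarnings above acknowledges that comparing a split to itself is intentional in this test; the assertion rewrite is about diagnostics, since assertEquals reports both values on failure while assertTrue(x == 0) reports nothing useful. A small illustrative snippet, assuming JUnit 4 on the classpath:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

public class AssertStyleDemo {
  static void check(int cmp) {
    // If cmp is nonzero this fails with a bare AssertionError, no values shown:
    assertTrue(cmp == 0);
    // Whereas this fails with "expected:<0> but was:<...>", which is easier to debug:
    assertEquals(0, cmp);
  }
}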
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;

 import java.io.IOException;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -430,7 +431,7 @@ public class TestImportTsv implements Configurable {

     // run the import
     Tool tool = new ImportTsv();
-    LOG.debug("Running ImportTsv with arguments: " + argsArray);
+    LOG.debug("Running ImportTsv with arguments: " + Arrays.toString(argsArray));
     assertEquals(0, ToolRunner.run(conf, tool, argsArray));

     // Perform basic validation. If the input args did not include
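Concatenating an array into a string uses Object.toString(), which prints the array type and identity hash rather than its contents; Arrays.toString() prints the elements. A standalone sketch with made-up arguments (not the test's real ones):

import java.util.Arrays;

public class ArrayLoggingDemo {
  public static void main(String[] args) {
    String[] argsArray = {"-Dimporttsv.columns=HBASE_ROW_KEY,d:c1", "someTable", "/some/input"};
    // Implicit toString() on an array prints something like "[Ljava.lang.String;@1b6d3586"
    System.out.println("Running ImportTsv with arguments: " + argsArray);
    // Arrays.toString() prints the actual contents
    System.out.println("Running ImportTsv with arguments: " + Arrays.toString(argsArray));
  }
}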
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.mapreduce;

 import java.io.IOException;
+import java.util.Arrays;

 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Put;
@@ -43,10 +44,10 @@ public class TsvImporterCustomTestMapperForOprAttr extends TsvImporterMapper {
     for (String attr : attributes) {
       String[] split = attr.split(ImportTsv.DEFAULT_ATTRIBUTES_SEPERATOR);
       if (split == null || split.length <= 1) {
-        throw new BadTsvLineException("Invalid attributes seperator specified" + attributes);
+        throw new BadTsvLineException(msg(attributes));
       } else {
         if (split[0].length() <= 0 || split[1].length() <= 0) {
-          throw new BadTsvLineException("Invalid attributes seperator specified" + attributes);
+          throw new BadTsvLineException(msg(attributes));
         }
         put.setAttribute(split[0], Bytes.toBytes(split[1]));
       }
@@ -54,4 +55,8 @@ public class TsvImporterCustomTestMapperForOprAttr extends TsvImporterMapper {
     }
     put.add(kv);
   }
+
+  private String msg(Object[] attributes) {
+    return "Invalid attributes separator specified: " + Arrays.toString(attributes);
+  }
 }
@@ -83,13 +83,13 @@ public class TestFromClientSide3 {
   private static byte[] FAMILY = Bytes.toBytes("testFamily");
   private static Random random = new Random();
   private static int SLAVES = 3;
-  private static byte [] ROW = Bytes.toBytes("testRow");
+  private static final byte[] ROW = Bytes.toBytes("testRow");
   private static final byte[] ANOTHERROW = Bytes.toBytes("anotherrow");
-  private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
-  private static byte [] VALUE = Bytes.toBytes("testValue");
-  private final static byte[] COL_QUAL = Bytes.toBytes("f1");
-  private final static byte[] VAL_BYTES = Bytes.toBytes("v1");
-  private final static byte[] ROW_BYTES = Bytes.toBytes("r1");
+  private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier");
+  private static final byte[] VALUE = Bytes.toBytes("testValue");
+  private static final byte[] COL_QUAL = Bytes.toBytes("f1");
+  private static final byte[] VAL_BYTES = Bytes.toBytes("v1");
+  private static final byte[] ROW_BYTES = Bytes.toBytes("r1");

   @Rule
   public TestName name = new TestName();
@@ -361,7 +361,7 @@ public class TestFromClientSide3 {
           break;
         }
       } catch (Exception e) {
-        LOG.debug("Waiting for region to come online: " + regionName);
+        LOG.debug("Waiting for region to come online: " + Bytes.toString(regionName));
       }
       Thread.sleep(40);
     }
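This is the same family of problem as the Arrays.toString fix above: a byte[] appended to a log message prints as "[B@..." rather than its contents. HBase's Bytes utility decodes it back to readable text. A minimal sketch, assuming hbase-common on the classpath and an invented region name:

import org.apache.hadoop.hbase.util.Bytes;

public class ByteArrayLoggingDemo {
  public static void main(String[] args) {
    byte[] regionName = Bytes.toBytes("someTable,,1510000000000.abcdef");
    // Implicit toString() on a byte[] prints something like "[B@4617c264"
    System.out.println("Waiting for region to come online: " + regionName);
    // Bytes.toString() decodes the bytes back into readable text
    System.out.println("Waiting for region to come online: " + Bytes.toString(regionName));
  }
}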
@@ -478,6 +478,7 @@ public class TestFromClientSide3 {
     assertEquals(exist, true);
   }

+  @Test
   public void testHTableExistsMethodSingleRegionMultipleGets() throws Exception {
     Table table = TEST_UTIL.createTable(TableName.valueOf(
       name.getMethodName()), new byte[][] { FAMILY });
@@ -488,13 +489,11 @@ public class TestFromClientSide3 {

     List<Get> gets = new ArrayList<>();
     gets.add(new Get(ROW));
-    gets.add(null);
     gets.add(new Get(ANOTHERROW));

-    boolean[] results = table.existsAll(gets);
-    assertEquals(results[0], true);
-    assertEquals(results[1], false);
-    assertEquals(results[2], false);
+    boolean[] results = table.exists(gets);
+    assertTrue(results[0]);
+    assertFalse(results[1]);
   }

   @Test
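This hunk reads as two related cleanups: the null Get is dropped, so the batch has two entries and only results[0] and results[1] are checked, and the call moves from the deprecated Table.existsAll(List<Get>) to Table.exists(List<Get>), with assertTrue/assertFalse replacing assertEquals against boolean literals. A rough usage sketch of the batched existence check, assuming an HBase 2.x Connection; the table and row names are placeholders:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ExistsBatchDemo {
  // Checks several rows in one batched call; Table.exists(List<Get>) returns
  // one boolean per Get, in the same order as the input list.
  static boolean[] checkRows(Connection conn, TableName tableName) throws Exception {
    try (Table table = conn.getTable(tableName)) {
      List<Get> gets = new ArrayList<>();
      gets.add(new Get(Bytes.toBytes("testRow")));
      gets.add(new Get(Bytes.toBytes("anotherrow")));
      return table.exists(gets);
    }
  }
}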
@@ -749,7 +748,7 @@ public class TestFromClientSide3 {
       try (Table table = con.getTable(tableName)) {
         table.append(append);
         fail("The APPEND should fail because the target lock is blocked by previous put");
-      } catch (Throwable ex) {
+      } catch (Exception ex) {
       }
     });
     appendService.shutdown();
@@ -802,6 +801,7 @@ public class TestFromClientSide3 {
     });
     ExecutorService cpService = Executors.newSingleThreadExecutor();
     cpService.execute(() -> {
+      boolean threw;
       Put put1 = new Put(row);
       Put put2 = new Put(rowLocked);
       put1.addColumn(FAMILY, QUALIFIER, value1);
@@ -823,10 +823,13 @@ public class TestFromClientSide3 {
           exe.mutateRows(controller, request, rpcCallback);
           return rpcCallback.get();
         });
-        fail("This cp should fail because the target lock is blocked by previous put");
+        threw = false;
       } catch (Throwable ex) {
-        // TODO!!!! Is this right? It catches everything including the above fail
-        // if it happens (which it seems too....)
+        threw = true;
       }
+      if (!threw) {
+        // Can't call fail() earlier because the catch would eat it.
+        fail("This cp should fail because the target lock is blocked by previous put");
+      }
     });
     cpService.shutdown();
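The pattern fixed in the last two TestFromClientSide3 hunks: calling fail() inside a try whose catch (Throwable ...) block is empty means the AssertionError thrown by fail() is swallowed, so the test can never actually fail. One hunk narrows the catch to Exception; this one records whether the call threw and asserts afterwards, outside the catch. A condensed standalone sketch of the same idea (JUnit 4, illustrative names):

import static org.junit.Assert.fail;

public class ExpectedFailureDemo {

  static void operationThatShouldThrow() {
    throw new IllegalStateException("lock is held by a previous put");
  }

  // Broken: fail() throws AssertionError, which catch (Throwable) silently eats,
  // so this method "passes" whether or not the operation throws.
  static void broken() {
    try {
      operationThatShouldThrow();
      fail("operation should have thrown");
    } catch (Throwable ex) {
      // swallowed, including the AssertionError from fail()
    }
  }

  // Fixed: remember whether the operation threw, and call fail() after the
  // try/catch so nothing can swallow the AssertionError.
  static void fixed() {
    boolean threw;
    try {
      operationThatShouldThrow();
      threw = false;
    } catch (Exception ex) {
      threw = true;
    }
    if (!threw) {
      fail("operation should have thrown");
    }
  }
}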
@@ -450,6 +450,7 @@ public class TestMajorCompaction {
    * basically works.
    * @throws IOException
    */
+  @Test
   public void testMajorCompactingToNoOutputWithReverseScan() throws IOException {
     createStoreFile(r);
     for (int i = 0; i < compactionThreshold; i++) {
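As with testHTableExistsMethodSingleRegionMultipleGets earlier in this commit, this test method was missing its @Test annotation, and a public void method without @Test is silently skipped by the JUnit 4 runner. A tiny illustration:

import org.junit.Test;

public class AnnotationDemo {
  @Test
  public void runs() { }        // picked up by the JUnit 4 runner

  public void neverRuns() { }   // public and void, but without @Test it is silently skipped
}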