HBASE-10246 Wrap long lines in recently added source files
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1553786 13f79535-47bb-0310-9956-ffa450edef68
parent 5123e92213
commit ea7734dc90
@@ -51,14 +51,17 @@ import org.junit.experimental.categories.Category;
  *
  * Then the test creates a snapshot from this table, and overrides the values in the original
  * table with values 'after_snapshot_value'. The test, then runs a mapreduce job over the snapshot
- * with a scan start row 'bbb' and stop row 'yyy'. The data is saved in a single reduce output file, and
+ * with a scan start row 'bbb' and stop row 'yyy'. The data is saved in a single reduce output
+ * file, and
  * inspected later to verify that the MR job has seen all the values from the snapshot.
  *
  * <p> These parameters can be used to configure the job:
  * <br>"IntegrationTestTableSnapshotInputFormat.table" => the name of the table
  * <br>"IntegrationTestTableSnapshotInputFormat.snapshot" => the name of the snapshot
- * <br>"IntegrationTestTableSnapshotInputFormat.numRegions" => number of regions in the table to be created
- * <br>"IntegrationTestTableSnapshotInputFormat.tableDir" => temporary directory to restore the snapshot files
+ * <br>"IntegrationTestTableSnapshotInputFormat.numRegions" => number of regions in the table
+ * to be created
+ * <br>"IntegrationTestTableSnapshotInputFormat.tableDir" => temporary directory to restore the
+ * snapshot files
  *
  */
 @Category(IntegrationTests.class)
@@ -70,10 +73,11 @@ public class IntegrationTestTableSnapshotInputFormat extends IntegrationTestBase
   private static final String TABLE_NAME_KEY = "IntegrationTestTableSnapshotInputFormat.table";
   private static final String DEFAULT_TABLE_NAME = "IntegrationTestTableSnapshotInputFormat";
 
-  private static final String SNAPSHOT_NAME_KEY = "IntegrationTestTableSnapshotInputFormat.snapshot";
+  private static final String SNAPSHOT_NAME_KEY =
+      "IntegrationTestTableSnapshotInputFormat.snapshot";
+  private static final String NUM_REGIONS_KEY =
+      "IntegrationTestTableSnapshotInputFormat.numRegions";
 
-  private static final String NUM_REGIONS_KEY = "IntegrationTestTableSnapshotInputFormat.numRegions";
   private static final int DEFAULT_NUM_REGIONS = 32;
 
   private static final String TABLE_DIR_KEY = "IntegrationTestTableSnapshotInputFormat.tableDir";
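For reference, a minimal sketch of how keys like these are typically consumed through Hadoop's Configuration. The key names and the table/numRegions defaults come from the hunk above; the snapshot-name fallback and the class wrapper are assumptions of the sketch, not part of this commit.

import org.apache.hadoop.conf.Configuration;

public class SnapshotTestConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key names and these two defaults are taken from the constants above.
    String table = conf.get("IntegrationTestTableSnapshotInputFormat.table",
        "IntegrationTestTableSnapshotInputFormat");
    int numRegions = conf.getInt("IntegrationTestTableSnapshotInputFormat.numRegions", 32);
    // No snapshot default is visible in this hunk; this fallback is assumed.
    String snapshot = conf.get("IntegrationTestTableSnapshotInputFormat.snapshot",
        table + "_snapshot");
    System.out.println(table + " / " + snapshot + " / " + numRegions);
  }
}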
@@ -48,7 +48,8 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
   List<Cell> values;
 
   public ClientSideRegionScanner(Configuration conf, FileSystem fs,
-      Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics) throws IOException {
+      Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
+      throws IOException {
 
     this.scan = scan;
 
@@ -82,7 +82,8 @@ import com.google.protobuf.ZeroCopyLiteralByteString;
  * while there are jobs reading from snapshot files.
  * <p>
  * Usage is similar to TableInputFormat, and
- * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, boolean, Path)}
+ * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
+ * boolean, Path)}
  * can be used to configure the job.
  * <pre>{@code
  * Job job = new Job(conf);
@@ -100,12 +101,13 @@ import com.google.protobuf.ZeroCopyLiteralByteString;
  * <p>
  * HBase owns all the data and snapshot files on the filesystem. Only the HBase user can read from
  * snapshot files and data files. HBase also enforces security because all the requests are handled
- * by the server layer, and the user cannot read from the data files directly. To read from snapshot
- * files directly from the file system, the user who is running the MR job must have sufficient
- * permissions to access snapshot and reference files. This means that to run mapreduce over
- * snapshot files, the MR job has to be run as the HBase user or the user must have group or other
- * priviledges in the filesystem (See HBASE-8369). Note that, given other users access to read from
- * snapshot/data files will completely circumvent the access control enforced by HBase.
+ * by the server layer, and the user cannot read from the data files directly.
+ * To read from snapshot files directly from the file system, the user who is running the MR job
+ * must have sufficient permissions to access snapshot and reference files.
+ * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase
+ * user or the user must have group or other priviledges in the filesystem (See HBASE-8369).
+ * Note that, given other users access to read from snapshot/data files will completely circumvent
+ * the access control enforced by HBase.
  * @see TableSnapshotScanner
  */
 @InterfaceAudience.Public
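The class Javadoc above points at TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, boolean, Path) as the way to configure such a job. A self-contained sketch of that usage follows; the snapshot name, restore path, and ExampleMapper are placeholders, not taken from this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotJobSetupExample {
  // Placeholder mapper: inherits the default map(), an identity pass-through
  // of the scanned (row key, Result) pairs.
  static class ExampleMapper extends TableMapper<ImmutableBytesWritable, Result> {
  }

  public static Job createJob(Configuration conf) throws Exception {
    Job job = new Job(conf); // mirrors the Javadoc's own snippet style
    Scan scan = new Scan();  // scan to run over the restored snapshot files
    TableMapReduceUtil.initTableSnapshotMapperJob(
        "example_snapshot",                 // snapshot name (placeholder)
        scan,
        ExampleMapper.class,
        ImmutableBytesWritable.class,       // map output key class
        Result.class,                       // map output value class
        job,
        true,                               // addDependencyJars
        new Path("/tmp/snapshot_restore")); // tmp restore dir (placeholder)
    return job;
  }
}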
@@ -117,7 +119,8 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable
   private static final Log LOG = LogFactory.getLog(TableSnapshotInputFormat.class);
 
   /** See {@link #getBestLocations(Configuration, HDFSBlocksDistribution)} */
-  private static final String LOCALITY_CUTOFF_MULTIPLIER = "hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
+  private static final String LOCALITY_CUTOFF_MULTIPLIER =
+      "hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
   private static final float DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
 
   private static final String SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
@@ -177,7 +180,8 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable
       int len = in.readInt();
       byte[] buf = new byte[len];
       in.readFully(buf);
-      MapReduceProtos.TableSnapshotRegionSplit split = MapReduceProtos.TableSnapshotRegionSplit.PARSER.parseFrom(buf);
+      MapReduceProtos.TableSnapshotRegionSplit split =
+          MapReduceProtos.TableSnapshotRegionSplit.PARSER.parseFrom(buf);
       this.regionName = Bytes.toString(split.getRegion().getValue().toByteArray());
       List<String> locationsList = split.getLocationsList();
       this.locations = locationsList.toArray(new String[locationsList.size()]);
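The readFields body above length-prefixes the serialized protobuf: readInt() recovers the frame size and readFully() pulls the whole payload before PARSER.parseFrom decodes it. Below is a stand-alone sketch of that framing pattern, with a plain byte array standing in for the serialized TableSnapshotRegionSplit; it is illustrative, not the class's code.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class FramingExample {
  static void writeFramed(DataOutput out, byte[] payload) throws IOException {
    out.writeInt(payload.length); // frame length first
    out.write(payload);           // then the payload bytes
  }

  static byte[] readFramed(DataInput in) throws IOException {
    int len = in.readInt();
    byte[] buf = new byte[len];
    in.readFully(buf);            // blocks until the whole frame is read
    return buf;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    writeFramed(new DataOutputStream(bos), "split-bytes".getBytes("UTF-8"));
    byte[] back = readFramed(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(new String(back, "UTF-8")); // prints: split-bytes
  }
}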
@@ -185,7 +189,8 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable
   }
 
   @VisibleForTesting
-  static class TableSnapshotRegionRecordReader extends RecordReader<ImmutableBytesWritable, Result> {
+  static class TableSnapshotRegionRecordReader extends
+      RecordReader<ImmutableBytesWritable, Result> {
     private TableSnapshotRegionSplit split;
     private Scan scan;
     private Result result = null;
@@ -223,8 +228,9 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable
         throw new IllegalArgumentException("A Scan is not configured for this job");
       }
       scan = TableMapReduceUtil.convertStringToScan(scanStr);
-      scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); // region is immutable, this should be fine,
+      // region is immutable, this should be fine,
       // otherwise we have to set the thread read point
+      scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
 
       scanner = new ClientSideRegionScanner(conf, fs, tmpRootDir, htd, hri, scan, null);
       if (context != null) {
@@ -336,7 +342,8 @@ public class TableSnapshotInputFormat extends InputFormat<ImmutableBytesWritable
    * weights into account, thus will treat every location passed from the input split as equal. We
    * do not want to blindly pass all the locations, since we are creating one split per region, and
    * the region's blocks are all distributed throughout the cluster unless favorite node assignment
-   * is used. On the expected stable case, only one location will contain most of the blocks as local.
+   * is used. On the expected stable case, only one location will contain most of the blocks as
+   * local.
    * On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. Here
    * we are doing a simple heuristic, where we will pass all hosts which have at least 80%
    * (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top
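The comment above describes the cutoff heuristic behind hbase.tablesnapshotinputformat.locality.cutoff.multiplier: starting from the host with the most local blocks, every host with at least 80% (the default multiplier) of the top host's block weight is kept as a split location. A stand-alone approximation of that idea follows, with illustrative weights; it is not the class's actual getBestLocations code.

import java.util.ArrayList;
import java.util.List;

public class LocalityCutoffExample {
  // hosts and weights are assumed to be pre-sorted by descending block weight,
  // as HDFSBlocksDistribution's top-hosts view provides.
  static List<String> bestLocations(String[] hosts, long[] weights, float cutoffMultiplier) {
    List<String> result = new ArrayList<String>();
    if (hosts.length == 0) {
      return result;
    }
    double cutoff = weights[0] * cutoffMultiplier; // relative to the top host
    for (int i = 0; i < hosts.length; i++) {
      if (weights[i] >= cutoff) {
        result.add(hosts[i]);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // Illustrative weights: cutoff is 0.8 * 9 = 7.2, so only h4 and h3 are kept.
    System.out.println(bestLocations(
        new String[] {"h4", "h3", "h2", "h1"}, new long[] {9, 8, 7, 2}, 0.8f));
    // prints: [h4, h3]
  }
}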
@@ -674,10 +674,14 @@ public final class ExportSnapshot extends Configured implements Tool {
     System.err.println(" -snapshot NAME Snapshot to restore.");
     System.err.println(" -copy-to NAME Remote destination hdfs://");
     System.err.println(" -no-checksum-verify Do not verify checksum.");
-    System.err.println(" -chuser USERNAME Change the owner of the files to the specified one.");
-    System.err.println(" -chgroup GROUP Change the group of the files to the specified one.");
-    System.err.println(" -chmod MODE Change the permission of the files to the specified one.");
-    System.err.println(" -mappers Number of mappers to use during the copy (mapreduce.job.maps).");
+    System.err.println(" -chuser USERNAME Change the owner of the files " +
+        "to the specified one.");
+    System.err.println(" -chgroup GROUP Change the group of the files to " +
+        "the specified one.");
+    System.err.println(" -chmod MODE Change the permission of the files " +
+        "to the specified one.");
+    System.err.println(" -mappers Number of mappers to use during the " +
+        "copy (mapreduce.job.maps).");
     System.err.println();
     System.err.println("Examples:");
     System.err.println(" hbase " + getClass() + " \\");
@@ -193,7 +193,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
     addCommandDescriptor(ScanTest.class, "scan",
       "Run scan test (read every row)");
     addCommandDescriptor(FilteredScanTest.class, "filterScan",
-      "Run scan test using a filter to find a specific row based on it's value (make sure to use --rows=20)");
+      "Run scan test using a filter to find a specific row based on it's value " +
+      "(make sure to use --rows=20)");
   }
 
   protected void addCommandDescriptor(Class<? extends Test> cmdClass,
@@ -1584,13 +1585,13 @@ public class PerformanceEvaluation extends Configured implements Tool {
         this.useTags = Boolean.parseBoolean(cmd.substring(useTags.length()));
         continue;
       }
 
       final String noOfTags = "--nooftags=";
       if (cmd.startsWith(noOfTags)) {
         this.noOfTags = Integer.parseInt(cmd.substring(noOfTags.length()));
         continue;
       }
 
       Class<? extends Test> cmdClass = determineCommandClass(cmd);
       if (cmdClass != null) {
         getArgs(i + 1, args);
@@ -124,8 +124,8 @@ public class TestTableSnapshotScanner {
     testScanner(UTIL, "testWithMultiRegion", 20, true);
   }
 
-  private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions, boolean shutdownCluster)
-      throws Exception {
+  private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions,
+      boolean shutdownCluster) throws Exception {
     setupCluster();
     TableName tableName = TableName.valueOf("testScanner");
     try {
@@ -138,7 +138,8 @@ public class TestTableSnapshotScanner {
     Path restoreDir = util.getDataTestDirOnTestFS(snapshotName);
     Scan scan = new Scan(bbb, yyy); // limit the scan
 
-    TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir, snapshotName, scan);
+    TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir,
+      snapshotName, scan);
 
     verifyScanner(scanner, bbb, yyy);
     scanner.close();
@@ -154,7 +155,8 @@ public class TestTableSnapshotScanner {
   private void verifyScanner(ResultScanner scanner, byte[] startRow, byte[] stopRow)
       throws IOException, InterruptedException {
 
-    HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);
+    HBaseTestingUtility.SeenRowTracker rowTracker =
+        new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);
 
     while (true) {
       Result result = scanner.next();
@@ -120,15 +120,18 @@ public class TestTableSnapshotInputFormat {
     Assert.assertEquals(Lists.newArrayList("h1"), tsif.getBestLocations(conf, blockDistribution));
 
     blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 2);
-    Assert.assertEquals(Lists.newArrayList("h1", "h2"), tsif.getBestLocations(conf, blockDistribution));
+    Assert.assertEquals(Lists.newArrayList("h1", "h2"),
+      tsif.getBestLocations(conf, blockDistribution));
 
     blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 3);
-    Assert.assertEquals(Lists.newArrayList("h2", "h1"), tsif.getBestLocations(conf, blockDistribution));
+    Assert.assertEquals(Lists.newArrayList("h2", "h1"),
+      tsif.getBestLocations(conf, blockDistribution));
 
     blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 6);
     blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 9);
 
-    Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4", "h1"), tsif.getBestLocations(conf, blockDistribution));
+    Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4", "h1"),
+      tsif.getBestLocations(conf, blockDistribution));
   }
 
   public static enum TestTableSnapshotCounters {
@ -148,7 +151,8 @@ public class TestTableSnapshotInputFormat {
|
||||||
|
|
||||||
public static class TestTableSnapshotReducer
|
public static class TestTableSnapshotReducer
|
||||||
extends Reducer<ImmutableBytesWritable, NullWritable, NullWritable, NullWritable> {
|
extends Reducer<ImmutableBytesWritable, NullWritable, NullWritable, NullWritable> {
|
||||||
HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(bbb, yyy);
|
HBaseTestingUtility.SeenRowTracker rowTracker =
|
||||||
|
new HBaseTestingUtility.SeenRowTracker(bbb, yyy);
|
||||||
@Override
|
@Override
|
||||||
protected void reduce(ImmutableBytesWritable key, Iterable<NullWritable> values,
|
protected void reduce(ImmutableBytesWritable key, Iterable<NullWritable> values,
|
||||||
Context context) throws IOException, InterruptedException {
|
Context context) throws IOException, InterruptedException {
|
||||||
|
@@ -207,8 +211,8 @@ public class TestTableSnapshotInputFormat {
     testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 8);
   }
 
-  public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, int numRegions, int expectedNumSplits)
-      throws Exception {
+  public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
+      int numRegions, int expectedNumSplits) throws Exception {
     setupCluster();
     TableName tableName = TableName.valueOf("testWithMockedMapReduce");
     try {
@ -239,7 +243,8 @@ public class TestTableSnapshotInputFormat {
|
||||||
|
|
||||||
Assert.assertEquals(expectedNumSplits, splits.size());
|
Assert.assertEquals(expectedNumSplits, splits.size());
|
||||||
|
|
||||||
HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);
|
HBaseTestingUtility.SeenRowTracker rowTracker =
|
||||||
|
new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);
|
||||||
|
|
||||||
for (int i = 0; i < splits.size(); i++) {
|
for (int i = 0; i < splits.size(); i++) {
|
||||||
// validate input split
|
// validate input split
|
||||||
|
@ -249,7 +254,8 @@ public class TestTableSnapshotInputFormat {
|
||||||
// validate record reader
|
// validate record reader
|
||||||
TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class);
|
TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class);
|
||||||
when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration());
|
when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration());
|
||||||
RecordReader<ImmutableBytesWritable, Result> rr = tsif.createRecordReader(split, taskAttemptContext);
|
RecordReader<ImmutableBytesWritable, Result> rr =
|
||||||
|
tsif.createRecordReader(split, taskAttemptContext);
|
||||||
rr.initialize(split, taskAttemptContext);
|
rr.initialize(split, taskAttemptContext);
|
||||||
|
|
||||||
// validate we can read all the data back
|
// validate we can read all the data back
|
||||||
|
@ -266,7 +272,8 @@ public class TestTableSnapshotInputFormat {
|
||||||
rowTracker.validate();
|
rowTracker.validate();
|
||||||
}
|
}
|
||||||
|
|
||||||
public static void verifyRowFromMap(ImmutableBytesWritable key, Result result) throws IOException {
|
public static void verifyRowFromMap(ImmutableBytesWritable key, Result result)
|
||||||
|
throws IOException {
|
||||||
byte[] row = key.get();
|
byte[] row = key.get();
|
||||||
CellScanner scanner = result.cellScanner();
|
CellScanner scanner = result.cellScanner();
|
||||||
while (scanner.advance()) {
|
while (scanner.advance()) {
|
||||||
|
@ -317,8 +324,8 @@ public class TestTableSnapshotInputFormat {
|
||||||
|
|
||||||
// this is also called by the IntegrationTestTableSnapshotInputFormat
|
// this is also called by the IntegrationTestTableSnapshotInputFormat
|
||||||
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
|
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
|
||||||
String snapshotName, Path tableDir, int numRegions, int expectedNumSplits, boolean shutdownCluster)
|
String snapshotName, Path tableDir, int numRegions, int expectedNumSplits,
|
||||||
throws Exception {
|
boolean shutdownCluster) throws Exception {
|
||||||
|
|
||||||
//create the table and snapshot
|
//create the table and snapshot
|
||||||
createTableAndSnapshot(util, tableName, snapshotName, numRegions);
|
createTableAndSnapshot(util, tableName, snapshotName, numRegions);
|
||||||
|
@ -333,7 +340,8 @@ public class TestTableSnapshotInputFormat {
|
||||||
Scan scan = new Scan(bbb, yyy); // limit the scan
|
Scan scan = new Scan(bbb, yyy); // limit the scan
|
||||||
|
|
||||||
job.setJarByClass(util.getClass());
|
job.setJarByClass(util.getClass());
|
||||||
TableMapReduceUtil.addDependencyJars(job.getConfiguration(), TestTableSnapshotInputFormat.class);
|
TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
|
||||||
|
TestTableSnapshotInputFormat.class);
|
||||||
|
|
||||||
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
|
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
|
||||||
scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
|
scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
|
||||||
|
|
|
@ -190,7 +190,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
|
||||||
addCommandDescriptor(ScanTest.class, "scan",
|
addCommandDescriptor(ScanTest.class, "scan",
|
||||||
"Run scan test (read every row)");
|
"Run scan test (read every row)");
|
||||||
addCommandDescriptor(FilteredScanTest.class, "filterScan",
|
addCommandDescriptor(FilteredScanTest.class, "filterScan",
|
||||||
"Run scan test using a filter to find a specific row based on it's value (make sure to use --rows=20)");
|
"Run scan test using a filter to find a specific row based " +
|
||||||
|
"on it's value (make sure to use --rows=20)");
|
||||||
}
|
}
|
||||||
|
|
||||||
protected void addCommandDescriptor(Class<? extends Test> cmdClass,
|
protected void addCommandDescriptor(Class<? extends Test> cmdClass,
|
||||||
|
@ -1329,7 +1330,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
|
||||||
}
|
}
|
||||||
System.err.println("Usage: java " + this.getClass().getName() + " \\");
|
System.err.println("Usage: java " + this.getClass().getName() + " \\");
|
||||||
System.err.println(" [--nomapred] [--rows=ROWS] [--table=NAME] \\");
|
System.err.println(" [--nomapred] [--rows=ROWS] [--table=NAME] \\");
|
||||||
System.err.println(" [--compress=TYPE] [--blockEncoding=TYPE] [-D<property=value>]* <command> <nclients>");
|
System.err.println(" [--compress=TYPE] [--blockEncoding=TYPE] " +
|
||||||
|
"[-D<property=value>]* <command> <nclients>");
|
||||||
System.err.println();
|
System.err.println();
|
||||||
System.err.println("Options:");
|
System.err.println("Options:");
|
||||||
System.err.println(" nomapred Run multiple clients using threads " +
|
System.err.println(" nomapred Run multiple clients using threads " +
|
||||||
|
@ -1337,15 +1339,17 @@ public class PerformanceEvaluation extends Configured implements Tool {
|
||||||
System.err.println(" rows Rows each client runs. Default: One million");
|
System.err.println(" rows Rows each client runs. Default: One million");
|
||||||
System.err.println(" table Alternate table name. Default: 'TestTable'");
|
System.err.println(" table Alternate table name. Default: 'TestTable'");
|
||||||
System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'");
|
System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'");
|
||||||
System.err.println(" flushCommits Used to determine if the test should flush the table. Default: false");
|
System.err.println(" flushCommits Used to determine if the test should flush the table. " +
|
||||||
|
"Default: false");
|
||||||
System.err.println(" writeToWAL Set writeToWAL on puts. Default: True");
|
System.err.println(" writeToWAL Set writeToWAL on puts. Default: True");
|
||||||
System.err.println(" presplit Create presplit table. Recommended for accurate perf analysis (see guide). Default: disabled");
|
System.err.println(" presplit Create presplit table. Recommended for accurate perf " +
|
||||||
System.err
|
"analysis (see guide). Default: disabled");
|
||||||
.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as possible. Not " +
|
System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " +
|
||||||
"guaranteed that reads are always served from inmemory. Default: false");
|
"possible. Not guaranteed that reads are always served from inmemory. Default: false");
|
||||||
System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. Default : false");
|
System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " +
|
||||||
System.err
|
"Default : false");
|
||||||
.println(" numoftags Specify the no of tags that would be needed. This works only if usetags is true.");
|
System.err.println(" numoftags Specify the no of tags that would be needed. " +
|
||||||
|
"This works only if usetags is true.");
|
||||||
System.err.println();
|
System.err.println();
|
||||||
System.err.println(" Note: -D properties will be applied to the conf used. ");
|
System.err.println(" Note: -D properties will be applied to the conf used. ");
|
||||||
System.err.println(" For example: ");
|
System.err.println(" For example: ");
|
||||||
|
|