diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 84b56769776..5935d78792b 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -195,6 +195,10 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index 1592a3b73c3..a66237bf0d7 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -50,6 +50,7 @@
         <include>org.apache.hbase:hbase-thrift</include>
         <include>org.apache.hbase:hbase-external-blockcache</include>
         <include>org.apache.hbase:hbase-backup</include>
+        <include>org.apache.hbase:hbase-mapreduce</include>
diff --git a/hbase-assembly/src/main/assembly/src.xml b/hbase-assembly/src/main/assembly/src.xml
index 3fd7562c262..a2ca40ec318 100644
--- a/hbase-assembly/src/main/assembly/src.xml
+++ b/hbase-assembly/src/main/assembly/src.xml
@@ -60,6 +60,7 @@
         <include>org.apache.hbase:hbase-testing-util</include>
         <include>org.apache.hbase:hbase-thrift</include>
         <include>org.apache.hbase:hbase-backup</include>
+        <include>org.apache.hbase:hbase-mapreduce</include>
diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml
index 88e06432164..0e3401f4ea1 100644
--- a/hbase-backup/pom.xml
+++ b/hbase-backup/pom.xml
@@ -107,6 +107,16 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index fad2bf69144..7e6539e0148 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -144,6 +144,10 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-endpoint</artifactId>
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index 2ba3e0e868a..527de7f5972 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -198,6 +198,22 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-mapreduce</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-rsgroup</artifactId>
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
index 76be4e8a56e..9bc31313819 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.apache.hadoop.hbase.util.LoadTestTool;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.util.StringUtils;
@@ -70,7 +71,7 @@ public class IntegrationTestIngest extends IntegrationTestBase {
   protected String[] LOAD_TEST_TOOL_INIT_ARGS = {
       LoadTestTool.OPT_COLUMN_FAMILIES,
       LoadTestTool.OPT_COMPRESSION,
-      LoadTestTool.OPT_DATA_BLOCK_ENCODING,
+      HFileTestUtil.OPT_DATA_BLOCK_ENCODING,
       LoadTestTool.OPT_INMEMORY,
       LoadTestTool.OPT_ENCRYPTION,
       LoadTestTool.OPT_NUM_REGIONS_PER_SERVER,
@@ -138,7 +139,7 @@ public class IntegrationTestIngest extends IntegrationTestBase {
     String familiesString = getConf().get(
       String.format("%s.%s", clazz, LoadTestTool.OPT_COLUMN_FAMILIES));
     if (familiesString == null) {
-      for (byte[] family : LoadTestTool.DEFAULT_COLUMN_FAMILIES) {
+      for (byte[] family : HFileTestUtil.DEFAULT_COLUMN_FAMILIES) {
         families.add(Bytes.toString(family));
       }
     } else {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestStripeCompactions.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestStripeCompactions.java
index d64fbb07952..fc79abb4410 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestStripeCompactions.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestStripeCompactions.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
-import org.apache.hadoop.hbase.util.LoadTestTool;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.experimental.categories.Category;
@@ -41,7 +41,7 @@ public class IntegrationTestIngestStripeCompactions extends IntegrationTestIngest {
     HTableDescriptor htd = new HTableDescriptor(getTablename());
     htd.setConfiguration(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName());
     htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "100");
-    HColumnDescriptor hcd = new HColumnDescriptor(LoadTestTool.DEFAULT_COLUMN_FAMILY);
+    HColumnDescriptor hcd = new HColumnDescriptor(HFileTestUtil.DEFAULT_COLUMN_FAMILY);
     HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), htd, hcd);
   }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
index 5bbb12b14b9..010e4b90ea4 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.apache.hadoop.hbase.util.LoadTestDataGeneratorWithMOB;
 import org.apache.hadoop.hbase.util.LoadTestTool;
 import org.apache.hadoop.util.ToolRunner;
@@ -44,7 +45,7 @@ import org.junit.experimental.categories.Category;
 public class IntegrationTestIngestWithMOB extends IntegrationTestIngest {
   private static final char COLON = ':';
-  private byte[] mobColumnFamily = LoadTestTool.DEFAULT_COLUMN_FAMILY;
+  private byte[] mobColumnFamily = HFileTestUtil.DEFAULT_COLUMN_FAMILY;
   public static final String THRESHOLD = "threshold";
   public static final String MIN_MOB_DATA_SIZE = "minMobDataSize";
   public static final String MAX_MOB_DATA_SIZE = "maxMobDataSize";
@@ -56,7 +57,7 @@ public class IntegrationTestIngestWithMOB extends IntegrationTestIngest {
   //similar to LOAD_TEST_TOOL_INIT_ARGS except OPT_IN_MEMORY is removed
   protected String[] LOAD_TEST_TOOL_MOB_INIT_ARGS = {
       LoadTestTool.OPT_COMPRESSION,
-      LoadTestTool.OPT_DATA_BLOCK_ENCODING,
+      HFileTestUtil.OPT_DATA_BLOCK_ENCODING,
       LoadTestTool.OPT_ENCRYPTION,
       LoadTestTool.OPT_NUM_REGIONS_PER_SERVER,
       LoadTestTool.OPT_REGION_REPLICATION,
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
index d649bdb7e90..3135bd062e0 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
@@ -72,6 +72,7 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
   private static final String PRIMARY_TIMEOUT_DEFAULT = "" + 10 * 1000; // 10 ms
   private static final String NUM_RS_KEY = "numRs";
   private static final String NUM_RS_DEFAULT = "" + 3;
+  public static final String FAMILY_NAME = "info";

   /** Extract a descriptive statistic from a {@link com.codahale.metrics.Histogram}. */
   private enum Stat {
@@ -236,7 +237,7 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {

   @Override
   protected Set<String> getColumnFamilies() {
-    return Sets.newHashSet(Bytes.toString(PerformanceEvaluation.FAMILY_NAME));
+    return Sets.newHashSet(FAMILY_NAME);
   }

   /** Compute the mean of the given {@code stat} from a timing results. */
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
index 9d04bf91b0c..fb7acf42c6c 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestImportTsv.java
@@ -29,7 +29,6 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.UUID;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
index f0425213ad6..b9d16a1e840 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.IntegrationTestBase;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -55,7 +56,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.ScannerCallable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.NMapInputFormat;
-import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.mapreduce.TableMapper;
 import org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml
new file mode 100644
index 00000000000..2d3f859d4b0
--- /dev/null
+++ b/hbase-mapreduce/pom.xml
@@ -0,0 +1,316 @@
+<?xml version="1.0"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <artifactId>hbase</artifactId>
+    <groupId>org.apache.hbase</groupId>
+    <version>2.0.0-alpha3-SNAPSHOT</version>
+    <relativePath>..</relativePath>
+  </parent>
+  <artifactId>hbase-mapreduce</artifactId>
+  <name>Apache HBase - MapReduce</name>
+  <description>
+    This module contains implementations of InputFormat, OutputFormat, Mapper, Reducer, etc which
+    are needed for running MR jobs on tables, WALs, HFiles and other HBase specific constructs.
+    It also contains a bunch of tools: RowCounter, ImportTsv, Import, Export, CompactionTool,
+    ExportSnapshot, WALPlayer, etc
+  </description>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-site-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <skipAssembly>true</skipAssembly>
+        </configuration>
+      </plugin>
+      <plugin>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.hbase.ServerResourceCheckerJUnitListener</value>
+            </property>
+          </properties>
+          <systemPropertyVariables>
+            <org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>org.apache.hadoop.hbase.shaded.</org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>
+          </systemPropertyVariables>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+      </plugin>
+    </plugins>
+    <pluginManagement>
+      <plugins>
+        <plugin>
+          <groupId>org.eclipse.m2e</groupId>
+          <artifactId>lifecycle-mapping</artifactId>
+          <version>1.0.0</version>
+          <configuration>
+            <lifecycleMappingMetadata>
+              <pluginExecutions>
+                <pluginExecution>
+                  <pluginExecutionFilter>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-compiler-plugin</artifactId>
+                    <versionRange>[3.2,)</versionRange>
+                    <goals>
+                      <goal>compile</goal>
+                    </goals>
+                  </pluginExecutionFilter>
+                  <action>
+                    <ignore></ignore>
+                  </action>
+                </pluginExecution>
+              </pluginExecutions>
+            </lifecycleMappingMetadata>
+          </configuration>
+        </plugin>
+      </plugins>
+    </pluginManagement>
+  </build>
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-annotations</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-annotations</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop2-compat</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <profiles>
+    <profile>
+      <id>skipMapReduceTests</id>
+      <activation>
+        <property>
+          <name>skipMapReduceTests</name>
+        </property>
+      </activation>
+      <properties>
+        <surefire.skipFirstPart>true</surefire.skipFirstPart>
+        <surefire.skipSecondPart>true</surefire.skipSecondPart>
+      </properties>
+    </profile>
+    <profile>
+      <id>hadoop-2.0</id>
+      <activation>
+        <property>
+          <name>!hadoop.profile</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>com.github.stephenc.findbugs</groupId>
+          <artifactId>findbugs-annotations</artifactId>
+          <optional>true</optional>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>net.java.dev.jets3t</groupId>
+              <artifactId>jets3t</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>javax.servlet.jsp</groupId>
+              <artifactId>jsp-api</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>org.mortbay.jetty</groupId>
+              <artifactId>jetty</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>com.sun.jersey</groupId>
+              <artifactId>jersey-server</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>com.sun.jersey</groupId>
+              <artifactId>jersey-core</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>com.sun.jersey</groupId>
+              <artifactId>jersey-json</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>javax.servlet</groupId>
+              <artifactId>servlet-api</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>tomcat</groupId>
+              <artifactId>jasper-compiler</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>tomcat</groupId>
+              <artifactId>jasper-runtime</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>com.google.code.findbugs</groupId>
+              <artifactId>jsr305</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>com.google.guava</groupId>
+              <artifactId>guava</artifactId>
+            </exclusion>
+          </exclusions>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-minicluster</artifactId>
+          <scope>test</scope>
+        </dependency>
+      </dependencies>
+    </profile>
+    <profile>
+      <id>hadoop-3.0</id>
+      <activation>
+        <property>
+          <name>hadoop.profile</name>
+          <value>3.0</value>
+        </property>
+      </activation>
+      <properties>
+        <hadoop.version>${hadoop-three.version}</hadoop.version>
+      </properties>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-minicluster</artifactId>
+        </dependency>
+      </dependencies>
+    </profile>
+  </profiles>
+</project>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
index 0011a60520c..4f5323ad804 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Partitioner;

-
 /**
  * This is used to partition the output keys into groups of keys.
  * Keys are grouped according to the regions that currently exist
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
similarity index 98%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
index 3e121fe97fd..81dbb1531aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java
@@ -54,7 +54,7 @@ import java.util.Map;
  * the overall dataset for the job is defined by the concatenation of the regions and tables
  * included in each snapshot/scan
  * pair.
- * {@link org.apache.hadoop.hbase.mapred.TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map,
+ * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map,
  * Class, Class, Class, JobConf, boolean, Path)}
  * can be used to configure the job.
  * <pre>{@code
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index c65810ffc55..9811a973c8a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -167,10 +167,10 @@ implements InputFormat<ImmutableBytesWritable, Result> {
    * Calculates the splits that will serve as input for the map tasks.
    *
    * Splits are created in number equal to the smallest between numSplits and
-   * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. 
-   * If the number of splits is smaller than the number of 
+   * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
+   * If the number of splits is smaller than the number of
    * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across
-   * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s 
+   * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s
    * and are grouped the most evenly possible. In the
    * case splits are uneven the bigger splits are placed first in the
    * {@link InputSplit} array.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
index 8878eeeccb5..06b28ed55da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
@@ -102,7 +102,7 @@ public class TableOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
 Provides HBase MapReduce
 Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 See HBase and MapReduce
-in the HBase Reference Guide for mapreduce over hbase documentation. 
+in the HBase Reference Guide for mapreduce over hbase documentation.
 */
 package org.apache.hadoop.hbase.mapred;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 21b85560ec6..9cccf8ce4a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -82,22 +82,22 @@ public class CopyTable extends Configured implements Tool {
     if (!doCommandLine(args)) {
       return null;
     }
-    
+
     Job job = Job.getInstance(getConf(), getConf().get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
     job.setJarByClass(CopyTable.class);
     Scan scan = new Scan();
-    
+
     scan.setBatch(batch);
     scan.setCacheBlocks(false);
-    
+
     if (cacheRow > 0) {
       scan.setCaching(cacheRow);
     } else {
       scan.setCaching(getConf().getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, 100));
     }
-    
+
     scan.setTimeRange(startTime, endTime);
-    
+
     if (allCells) {
       scan.setRaw(true);
     }
@@ -259,13 +259,13 @@ public class CopyTable extends Configured implements Tool {
         endTime = Long.parseLong(cmd.substring(endTimeArgKey.length()));
         continue;
       }
-      
+
       final String batchArgKey = "--batch=";
       if (cmd.startsWith(batchArgKey)) {
         batch = Integer.parseInt(cmd.substring(batchArgKey.length()));
         continue;
       }
-      
+
       final String cacheRowArgKey = "--cacheRow=";
       if (cmd.startsWith(cacheRowArgKey)) {
         cacheRow = Integer.parseInt(cmd.substring(cacheRowArgKey.length()));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java
similarity index 98%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java
index 4c015281f9b..de6cf3a88db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java
@@ -44,10 +44,10 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;

 /**
-* Export an HBase table.
-* Writes content to sequence files up in HDFS. Use {@link Import} to read it
-* back in again.
-*/
+ * Export an HBase table.
+ * Writes content to sequence files up in HDFS. Use {@link Import} to read it
+ * back in again.
+ */
 @InterfaceAudience.Public
 public class Export extends Configured implements Tool {
   private static final Log LOG = LogFactory.getLog(Export.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
index 3475a48b5e3..3c3060b64da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.mapreduce.Partitioner;
 * <p>This class is not suitable as partitioner creating hfiles
 * for incremental bulk loads as region spread will likely change between time of
 * hfile creation and load time. See {@link LoadIncrementalHFiles}
- * and Bulk Load.
+ * and Bulk Load.
 *
 * @param <KEY>  The type of the key.
 * @param <VALUE>  The type of the value.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
similarity index 98%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
index dfac4715796..2c8caf503a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
@@ -64,27 +64,27 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.Ordering;
 public class HashTable extends Configured implements Tool {

   private static final Log LOG = LogFactory.getLog(HashTable.class);
-  
+
   private static final int DEFAULT_BATCH_SIZE = 8000;
-  
+
   private final static String HASH_BATCH_SIZE_CONF_KEY = "hash.batch.size";
   final static String PARTITIONS_FILE_NAME = "partitions";
   final static String MANIFEST_FILE_NAME = "manifest";
   final static String HASH_DATA_DIR = "hashes";
   final static String OUTPUT_DATA_FILE_PREFIX = "part-r-";
   private final static String TMP_MANIFEST_FILE_NAME = "manifest.tmp";
-  
+
   TableHash tableHash = new TableHash();
   Path destPath;
-  
+
   public HashTable(Configuration conf) {
     super(conf);
   }
-  
+
   public static class TableHash {
-    
+
     Path hashDir;
-    
+
     String tableName;
     String families = null;
     long batchSize = DEFAULT_BATCH_SIZE;
@@ -95,9 +95,9 @@ public class HashTable extends Configured implements Tool {
     int versions = -1;
     long startTime = 0;
     long endTime = 0;
-    
+
     List<ImmutableBytesWritable> partitions;
-    
+
     public static TableHash read(Configuration conf, Path hashDir) throws IOException {
       TableHash tableHash = new TableHash();
       FileSystem fs = hashDir.getFileSystem(conf);
@@ -106,7 +106,7 @@ public class HashTable extends Configured implements Tool {
       tableHash.readPartitionFile(fs, conf, new Path(hashDir, PARTITIONS_FILE_NAME));
       return tableHash;
     }
-    
+
     void writePropertiesFile(FileSystem fs, Path path) throws IOException {
       Properties p = new Properties();
       p.setProperty("table", tableName);
@@ -133,7 +133,7 @@ public class HashTable extends Configured implements Tool {
       if (endTime != 0) {
         p.setProperty("endTimestamp", Long.toString(endTime));
       }
-      
+
       try (OutputStreamWriter osw = new OutputStreamWriter(fs.create(path), Charsets.UTF_8)) {
         p.store(osw, null);
       }
@@ -150,7 +150,7 @@ public class HashTable extends Configured implements Tool {
       families = p.getProperty("columnFamilies");
       batchSize = Long.parseLong(p.getProperty("targetBatchSize"));
       numHashFiles = Integer.parseInt(p.getProperty("numHashFiles"));
-      
+
       String startRowHex = p.getProperty("startRowHex");
       if (startRowHex != null) {
         startRow = Bytes.fromHex(startRowHex);
@@ -159,28 +159,28 @@ public class HashTable extends Configured implements Tool {
       if (stopRowHex != null) {
         stopRow = Bytes.fromHex(stopRowHex);
       }
-      
+
       String scanBatchString = p.getProperty("scanBatch");
       if (scanBatchString != null) {
         scanBatch = Integer.parseInt(scanBatchString);
       }
-      
+
       String versionString = p.getProperty("versions");
       if (versionString != null) {
         versions = Integer.parseInt(versionString);
       }
-      
+
       String startTimeString = p.getProperty("startTimestamp");
       if (startTimeString != null) {
         startTime = Long.parseLong(startTimeString);
       }
-      
+
       String endTimeString = p.getProperty("endTimestamp");
       if (endTimeString != null) {
         endTime = Long.parseLong(endTimeString);
       }
     }
-    
+
     Scan initScan() throws IOException {
       Scan scan = new Scan();
       scan.setCacheBlocks(false);
@@ -206,7 +206,7 @@ public class HashTable extends Configured implements Tool {
       }
       return scan;
     }
-    
+
     /**
      * Choose partitions between row ranges to hash to a single output file
      * Selects region boundaries that fall within the scan range, and groups them
@@ -217,7 +217,7 @@ public class HashTable extends Configured implements Tool {
       for (int i = 0; i < regionStartEndKeys.getFirst().length; i++) {
         byte[] regionStartKey = regionStartEndKeys.getFirst()[i];
         byte[] regionEndKey = regionStartEndKeys.getSecond()[i];
-        
+
         // if scan begins after this region, or starts before this region, then drop this region
         // in other words:
         //   IF (scan begins before the end of this region
@@ -230,7 +230,7 @@ public class HashTable extends Configured implements Tool {
           startKeys.add(regionStartKey);
         }
       }
-      
+
       int numRegions = startKeys.size();
       if (numHashFiles == 0) {
         numHashFiles = numRegions / 100;
@@ -242,7 +242,7 @@ public class HashTable extends Configured implements Tool {
         // can't partition within regions
         numHashFiles = numRegions;
       }
-      
+
       // choose a subset of start keys to group regions into ranges
       partitions = new ArrayList<>(numHashFiles - 1);
       // skip the first start key as it is not a partition between ranges.
@@ -251,19 +251,19 @@ public class HashTable extends Configured implements Tool {
         partitions.add(new ImmutableBytesWritable(startKeys.get(splitIndex)));
       }
     }
-    
+
     void writePartitionFile(Configuration conf, Path path) throws IOException {
       FileSystem fs = path.getFileSystem(conf);
       @SuppressWarnings("deprecation")
       SequenceFile.Writer writer = SequenceFile.createWriter(
         fs, conf, path, ImmutableBytesWritable.class, NullWritable.class);
-      
+
       for (int i = 0; i < partitions.size(); i++) {
         writer.append(partitions.get(i), NullWritable.get());
       }
       writer.close();
     }
-    
+
     private void readPartitionFile(FileSystem fs, Configuration conf, Path path)
          throws IOException {
       @SuppressWarnings("deprecation")
@@ -274,7 +274,7 @@ public class HashTable extends Configured implements Tool {
         partitions.add(new ImmutableBytesWritable(key.copyBytes()));
       }
       reader.close();
-      
+
       if (!Ordering.natural().isOrdered(partitions)) {
         throw new IOException("Partitions are not ordered!");
       }
@@ -309,30 +309,30 @@ public class HashTable extends Configured implements Tool {
       }
       return sb.toString();
     }
-    
+
     static String getDataFileName(int hashFileIndex) {
       return String.format(HashTable.OUTPUT_DATA_FILE_PREFIX + "%05d", hashFileIndex);
     }
-    
+
     /**
      * Open a TableHash.Reader starting at the first hash at or after the given key.
-     * @throws IOException 
+     * @throws IOException
      */
     public Reader newReader(Configuration conf, ImmutableBytesWritable startKey)
         throws IOException {
       return new Reader(conf, startKey);
    }
-    
+
     public class Reader implements java.io.Closeable {
       private final Configuration conf;
-      
+
       private int hashFileIndex;
       private MapFile.Reader mapFileReader;
-      
+
       private boolean cachedNext;
       private ImmutableBytesWritable key;
       private ImmutableBytesWritable hash;
-      
+
       Reader(Configuration conf, ImmutableBytesWritable startKey) throws IOException {
         this.conf = conf;
         int partitionIndex = Collections.binarySearch(partitions, startKey);
@@ -344,7 +344,7 @@ public class HashTable extends Configured implements Tool {
           hashFileIndex = -1-partitionIndex;
         }
         openHashFile();
-        
+
         // MapFile's don't make it easy to seek() so that the subsequent next() returns
        // the desired key/value pair.  So we cache it for the first call of next().
        hash = new ImmutableBytesWritable();
@@ -356,7 +356,7 @@ public class HashTable extends Configured implements Tool {
          cachedNext = true;
        }
      }
-      
+
       /**
        * Read the next key/hash pair.
        * Returns true if such a pair exists and false when at the end of the data.
@@ -384,7 +384,7 @@ public class HashTable extends Configured implements Tool {
          }
        }
      }
-      
+
       /**
        * Get the current key
        * @return the current key or null if there is no current key
@@ -392,7 +392,7 @@ public class HashTable extends Configured implements Tool {
       public ImmutableBytesWritable getCurrentKey() {
         return key;
       }
-      
+
       /**
        * Get the current hash
        * @return the current hash or null if there is no current hash
@@ -400,7 +400,7 @@ public class HashTable extends Configured implements Tool {
       public ImmutableBytesWritable getCurrentHash() {
         return hash;
       }
-      
+
       private void openHashFile() throws IOException {
         if (mapFileReader != null) {
           mapFileReader.close();
@@ -416,19 +416,19 @@ public class HashTable extends Configured implements Tool {
       }
     }
   }
-  
+
   static boolean isTableStartRow(byte[] row) {
     return Bytes.equals(HConstants.EMPTY_START_ROW, row);
   }
-  
+
   static boolean isTableEndRow(byte[] row) {
     return Bytes.equals(HConstants.EMPTY_END_ROW, row);
   }
-  
+
   public Job createSubmittableJob(String[] args) throws IOException {
     Path partitionsPath = new Path(destPath, PARTITIONS_FILE_NAME);
     generatePartitions(partitionsPath);
-    
+
     Job job = Job.getInstance(getConf(),
           getConf().get("mapreduce.job.name", "hashTable_" + tableHash.tableName));
     Configuration jobConf = job.getConfiguration();
@@ -437,7 +437,7 @@ public class HashTable extends Configured implements Tool {
     TableMapReduceUtil.initTableMapperJob(tableHash.tableName, tableHash.initScan(),
         HashMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
-    
+
     // use a TotalOrderPartitioner and reducers to group region output into hash files
     job.setPartitionerClass(TotalOrderPartitioner.class);
     TotalOrderPartitioner.setPartitionFile(jobConf, partitionsPath);
@@ -447,31 +447,31 @@ public class HashTable extends Configured implements Tool {
     job.setOutputValueClass(ImmutableBytesWritable.class);
     job.setOutputFormatClass(MapFileOutputFormat.class);
     FileOutputFormat.setOutputPath(job, new Path(destPath, HASH_DATA_DIR));
-    
+
     return job;
   }
-  
+
   private void generatePartitions(Path partitionsPath) throws IOException {
     Connection connection = ConnectionFactory.createConnection(getConf());
     Pair<byte[][], byte[][]> regionKeys
       = connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys();
     connection.close();
-    
+
     tableHash.selectPartitions(regionKeys);
     LOG.info("Writing " + tableHash.partitions.size() + " partition keys to " + partitionsPath);
-    
+
     tableHash.writePartitionFile(getConf(), partitionsPath);
   }
-  
+
   static class ResultHasher {
     private MessageDigest digest;
-    
+
     private boolean batchStarted = false;
     private ImmutableBytesWritable batchStartKey;
     private ImmutableBytesWritable batchHash;
     private long batchSize = 0;
-    
-    
+
+
     public ResultHasher() {
       try {
         digest = MessageDigest.getInstance("MD5");
@@ -479,7 +479,7 @@ public class HashTable extends Configured implements Tool {
         Throwables.propagate(e);
       }
     }
-    
+
     public void startBatch(ImmutableBytesWritable row) {
       if (batchStarted) {
         throw new RuntimeException("Cannot start new batch without finishing existing one.");
@@ -489,7 +489,7 @@ public class HashTable extends Configured implements Tool {
       batchStartKey = row;
       batchHash = null;
     }
-    
+
     public void hashResult(Result result) {
       if (!batchStarted) {
         throw new RuntimeException("Cannot add to batch that has not been started.");
@@ -508,11 +508,11 @@ public class HashTable extends Configured implements Tool {
           ts >>>= 8;
         }
         digest.update(cell.getValueArray(), cell.getValueOffset(), valueLength);
-        
+
         batchSize += rowLength + familyLength + qualifierLength + 8 + valueLength;
       }
     }
-    
+
     public void finishBatch() {
       if (!batchStarted) {
         throw new RuntimeException("Cannot finish batch that has not started.");
@@ -537,39 +537,39 @@ public class HashTable extends Configured implements Tool {
       return batchSize;
     }
   }
-  
+
   public static class HashMapper
     extends TableMapper<ImmutableBytesWritable, ImmutableBytesWritable> {
-    
+
     private ResultHasher hasher;
     private long targetBatchSize;
-    
+
     private ImmutableBytesWritable currentRow;
-    
+
     @Override
     protected void setup(Context context) throws IOException, InterruptedException {
       targetBatchSize = context.getConfiguration()
          .getLong(HASH_BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE);
       hasher = new ResultHasher();
-      
+
       TableSplit split = (TableSplit) context.getInputSplit();
       hasher.startBatch(new ImmutableBytesWritable(split.getStartRow()));
     }
-    
+
     @Override
     protected void map(ImmutableBytesWritable key, Result value, Context context)
         throws IOException, InterruptedException {
-      
+
       if (currentRow == null || !currentRow.equals(key)) {
         currentRow = new ImmutableBytesWritable(key); // not immutable
-        
+
         if (hasher.getBatchSize() >= targetBatchSize) {
          hasher.finishBatch();
          context.write(hasher.getBatchStartKey(), hasher.getBatchHash());
          hasher.startBatch(currentRow);
        }
      }
-      
+
       hasher.hashResult(value);
     }
@@ -579,20 +579,20 @@ public class HashTable extends Configured implements Tool {
       context.write(hasher.getBatchStartKey(), hasher.getBatchHash());
     }
   }
-  
+
   private void writeTempManifestFile() throws IOException {
     Path tempManifestPath = new Path(destPath, TMP_MANIFEST_FILE_NAME);
     FileSystem fs = tempManifestPath.getFileSystem(getConf());
     tableHash.writePropertiesFile(fs, tempManifestPath);
   }
-  
+
   private void completeManifest() throws IOException {
     Path tempManifestPath = new Path(destPath, TMP_MANIFEST_FILE_NAME);
     Path manifestPath = new Path(destPath, MANIFEST_FILE_NAME);
     FileSystem fs = tempManifestPath.getFileSystem(getConf());
     fs.rename(tempManifestPath, manifestPath);
   }
-  
+
   private static final int NUM_ARGS = 2;
   private static void printUsage(final String errorMsg) {
     if (errorMsg != null && errorMsg.length() > 0) {
@@ -636,41 +636,41 @@ public class HashTable extends Configured implements Tool {
       return false;
     }
     try {
-      
+
       tableHash.tableName = args[args.length-2];
       destPath = new Path(args[args.length-1]);
-      
+
       for (int i = 0; i < args.length - NUM_ARGS; i++) {
         String cmd = args[i];
         if (cmd.equals("-h") || cmd.startsWith("--h")) {
           printUsage(null);
           return false;
         }
-        
+
         final String batchSizeArgKey = "--batchsize=";
         if (cmd.startsWith(batchSizeArgKey)) {
           tableHash.batchSize = Long.parseLong(cmd.substring(batchSizeArgKey.length()));
           continue;
         }
-        
+
         final String numHashFilesArgKey = "--numhashfiles=";
         if (cmd.startsWith(numHashFilesArgKey)) {
          tableHash.numHashFiles = Integer.parseInt(cmd.substring(numHashFilesArgKey.length()));
          continue;
        }
-        
+
        final String startRowArgKey = "--startrow=";
        if (cmd.startsWith(startRowArgKey)) {
          tableHash.startRow = Bytes.fromHex(cmd.substring(startRowArgKey.length()));
          continue;
        }
-        
+
        final String stopRowArgKey = "--stoprow=";
        if (cmd.startsWith(stopRowArgKey)) {
          tableHash.stopRow = Bytes.fromHex(cmd.substring(stopRowArgKey.length()));
          continue;
        }
-        
+
        final String startTimeArgKey = "--starttime=";
        if (cmd.startsWith(startTimeArgKey)) {
          tableHash.startTime = Long.parseLong(cmd.substring(startTimeArgKey.length()));
@@ -710,7 +710,7 @@ public class HashTable extends Configured implements Tool {
             + tableHash.startTime + " >= endtime=" + tableHash.endTime);
         return false;
       }
-      
+
     } catch (Exception e) {
       e.printStackTrace();
       printUsage("Can't start because " + e.getMessage());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
similarity index 98%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
index 5289f46278d..73475dbd64d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java
@@ -57,8 +57,8 @@ extends TableReducer<Writable, Mutation, Writable> {

   /**
    * Writes each given record, consisting of the row key and the given values,
-   * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}. 
-   * It is emitting the row key and each {@link org.apache.hadoop.hbase.client.Put Put} 
+   * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}.
+   * It is emitting the row key and each {@link org.apache.hadoop.hbase.client.Put Put}
    * or {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs.
    *
    * @param key  The current row key.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
similarity index 96%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index b5bb2eca4c6..18dcf35cfe7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -96,7 +96,7 @@ public class Import extends Configured implements Tool {
   private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";

-  public static class KeyValueWritableComparablePartitioner 
+  public static class KeyValueWritableComparablePartitioner
       extends Partitioner<KeyValueWritableComparable, KeyValue> {
     private static KeyValueWritableComparable[] START_KEYS = null;
     @Override
@@ -109,27 +109,27 @@ public class Import extends Configured implements Tool {
       }
       return START_KEYS.length;
     }
-    
+
   }
-  
-  public static class KeyValueWritableComparable 
+
+  public static class KeyValueWritableComparable
       implements WritableComparable<KeyValueWritableComparable> {

     private KeyValue kv = null;
-    
-    static { 
+
+    static {
       // register this comparator
-      WritableComparator.define(KeyValueWritableComparable.class, 
+      WritableComparator.define(KeyValueWritableComparable.class,
           new KeyValueWritableComparator());
     }
-    
+
     public KeyValueWritableComparable() {
     }
-    
+
     public KeyValueWritableComparable(KeyValue kv) {
       this.kv = kv;
     }
-    
+
     @Override
     public void write(DataOutput out) throws IOException {
       KeyValue.write(kv, out);
@@ -146,7 +146,7 @@ public class Import extends Configured implements Tool {
     public int compareTo(KeyValueWritableComparable o) {
       return CellComparator.COMPARATOR.compare(this.kv, ((KeyValueWritableComparable)o).kv);
     }
-    
+
     public static class KeyValueWritableComparator extends WritableComparator {

       @Override
@@ -159,13 +159,13 @@ public class Import extends Configured implements Tool {
           return compare(kv1, kv2);
         } catch (IOException e) {
           throw new RuntimeException(e);
-        } 
+        }
       }
-      
+
     }
-    
+
   }
-  
+
   public static class KeyValueReducer
       extends Reducer<KeyValueWritableComparable, KeyValue, ImmutableBytesWritable, KeyValue> {
@@ -180,12 +180,12 @@ public class Import extends Configured implements Tool {
         context.write(new ImmutableBytesWritable(kv.getRowArray()), kv);
         if (++index % 100 == 0)
           context.setStatus("Wrote " + index + " KeyValues, "
-              + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); 
+              + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray()));
       }
     }
   }
-  
-  public static class KeyValueSortImporter 
+
+  public static class KeyValueSortImporter
       extends TableMapper<KeyValueWritableComparable, KeyValue> {
     private Map<byte[], byte[]> cfRenameMap;
     private Filter filter;
@@ -215,16 +215,16 @@ public class Import extends Configured implements Tool {
           if (kv == null) continue;
           // TODO get rid of ensureKeyValue
           KeyValue ret = KeyValueUtil.ensureKeyValue(convertKv(kv, cfRenameMap));
-          context.write(new KeyValueWritableComparable(ret.createKeyOnly(false)), ret); 
+          context.write(new KeyValueWritableComparable(ret.createKeyOnly(false)), ret);
         }
       }
     } catch (InterruptedException e) {
       e.printStackTrace();
     }
   }
-    
+
     @Override
-    public void setup(Context context) throws IOException { 
+    public void setup(Context context) throws IOException {
       cfRenameMap = createCfRenameMap(context.getConfiguration());
       filter = instantiateFilter(context.getConfiguration());
       int reduceNum = context.getNumReduceTasks();
@@ -236,17 +236,17 @@ public class Import extends Configured implements Tool {
       if (startKeys.length != reduceNum) {
         throw new IOException("Region split after job initialization");
       }
-      KeyValueWritableComparable[] startKeyWraps = 
+      KeyValueWritableComparable[] startKeyWraps =
           new KeyValueWritableComparable[startKeys.length - 1];
       for (int i = 1; i < startKeys.length; ++i) {
-        startKeyWraps[i - 1] = 
+        startKeyWraps[i - 1] =
             new KeyValueWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
       }
       KeyValueWritableComparablePartitioner.START_KEYS = startKeyWraps;
     }
   }
 }
-  
+
   /**
    * A mapper that just writes out KeyValues.
    */
@@ -438,7 +438,7 @@ public class Import extends Configured implements Tool {
    * @throws IllegalArgumentException if the filter is misconfigured
    */
  public static Filter instantiateFilter(Configuration conf) {
-    // get the filter, if it was configured    
+    // get the filter, if it was configured
    Class<? extends Filter> filterClass = conf.getClass(FILTER_CLASS_CONF_KEY, null, Filter.class);
    if (filterClass == null) {
      LOG.debug("No configured filter class, accepting all keyvalues.");
@@ -506,18 +506,18 @@ public class Import extends Configured implements Tool {
     // If there's a rename mapping for this CF, create a new KeyValue
     byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv));
     if(newCfName != null) {
-      kv = new KeyValue(kv.getRowArray(), // row buffer 
+      kv = new KeyValue(kv.getRowArray(), // row buffer
           kv.getRowOffset(),        // row offset
           kv.getRowLength(),        // row length
           newCfName,                // CF buffer
-          0,                        // CF offset 
-          newCfName.length,         // CF length 
+          0,                        // CF offset
+          newCfName.length,         // CF length
           kv.getQualifierArray(),   // qualifier buffer
           kv.getQualifierOffset(),  // qualifier offset
           kv.getQualifierLength(),  // qualifier length
           kv.getTimestamp(),        // timestamp
           KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type
-          kv.getValueArray(),       // value buffer 
+          kv.getValueArray(),       // value buffer
           kv.getValueOffset(),      // value offset
           kv.getValueLength());     // value length
     }
@@ -549,26 +549,26 @@ public class Import extends Configured implements Tool {
   /**
    * <p>Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells
    * the mapper how to rename column families.
-   * 
-   * <p>Alternately, instead of calling this function, you could set the configuration key 
-   * {@link #CF_RENAME_PROP} yourself. The value should look like 
+   *
+   * <p>Alternately, instead of calling this function, you could set the configuration key
+   * {@link #CF_RENAME_PROP} yourself. The value should look like
    * <pre>srcCf1:destCf1,srcCf2:destCf2,....</pre>. This would have the same effect on
    * the mapper behavior.
-   * 
+   *
    * @param conf the Configuration in which the {@link #CF_RENAME_PROP} key will be
    *  set
    * @param renameMap a mapping from source CF names to destination CF names
    */
-  static public void configureCfRenaming(Configuration conf, 
+  static public void configureCfRenaming(Configuration conf,
           Map<String, String> renameMap) {
     StringBuilder sb = new StringBuilder();
     for(Map.Entry<String, String> entry: renameMap.entrySet()) {
       String sourceCf = entry.getKey();
       String destCf = entry.getValue();

-      if(sourceCf.contains(":") || sourceCf.contains(",") || 
+      if(sourceCf.contains(":") || sourceCf.contains(",") ||
           destCf.contains(":") || destCf.contains(",")) {
-        throw new IllegalArgumentException("Illegal character in CF names: " 
+        throw new IllegalArgumentException("Illegal character in CF names: "
               + sourceCf + ", " + destCf);
       }
@@ -632,10 +632,10 @@ public class Import extends Configured implements Tool {
       FileOutputFormat.setOutputPath(job, outputDir);
       job.setMapOutputKeyClass(KeyValueWritableComparable.class);
       job.setMapOutputValueClass(KeyValue.class);
-      job.getConfiguration().setClass("mapreduce.job.output.key.comparator.class", 
+      job.getConfiguration().setClass("mapreduce.job.output.key.comparator.class",
           KeyValueWritableComparable.KeyValueWritableComparator.class,
           RawComparator.class);
-      Path partitionsPath = 
+      Path partitionsPath =
           new Path(TotalOrderPartitioner.getPartitionFile(job.getConfiguration()));
       FileSystem fs = FileSystem.get(job.getConfiguration());
       fs.deleteOnExit(partitionsPath);
@@ -647,7 +647,7 @@ public class Import extends Configured implements Tool {
     } else if (hfileOutPath != null) {
       LOG.info("writing to hfiles for bulk load.");
       job.setMapperClass(KeyValueImporter.class);
-      try (Connection conn = ConnectionFactory.createConnection(conf); 
+      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(tableName);
           RegionLocator regionLocator = conn.getRegionLocator(tableName)){
         job.setReducerClass(KeyValueSortReducer.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
similarity index 85%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
index 5c7ace29be0..997e5a892cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
@@ -34,9 +34,10 @@ import org.apache.hadoop.mapreduce.Reducer;
 * @see HFileOutputFormat2
 */
 @InterfaceAudience.Public
-public class KeyValueSortReducer extends Reducer<ImmutableBytesWritable, KeyValue, ImmutableBytesWritable, KeyValue> {
-  protected void reduce(ImmutableBytesWritable row, java.lang.Iterable<KeyValue> kvs,
-      org.apache.hadoop.mapreduce.Reducer.Context context)
+public class KeyValueSortReducer
+    extends Reducer<ImmutableBytesWritable, KeyValue, ImmutableBytesWritable, KeyValue> {
+  protected void reduce(ImmutableBytesWritable row, Iterable<KeyValue> kvs,
+      Reducer<ImmutableBytesWritable, KeyValue, ImmutableBytesWritable, KeyValue>.Context context)
     throws java.io.IOException, InterruptedException {
     TreeSet<KeyValue> map = new TreeSet<>(CellComparator.COMPARATOR);
     for (KeyValue kv: kvs) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
similarity index 98%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
index d7c7cc0d2bc..9f783f19c70 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java
@@ -100,7 +100,7 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 {

   final private static int validateCompositeKey(byte[] keyBytes) {

-    int separatorIdx = Bytes.indexOf(keyBytes, HFileOutputFormat2.tableSeparator);
+    int separatorIdx = Bytes.indexOf(keyBytes, tableSeparator);

     // Either the separator was not found or a tablename wasn't present or a key wasn't present
     if (separatorIdx == -1) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
index a8e683711ec..f8fb6dc0fc5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Scan;

 /**
- * Convert HBase tabular data from multiple scanners into a format that 
+ * Convert HBase tabular data from multiple scanners into a format that
  * is consumable by Map/Reduce.
  *
  * <p>
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.client.Scan;
  *
  * <pre>
  * List<Scan> scans = new ArrayList<Scan>();
- * 
+ *
  * Scan scan1 = new Scan();
  * scan1.setStartRow(firstRow1);
  * scan1.setStopRow(lastRow1);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
index e18b3aa53f4..5d541a6bebd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.RegionSizeCalculator;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.JobContext;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java
similarity index 89%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java
index 0f07a58e35a..e7538a8bffc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java
@@ -33,13 +33,13 @@ import java.util.Map;
 
 /**
  * MultiTableSnapshotInputFormat generalizes
- * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}
+ * {@link TableSnapshotInputFormat}
  * allowing a MapReduce job to run over one or more table snapshots, with one or more scans
  * configured for each.
  * Internally, the input format delegates to
- * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}
+ * {@link TableSnapshotInputFormat}
  * and thus has the same performance advantages;
- * see {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for
+ * see {@link TableSnapshotInputFormat} for
  * more details.
  * Usage is similar to TableSnapshotInputFormat, with the following exception:
  * initMultiTableSnapshotMapperJob takes in a map
@@ -48,7 +48,7 @@ import java.util.Map;
  * the overall dataset for the job is defined by the concatenation of the regions and tables
  * included in each snapshot/scan
  * pair.
- * {@link org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil#initMultiTableSnapshotMapperJob
+ * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob
  * (java.util.Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, org.apache
  * .hadoop.fs.Path)}
  * can be used to configure the job.
@@ -69,11 +69,11 @@ import java.util.Map;
  * record readers are created as described in {@link org.apache.hadoop.hbase.mapreduce
  * .TableSnapshotInputFormat}
  * (one per region).
- * See {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more notes on
+ * See {@link TableSnapshotInputFormat} for more notes on
  * permissioning; the
  * same caveats apply here.
  *
- * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
+ * @see TableSnapshotInputFormat
  * @see org.apache.hadoop.hbase.client.TableSnapshotScanner
  */
 @InterfaceAudience.Public
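As the javadoc above describes, the job is configured from a map of snapshot name to scans plus a restore directory. A hedged sketch of that wiring; the snapshot names, the restore path, and the mapper class passed in are illustrative, not part of this patch:

    import java.util.Collection;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.mapreduce.Job;

    public class MultiSnapshotJobConfig {
      // "mapper" stands in for whatever TableMapper subclass the job uses.
      static void configure(Job job, Class<? extends TableMapper> mapper) throws Exception {
        Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
        // One or more scans per snapshot; names are made up for the example.
        snapshotScans.put("snapshot1", Collections.<Scan>singleton(new Scan()));
        snapshotScans.put("snapshot2", Collections.<Scan>singleton(new Scan()));
        TableMapReduceUtil.initMultiTableSnapshotMapperJob(snapshotScans, mapper,
            ImmutableBytesWritable.class, Result.class, job,
            true /* addDependencyJars */, new Path("/tmp/snapshot-restore"));
      }
    }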
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
index d1dba1d4d0b..a5053796640 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java
@@ -253,7 +253,7 @@ public class MultithreadedTableMapper extends TableMapper {
         c.setAccessible(true);
         subcontext = (Context) c.newInstance(
           mapper,
-          outer.getConfiguration(), 
+          outer.getConfiguration(),
           outer.getTaskAttemptID(),
           new SubMapRecordReader(),
           new SubMapRecordWriter(),
@@ -272,7 +272,7 @@ public class MultithreadedTableMapper extends TableMapper {
             InputSplit.class);
           c.setAccessible(true);
           MapContext mc = (MapContext) c.newInstance(
-            outer.getConfiguration(), 
+            outer.getConfiguration(),
             outer.getTaskAttemptID(),
             new SubMapRecordReader(),
             new SubMapRecordWriter(),
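The reflection above only builds the per-thread mapper contexts; from a user's point of view the class is configured through its static helpers. A short hedged fragment, assuming an existing Job named job and a TableMapper subclass MyMapper (both illustrative):

    // Run MyMapper with 10 threads per map task via MultithreadedTableMapper.
    job.setMapperClass(MultithreadedTableMapper.class);
    MultithreadedTableMapper.setMapperClass(job, MyMapper.class);
    MultithreadedTableMapper.setNumberOfThreads(job, 10);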
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java
index 8997da9f8fb..d5faab55d82 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java
@@ -67,7 +67,7 @@ public class MutationSerialization implements Serialization {
     public void open(InputStream in) throws IOException {
       this.in = in;
     }
-    
+
   }
   private static class MutationSerializer implements Serializer {
     private OutputStream out;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java
similarity index 84%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java
index 99769b7d880..f14cd90666f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java
@@ -15,12 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.util;
+package org.apache.hadoop.hbase.mapreduce;
 
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
@@ -33,18 +32,14 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Computes size of each region for given table and given column families.
  * The value is used by MapReduce for better scheduling.
  * */
-@InterfaceStability.Evolving
 @InterfaceAudience.Private
 public class RegionSizeCalculator {
 
@@ -58,20 +53,6 @@ public class RegionSizeCalculator {
   static final String ENABLE_REGIONSIZECALCULATOR = "hbase.regionsizecalculator.enable";
   private static final long MEGABYTE = 1024L * 1024L;
 
-  /**
-   * Computes size of each region for table and given column families.
-   * 
-   * @deprecated Use {@link #RegionSizeCalculator(RegionLocator, Admin)} instead.
-   */
-  @Deprecated
-  public RegionSizeCalculator(Table table) throws IOException {
-    try (Connection conn = ConnectionFactory.createConnection(table.getConfiguration());
-        RegionLocator locator = conn.getRegionLocator(table.getName());
-        Admin admin = conn.getAdmin()) {
-      init(locator, admin);
-    }
-  }
-
   /**
    * Computes size of each region for table and given column families.
    * */
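With the deprecated Table-only constructor gone, and the class now living in org.apache.hadoop.hbase.mapreduce, callers open the RegionLocator and Admin themselves, mirroring the deleted constructor body above. A sketch of the caller-side replacement; "table" is assumed to be an open org.apache.hadoop.hbase.client.Table and regionId an illustrative byte[]:

    try (Connection conn = ConnectionFactory.createConnection(table.getConfiguration());
        RegionLocator locator = conn.getRegionLocator(table.getName());
        Admin admin = conn.getAdmin()) {
      RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(locator, admin);
      // Sizes are reported per region, in bytes.
      long size = sizeCalculator.getRegionSize(regionId);
    }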
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java
index 4ba1088bdec..01a919c73d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SimpleTotalOrderPartitioner.java
@@ -52,10 +52,10 @@ implements Configurable {
   public static final String START = "hbase.simpletotalorder.start";
   @Deprecated
   public static final String END = "hbase.simpletotalorder.end";
-  
+
   static final String START_BASE64 = "hbase.simpletotalorder.start.base64";
   static final String END_BASE64 = "hbase.simpletotalorder.end.base64";
-  
+
   private Configuration c;
   private byte [] startkey;
   private byte [] endkey;
@@ -65,21 +65,21 @@ implements Configurable {
   public static void setStartKey(Configuration conf, byte[] startKey) {
     conf.set(START_BASE64, Base64.encodeBytes(startKey));
   }
-  
+
   public static void setEndKey(Configuration conf, byte[] endKey) {
     conf.set(END_BASE64, Base64.encodeBytes(endKey));
   }
-  
+
   @SuppressWarnings("deprecation")
   static byte[] getStartKey(Configuration conf) {
     return getKeyFromConf(conf, START_BASE64, START);
   }
-  
+
   @SuppressWarnings("deprecation")
   static byte[] getEndKey(Configuration conf) {
     return getKeyFromConf(conf, END_BASE64, END);
   }
-  
+
   private static byte[] getKeyFromConf(Configuration conf,
       String base64Key, String deprecatedKey) {
     String encoded = conf.get(base64Key);
@@ -94,7 +94,7 @@ implements Configurable {
         " - please use static accessor methods instead.");
     return Bytes.toBytesBinary(oldStyleVal);
   }
-  
+
   @Override
   public int getPartition(final ImmutableBytesWritable key, final VALUE value,
       final int reduces) {
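The Base64-backed accessors above replace the deprecated raw START/END string properties, so binary row keys survive the Configuration round trip. Typical wiring before job submission; the "aaa"/"zzz" keys and the job variable are illustrative:

    Configuration conf = job.getConfiguration();
    SimpleTotalOrderPartitioner.setStartKey(conf, Bytes.toBytes("aaa"));
    SimpleTotalOrderPartitioner.setEndKey(conf, Bytes.toBytes("zzz"));
    job.setPartitionerClass(SimpleTotalOrderPartitioner.class);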
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index ce1928e6df7..fb38ebe7ac6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.RegionSizeCalculator;
 import org.apache.hadoop.hbase.util.Strings;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -133,7 +132,7 @@ extends InputFormat {
   /** The underlying {@link Connection} of the table. */
   private Connection connection;
 
-  
+
   /** The reverse DNS lookup cache mapping: IPAddress => HostName */
   private HashMap reverseDNSCacheMap = new HashMap<>();
 
@@ -248,9 +247,9 @@ extends InputFormat {
     try {
       RegionSizeCalculator sizeCalculator =
           new RegionSizeCalculator(getRegionLocator(), getAdmin());
-      
+
       TableName tableName = getTable().getName();
-  
+
       Pair keys = getStartEndKeys();
       if (keys == null || keys.getFirst() == null ||
           keys.getFirst().length == 0) {
@@ -544,7 +543,7 @@ extends InputFormat {
     }
     return regionLocator;
   }
-  
+
   /**
    * Allows subclasses to get the {@link Table}.
    */
@@ -569,8 +568,8 @@ extends InputFormat {
    * Allows subclasses to initialize the table information.
    *
    * @param connection  The Connection to the HBase cluster. MUST be unmanaged. We will close.
-   * @param tableName  The {@link TableName} of the table to process. 
-   * @throws IOException 
+   * @param tableName  The {@link TableName} of the table to process.
+   * @throws IOException
    */
   protected void initializeTable(Connection connection, TableName tableName) throws IOException {
     if (this.table != null || this.connection != null) {
@@ -611,7 +610,7 @@ extends InputFormat {
   protected void setTableRecordReader(TableRecordReader tableRecordReader) {
     this.tableRecordReader = tableRecordReader;
   }
-  
+
   /**
    * Handle subclass specific set up.
    * Each of the entry points used by the MapReduce framework,
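initializeTable() is intended to be called from a subclass's initialize hook with an unmanaged connection, which the input format then owns and closes. A minimal subclass sketch; the table name "exampleTable" is illustrative:

    public class ExampleTIF extends TableInputFormatBase {
      @Override
      protected void initialize(JobContext context) throws IOException {
        // MUST be an unmanaged connection: the input format takes ownership
        // and closes it along with the table and region locator.
        Connection connection = ConnectionFactory.createConnection(
            HBaseConfiguration.create(context.getConfiguration()));
        initializeTable(connection, TableName.valueOf("exampleTable"));
      }
    }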
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 5986df8fc38..604ef009000 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -98,8 +98,8 @@ implements Configurable {
     private BufferedMutator mutator;
 
     /**
-     * @throws IOException 
-     * 
+     * @throws IOException
+     *
      */
     public TableRecordWriter() throws IOException {
       String tableName = conf.get(OUTPUT_TABLE);
@@ -147,7 +147,7 @@ implements Configurable {
 
   /**
    * Creates a new record writer.
-   * 
+   *
    * Be aware that the baseline javadoc gives the impression that there is a single
    * {@link RecordWriter} per job but in HBase, it is more natural if we give you a new
    * RecordWriter per call of this method. You must close the returned RecordWriter when done.
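Because of that per-call contract, code that drives the output format by hand must close every writer it obtains. A hedged fragment; outputFormat, taskContext, key, and put are assumed to exist in the caller:

    // Each getRecordWriter() call returns a fresh writer backed by its own
    // BufferedMutator; closing it flushes any buffered mutations.
    RecordWriter<ImmutableBytesWritable, Mutation> writer =
        outputFormat.getRecordWriter(taskContext);
    try {
      writer.write(key, put);
    } finally {
      writer.close(taskContext);
    }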
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
index 9a1c98e0185..5f85537a94d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe
 @InterfaceAudience.Public
 public class TableRecordReaderImpl {
   public static final String LOG_PER_ROW_COUNT
-    = "hbase.mapreduce.log.scanner.rowcount";
+      = "hbase.mapreduce.log.scanner.rowcount";
 
   private static final Log LOG = LogFactory.getLog(TableRecordReaderImpl.class);
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
similarity index 98%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
index 7e59c3bb91e..691f0c5e220 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
@@ -48,13 +48,12 @@ import java.util.List;
  * wals, etc) directly to provide maximum performance. The snapshot is not required to be
  * restored to the live cluster or cloned. This also allows to run the mapreduce job from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, 
- * and this InputFormat can be used to run the mapreduce job directly over the snapshot files. 
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster,
+ * and this InputFormat can be used to run the mapreduce job directly over the snapshot files.
  * The snapshot should not be deleted while there are jobs reading from snapshot files.
  * 

  * Usage is similar to TableInputFormat, and
- * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
- * boolean, Path)}
+ * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, boolean, Path)}
  * can be used to configure the job.
  *
  * {@code
  * Job job = new Job(conf);
@@ -67,7 +66,7 @@ import java.util.List;
  *
  * Internally, this input format restores the snapshot into the given tmp directory. Similar to
  * {@link TableInputFormat} an InputSplit is created per region. The region is opened for reading
- * from each RecordReader. An internal RegionScanner is used to execute the 
+ * from each RecordReader. An internal RegionScanner is used to execute the
  * {@link org.apache.hadoop.hbase.CellScanner} obtained from the user.
  *
  * HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index bf11473df95..403051f68c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution.HostAndWeight;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.ClientSideRegionScanner;
 import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.Result;
@@ -60,7 +59,6 @@ import java.util.UUID;
  * Hadoop MR API-agnostic implementation for mapreduce over table snapshots.
  */
 @InterfaceAudience.Private
-@InterfaceStability.Evolving
 public class TableSnapshotInputFormatImpl {
   // TODO: Snapshots files are owned in fs by the hbase user. There is no
   // easy way to delegate access.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
index 84324e23e30..30cd46199a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.Public
 public class TextSortReducer extends Reducer {
-  
+
   /** Timestamp for all inserted rows */
   private long ts;

@@ -60,7 +60,7 @@ public class TextSortReducer extends
   /** Should skip bad lines */
   private boolean skipBadLines;
-  
+
   private Counter badLineCount;

   private ImportTsv.TsvParser parser;
@@ -130,7 +130,7 @@ public class TextSortReducer extends
     skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);
     badLineCount = context.getCounter("ImportTsv", "Bad Lines");
   }
-  
+
   @Override
   protected void reduce(
       ImmutableBytesWritable rowKey,
@@ -156,7 +156,7 @@ public class TextSortReducer extends
           ts = parsed.getTimestamp(ts);
           cellVisibilityExpr = parsed.getCellVisibility();
           ttl = parsed.getCellTTL();
-          
+
           // create tags for the parsed line
           List tags = new ArrayList<>();
           if (cellVisibilityExpr != null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
index a9d8e035806..3c507b3e026 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
@@ -75,7 +75,7 @@ extends Mapper
   /** List of cell tags */
   private List tags;
-  
+
   public long getTs() {
     return ts;
   }
@@ -180,7 +180,7 @@ extends Mapper
       for (int i = 0; i < parsed.getColumnCount(); i++) {
         if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex()
             || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex()
-            || i == parser.getCellTTLColumnIndex() || (skipEmptyColumns 
+            || i == parser.getCellTTLColumnIndex() || (skipEmptyColumns
             && parsed.getColumnLength(i) == 0)) {
           continue;
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
index 581f0d08f33..a3b095c416b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
@@ -118,11 +118,11 @@ extends Mapper
       if (skipBadLines) {
         incrementBadLineCount(1);
         return;
-      } 
+      }
       throw new IOException(badLine);
     } catch (InterruptedException e) {
       e.printStackTrace();
       Thread.currentThread().interrupt();
-    } 
+    }
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java
index 199e1685ba6..b1f15bade5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java
@@ -21,6 +21,6 @@ Provides HBase MapReduce
 Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 See HBase and MapReduce
-in the HBase Reference Guide for mapreduce over hbase documentation. 
+in the HBase Reference Guide for mapreduce over hbase documentation.
 */
 package org.apache.hadoop.hbase.mapreduce;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
similarity index 99%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 8bb266e15e2..acf6ff8f733 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -45,8 +45,8 @@ import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.mapreduce.TableMapper;
 import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
+import org.apache.hadoop.hbase.mapreduce.TableMapper;
 import org.apache.hadoop.hbase.mapreduce.TableSplit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
@@ -514,7 +514,7 @@ public class VerifyReplication extends Configured implements Tool {
         versions = Integer.parseInt(cmd.substring(versionsArgKey.length()));
         continue;
       }
-      
+
       final String batchArgKey = "--batch=";
       if (cmd.startsWith(batchArgKey)) {
         batch = Integer.parseInt(cmd.substring(batchArgKey.length()));
@@ -683,7 +683,7 @@ public class VerifyReplication extends Configured implements Tool {
     Job job = createSubmittableJob(conf, args);
     if (job != null) {
       return job.waitForCompletion(true) ? 0 : 1;
-    } 
+    }
     return 1;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java
similarity index 100%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java
rename to hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index eebb0f3967a..23a70a91300 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
+import org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem;
 import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
 import org.apache.hadoop.hbase.util.*;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java
index ab6a86d2616..d085c21a394 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java
@@ -36,6 +36,6 @@ public class TestDriver {
     ProgramDriver programDriverMock = mock(ProgramDriver.class);
     Driver.setProgramDriver(programDriverMock);
     Driver.main(new String[]{});
-    verify(programDriverMock).driver(Mockito.any(String[].class)); 
+    verify(programDriverMock).driver(Mockito.any(String[].class));
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java
similarity index 98%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java
index 36e45e4d182..7131cf91ddc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java
@@ -65,7 +65,7 @@ public class TestGroupingTableMap {
       cfg.set(GroupingTableMap.GROUP_COLUMNS, "familyA:qualifierA familyB:qualifierB");
       JobConf jobConf = new JobConf(cfg);
       gTableMap.configure(jobConf);
-      
+
       byte[] row = {};
       List keyValues = ImmutableList.of(
         new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")),
@@ -79,7 +79,7 @@ public class TestGroupingTableMap {
       verifyZeroInteractions(outputCollectorMock);
     } finally {
       if (gTableMap != null)
-        gTableMap.close(); 
+        gTableMap.close();
     }
   }

@@ -95,7 +95,7 @@ public class TestGroupingTableMap {
       cfg.set(GroupingTableMap.GROUP_COLUMNS, "familyA:qualifierA familyB:qualifierB");
       JobConf jobConf = new JobConf(cfg);
       gTableMap.configure(jobConf);
-      
+
       byte[] row = {};
       List keyValues = ImmutableList.of(
         new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")),
@@ -118,7 +118,7 @@ public class TestGroupingTableMap {
   @Test
   @SuppressWarnings({ "deprecation" })
   public void shouldCreateNewKey() throws Exception {
-    GroupingTableMap gTableMap = null; 
+    GroupingTableMap gTableMap = null;
     try {
       Result result = mock(Result.class);
       Reporter reporter = mock(Reporter.class);
@@ -128,7 +128,7 @@ public class TestGroupingTableMap {
       cfg.set(GroupingTableMap.GROUP_COLUMNS, "familyA:qualifierA familyB:qualifierB");
       JobConf jobConf = new JobConf(cfg);
       gTableMap.configure(jobConf);
-      
+
       final byte[] firstPartKeyValue = Bytes.toBytes("34879512738945");
       final byte[] secondPartKeyValue = Bytes.toBytes("35245142671437");
       byte[] row = {};
@@ -136,7 +136,7 @@ public class TestGroupingTableMap {
         new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), firstPartKeyValue),
         new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), secondPartKeyValue));
       when(result.listCells()).thenReturn(cells);
-      
+
       final AtomicBoolean outputCollected = new AtomicBoolean();
       OutputCollector outputCollector = new OutputCollector() {
@@ -148,11 +148,11 @@ public class TestGroupingTableMap {
           outputCollected.set(true);
         }
       };
-      
+
       gTableMap.map(null, result, outputCollector, reporter);
       verify(result).listCells();
       Assert.assertTrue("Output not received", outputCollected.get());
-      
+
       final byte[] firstPartValue = Bytes.toBytes("238947928");
       final byte[] secondPartValue = Bytes.toBytes("4678456942345");
       byte[][] data = { firstPartValue, secondPartValue };
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java
index 3fad1fe5fe5..e222d0b95f8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java
@@ -49,11 +49,11 @@ public class TestIdentityTableMap {
       ImmutableBytesWritable bytesWritableMock = mock(ImmutableBytesWritable.class);
       OutputCollector outputCollectorMock = mock(OutputCollector.class);
-      
+
       for (int i = 0; i < recordNumber; i++)
         identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock,
             reporterMock);
-      
+
       verify(outputCollectorMock, times(recordNumber)).collect(
           Mockito.any(ImmutableBytesWritable.class), Mockito.any(Result.class));
     } finally {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
index 4b93843484a..f39a7f59abd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java
@@ -71,7 +71,6 @@ import org.mockito.stubbing.Answer;
 /**
  * This tests the TableInputFormat and its recovery semantics
- *
  */
 @Category({MapReduceTests.class, LargeTests.class})
 public class TestTableInputFormat {
@@ -103,7 +102,7 @@ public class TestTableInputFormat {
   /**
    * Setup a table with two rows and values.
-   * 
+   *
    * @param tableName
    * @return
    * @throws IOException
@@ -114,7 +113,7 @@ public class TestTableInputFormat {
   /**
    * Setup a table with two rows and values per column family.
-   * 
+   *
    * @param tableName
    * @return
    * @throws IOException
@@ -136,7 +135,7 @@ public class TestTableInputFormat {
   /**
    * Verify that the result and key have expected values.
-   * 
+   *
    * @param r
    * @param key
    * @param expectedKey
@@ -155,12 +154,12 @@ public class TestTableInputFormat {
   /**
    * Create table data and run tests on specified htable using the
    * o.a.h.hbase.mapred API.
-   * 
+   *
    * @param table
    * @throws IOException
    */
   static void runTestMapred(Table table) throws IOException {
-    org.apache.hadoop.hbase.mapred.TableRecordReader trr = 
+    org.apache.hadoop.hbase.mapred.TableRecordReader trr =
         new org.apache.hadoop.hbase.mapred.TableRecordReader();
     trr.setStartRow("aaa".getBytes());
     trr.setEndRow("zzz".getBytes());
@@ -186,7 +185,7 @@ public class TestTableInputFormat {
   /**
    * Create a table that IOE's on first scanner next call
-   * 
+   *
    * @throws IOException
    */
   static Table createIOEScannerTable(byte[] name, final int failCnt)
@@ -221,7 +220,7 @@ public class TestTableInputFormat {
   /**
    * Create a table that throws a DoNoRetryIOException on first scanner next
    * call
-   * 
+   *
    * @throws IOException
    */
   static Table createDNRIOEScannerTable(byte[] name, final int failCnt)
@@ -258,7 +257,7 @@ public class TestTableInputFormat {
   /**
    * Run test assuming no errors using mapred api.
-   * 
+   *
    * @throws IOException
    */
   @Test
@@ -269,7 +268,7 @@ public class TestTableInputFormat {
   /**
    * Run test assuming Scanner IOException failure using mapred api,
-   * 
+   *
    * @throws IOException
    */
   @Test
@@ -280,7 +279,7 @@ public class TestTableInputFormat {
   /**
    * Run test assuming Scanner IOException failure using mapred api,
-   * 
+   *
    * @throws IOException
    */
   @Test(expected = IOException.class)
@@ -291,7 +290,7 @@ public class TestTableInputFormat {
   /**
    * Run test assuming NotServingRegionException using mapred api.
-   * 
+   *
    * @throws org.apache.hadoop.hbase.DoNotRetryIOException
    */
   @Test
@@ -302,7 +301,7 @@ public class TestTableInputFormat {
   /**
    * Run test assuming NotServingRegionException using mapred api.
-   * 
+   *
    * @throws org.apache.hadoop.hbase.DoNotRetryIOException
    */
   @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
index c689c834f6c..1c72f2a1832 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
@@ -72,7 +72,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
   }

   static class TestTableSnapshotMapper extends MapReduceBase
-    implements TableMap {
+      implements TableMap {
     @Override
     public void map(ImmutableBytesWritable key, Result value,
         OutputCollector collector, Reporter reporter)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/HadoopSecurityEnabledUserProviderForTesting.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HadoopSecurityEnabledUserProviderForTesting.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/HadoopSecurityEnabledUserProviderForTesting.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/HadoopSecurityEnabledUserProviderForTesting.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
index efacca91ba0..3203f0c8989 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
@@ -87,10 +87,10 @@ public class NMapInputFormat extends InputFormat {
     public void write(DataOutput out) throws IOException {
     }
   }
-  
+
   private static class SingleRecordReader extends RecordReader {
-    
+
     private final K key;
     private final V value;
     boolean providedKey = false;
@@ -129,6 +129,6 @@ public class NMapInputFormat extends InputFormat {
       providedKey = true;
       return true;
     }
-    
+
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java
index b7fdb47ccc5..7e366028adf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java
@@ -4,9 +4,9 @@
  * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0
  * (the "License"); you may not use this file except in compliance with the License. You may
  * obtain a copy of the License at
- * 
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software distributed under the
  * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
  * either express or implied. See the License for the specific language governing permissions and
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 87522b6e069..c6a87613114 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
@@ -123,9 +124,9 @@ public class TestHFileOutputFormat2 {
       withTimeout(this.getClass()).withLookingForStuckThread(true).build();
   private final static int ROWSPERSPLIT = 1024;

-  private static final byte[][] FAMILIES
-    = { Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-A"))
-    , Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-B"))};
+  public static final byte[] FAMILY_NAME = TestHRegionFileSystem.FAMILY_NAME;
+  private static final byte[][] FAMILIES = {
+    Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B"))};
   private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2",
       "TestTable3").map(TableName::valueOf).toArray(TableName[]::new);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java
index 2867f130bf1..c0debb43431 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java
@@ -4,9 +4,9 @@
  * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0
  * (the "License"); you may not use this file except in compliance with the License. You may
  * obtain a copy of the License at
- * 
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software distributed under the
  * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
  * either express or implied. See the License for the specific language governing permissions and
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java
similarity index 98%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java
index 1f4efcd8f3c..87e7852b635 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java
@@ -51,24 +51,24 @@ import org.junit.rules.TestName;
  */
 @Category(LargeTests.class)
 public class TestHashTable {
-  
+
   private static final Log LOG = LogFactory.getLog(TestHashTable.class);
-  
+
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

   @Rule
   public TestName name = new TestName();
-  
+
   @BeforeClass
   public static void beforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(3);
   }
-  
+
   @AfterClass
   public static void afterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }
-  
+
   @Test
   public void testHashTable() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
@@ -76,16 +76,16 @@ public class TestHashTable {
     final byte[] column1 = Bytes.toBytes("c1");
     final byte[] column2 = Bytes.toBytes("c2");
     final byte[] column3 = Bytes.toBytes("c3");
-    
+
     int numRows = 100;
     int numRegions = 10;
     int numHashFiles = 3;
-    
+
     byte[][] splitRows = new byte[numRegions-1][];
     for (int i = 1; i < numRegions; i++) {
       splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions);
     }
-    
+
     long timestamp = 1430764183454L;
     // put rows into the first table
     Table t1 = TEST_UTIL.createTable(tableName, family, splitRows);
@@ -97,22 +97,22 @@ public class TestHashTable {
       t1.put(p);
     }
     t1.close();
-    
+
     HashTable hashTable = new HashTable(TEST_UTIL.getConfiguration());
-    
+
     Path testDir = TEST_UTIL.getDataTestDirOnTestFS(tableName.getNameAsString());
-    
+
     long batchSize = 300;
-    int code = hashTable.run(new String[] { 
+    int code = hashTable.run(new String[] {
       "--batchsize=" + batchSize,
       "--numhashfiles=" + numHashFiles,
       "--scanbatch=2",
       tableName.getNameAsString(),
       testDir.toString()});
     assertEquals("test job failed", 0, code);
-    
+
     FileSystem fs = TEST_UTIL.getTestFileSystem();
-    
+
     HashTable.TableHash tableHash = HashTable.TableHash.read(fs.getConf(), testDir);
     assertEquals(tableName.getNameAsString(), tableHash.tableName);
     assertEquals(batchSize, tableHash.batchSize);
@@ -121,7 +121,7 @@ public class TestHashTable {
     for (ImmutableBytesWritable bytes : tableHash.partitions) {
       LOG.debug("partition: " + Bytes.toInt(bytes.get()));
     }
-    
+
     ImmutableMap expectedHashes = ImmutableMap.builder()
       .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f")))
@@ -145,12 +145,12 @@ public class TestHashTable {
       .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38")))
       .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56")))
       .build();
-    
+
     Map actualHashes = new HashMap<>();
     Path dataDir = new Path(testDir, HashTable.HASH_DATA_DIR);
     for (int i = 0; i < numHashFiles; i++) {
       Path hashPath = new Path(dataDir, HashTable.TableHash.getDataFileName(i));
-      
+
       MapFile.Reader reader = new MapFile.Reader(hashPath, fs.getConf());
       ImmutableBytesWritable key = new ImmutableBytesWritable();
       ImmutableBytesWritable hash = new ImmutableBytesWritable();
@@ -158,7 +158,7 @@ public class TestHashTable {
       String keyString = Bytes.toHex(key.get(), key.getOffset(), key.getLength());
       LOG.debug("Key: " + (keyString.isEmpty() ? "-1" : Integer.parseInt(keyString, 16))
           + " Hash: " + Bytes.toHex(hash.get(), hash.getOffset(), hash.getLength()));
-      
+
       int intKey = -1;
       if (key.getLength() > 0) {
         intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength());
@@ -170,22 +170,22 @@ public class TestHashTable {
       }
       reader.close();
     }
-    
+
     FileStatus[] files = fs.listStatus(testDir);
     for (FileStatus file : files) {
       LOG.debug("Output file: " + file.getPath());
     }
-    
+
     files = fs.listStatus(dataDir);
     for (FileStatus file : files) {
       LOG.debug("Data file: " + file.getPath());
     }
-    
+
     if (!expectedHashes.equals(actualHashes)) {
       LOG.error("Diff: " + Maps.difference(expectedHashes, actualHashes));
     }
     Assert.assertEquals(expectedHashes, actualHashes);
-    
+
     TEST_UTIL.deleteTable(tableName);
     TEST_UTIL.cleanupDataTestDirOnTestFS();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index dc5981790fd..91d2696fbe7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -34,7 +34,6 @@ import java.net.URL;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.NavigableMap;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
index 6d9b05b3937..7d6d74f3417 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java
@@ -151,7 +151,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
    * the ImportTsv Tool instance so that other tests can inspect it
    * for further validation as necessary. This method is static to insure
    * non-reliance on instance's util/conf facilities.
-   * 
+   *
    * @param args
    *          Any arguments to pass BEFORE inputFile path is appended.
    * @param dataAvailable
@@ -193,7 +193,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable {
   /**
    * Confirm ImportTsv via data in online table.
-   * 
+   *
    * @param dataAvailable
    */
   private static void validateTable(Configuration conf, TableName tableName, String family,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
similarity index 99%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
index efcf91eb238..7b6e6844199 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
@@ -363,7 +363,7 @@ public class TestImportTsv implements Configurable {
     doMROnTableTest(util, tn, FAMILY, data, args, 1, 4);
     util.deleteTable(tn);
   }
-  
+
   @Test
   public void testSkipEmptyColumns() throws Exception {
     Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java
similarity index 97%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java
index 32f511b236c..530d9c57e3c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java
@@ -56,7 +56,7 @@ public class TestMultiTableSnapshotInputFormat extends MultiTableInputFormatTest
     for (String tableName : TABLES) {
       SnapshotTestingUtils
           .createSnapshotAndValidate(TEST_UTIL.getAdmin(), TableName.valueOf(tableName),
-              ImmutableList.of(MultiTableInputFormatTestBase.INPUT_FAMILY), null,
+              ImmutableList.of(INPUT_FAMILY), null,
               snapshotNameForTable(tableName), FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
               TEST_UTIL.getTestFileSystem(), true);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
similarity index 100%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
similarity index 98%
rename from hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
index 51dc2380e5d..301cfef68a8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
*/ -package org.apache.hadoop.hbase.util; +package org.apache.hadoop.hbase.mapreduce; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionInfo; @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFiles.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSecureLoadIncrementalHFilesSplitRecovery.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java similarity index 99% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java index 0f41f336777..5629cb4c9e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java @@ -37,13 +37,13 @@ import org.junit.Test; public class TestSimpleTotalOrderPartitioner { protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); Configuration conf = TEST_UTIL.getConfiguration(); - + @Test public void testSplit() throws Exception { String start = "a"; String end = "{"; SimpleTotalOrderPartitioner<byte[]> p = new SimpleTotalOrderPartitioner<>(); - + this.conf.set(SimpleTotalOrderPartitioner.START, start); this.conf.set(SimpleTotalOrderPartitioner.END, end); p.setConf(this.conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java similarity index 97% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index 79b2cf05c33..9a0c1604249
100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -59,22 +59,22 @@ public class TestSyncTable { @Rule public final TestRule timeout = CategoryBasedTimeout.builder(). withTimeout(this.getClass()).withLookingForStuckThread(true).build(); private static final Log LOG = LogFactory.getLog(TestSyncTable.class); - + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @Rule public TestName name = new TestName(); - + @BeforeClass public static void beforeClass() throws Exception { TEST_UTIL.startMiniCluster(3); } - + @AfterClass public static void afterClass() throws Exception { TEST_UTIL.shutdownMiniCluster(); } - + private static byte[][] generateSplits(int numRows, int numRegions) { byte[][] splitRows = new byte[numRegions-1][]; for (int i = 1; i < numRegions; i++) { @@ -82,25 +82,25 @@ public class TestSyncTable { } return splitRows; } - + @Test public void testSyncTable() throws Exception { final TableName sourceTableName = TableName.valueOf(name.getMethodName() + "_source"); final TableName targetTableName = TableName.valueOf(name.getMethodName() + "_target"); Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTable"); - + writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir); assertEqualTables(90, sourceTableName, targetTableName); - + assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue()); assertEquals(10, syncCounters.findCounter(Counter.TARGETMISSINGROWS).getValue()); assertEquals(50, syncCounters.findCounter(Counter.SOURCEMISSINGCELLS).getValue()); assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue()); assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue()); - + TEST_UTIL.deleteTable(sourceTableName); TEST_UTIL.deleteTable(targetTableName); TEST_UTIL.cleanupDataTestDirOnTestFS(); @@ -110,26 +110,26 @@ public class TestSyncTable { TableName targetTableName) throws Exception { Table sourceTable = TEST_UTIL.getConnection().getTable(sourceTableName); Table targetTable = TEST_UTIL.getConnection().getTable(targetTableName); - + ResultScanner sourceScanner = sourceTable.getScanner(new Scan()); ResultScanner targetScanner = targetTable.getScanner(new Scan()); - + for (int i = 0; i < expectedRows; i++) { Result sourceRow = sourceScanner.next(); Result targetRow = targetScanner.next(); - + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + " cells:" + sourceRow); LOG.debug("TARGET row: " + (targetRow == null ? 
"null" : Bytes.toInt(targetRow.getRow())) + " cells:" + targetRow); - + if (sourceRow == null) { Assert.fail("Expected " + expectedRows - + " source rows but only found " + i); + + " source rows but only found " + i); } if (targetRow == null) { Assert.fail("Expected " + expectedRows - + " target rows but only found " + i); + + " target rows but only found " + i); } Cell[] sourceCells = sourceRow.rawCells(); Cell[] targetCells = targetRow.rawCells(); @@ -185,13 +185,13 @@ public class TestSyncTable { private Counters syncTables(TableName sourceTableName, TableName targetTableName, Path testDir) throws Exception { SyncTable syncTable = new SyncTable(TEST_UTIL.getConfiguration()); - int code = syncTable.run(new String[] { + int code = syncTable.run(new String[] { testDir.toString(), sourceTableName.getNameAsString(), targetTableName.getNameAsString() }); assertEquals("sync table job failed", 0, code); - + LOG.info("Sync tables completed"); return syncTable.counters; } @@ -202,16 +202,16 @@ public class TestSyncTable { long batchSize = 100; // should be 2 batches per region int scanBatch = 1; HashTable hashTable = new HashTable(TEST_UTIL.getConfiguration()); - int code = hashTable.run(new String[] { + int code = hashTable.run(new String[] { "--batchsize=" + batchSize, "--numhashfiles=" + numHashFiles, "--scanbatch=" + scanBatch, sourceTableName.getNameAsString(), testDir.toString()}); assertEquals("hash table job failed", 0, code); - + FileSystem fs = TEST_UTIL.getTestFileSystem(); - + HashTable.TableHash tableHash = HashTable.TableHash.read(fs.getConf(), testDir); assertEquals(sourceTableName.getNameAsString(), tableHash.tableName); assertEquals(batchSize, tableHash.batchSize); @@ -229,11 +229,11 @@ public class TestSyncTable { final byte[] value1 = Bytes.toBytes("val1"); final byte[] value2 = Bytes.toBytes("val2"); final byte[] value3 = Bytes.toBytes("val3"); - + int numRows = 100; int sourceRegions = 10; int targetRegions = 6; - + Table sourceTable = TEST_UTIL.createTable(sourceTableName, family, generateSplits(numRows, sourceRegions)); @@ -249,7 +249,7 @@ public class TestSyncTable { sourcePut.addColumn(family, column1, timestamp, value1); sourcePut.addColumn(family, column2, timestamp, value2); sourceTable.put(sourcePut); - + Put targetPut = new Put(Bytes.toBytes(rowIndex)); targetPut.addColumn(family, column1, timestamp, value1); targetPut.addColumn(family, column2, timestamp, value2); @@ -324,16 +324,16 @@ public class TestSyncTable { sourcePut.addColumn(family, column1, timestamp, value1); sourcePut.addColumn(family, column2, timestamp, value2); sourceTable.put(sourcePut); - + Put targetPut = new Put(Bytes.toBytes(rowIndex)); targetPut.addColumn(family, column1, timestamp, value3); targetPut.addColumn(family, column2, timestamp, value3); targetTable.put(targetPut); } - + sourceTable.close(); targetTable.close(); } - + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java similarity index 99% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java index 469351983ae..b4c6ab99589 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -223,7 +223,7 @@ public class 
TestTableInputFormat { } /** - * Create a table that throws a NotServingRegionException on first scanner + * Create a table that throws a NotServingRegionException on first scanner * next call * * @throws IOException @@ -243,7 +243,7 @@ public class TestTableInputFormat { doReturn("bogus".getBytes()).when(scan).getStartRow(); // avoid npe ResultScanner scanner = mock(ResultScanner.class); - invocation.callRealMethod(); // simulate NotServingRegionException + invocation.callRealMethod(); // simulate NotServingRegionException doThrow( new NotServingRegionException("Injected simulated TimeoutException")) .when(scanner).next(); @@ -275,7 +275,7 @@ public class TestTableInputFormat { /** * Run test assuming Scanner IOException failure using newer mapreduce api - * + * * @throws IOException * @throws InterruptedException */ @@ -288,7 +288,7 @@ public class TestTableInputFormat { /** * Run test assuming Scanner IOException failure using newer mapreduce api - * + * * @throws IOException * @throws InterruptedException */ @@ -301,7 +301,7 @@ public class TestTableInputFormat { /** * Run test assuming NotServingRegionException using newer mapreduce api - * + * * @throws InterruptedException * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @@ -314,7 +314,7 @@ public class TestTableInputFormat { /** * Run test assuming NotServingRegionException using newer mapreduce api - * + * * @throws InterruptedException * @throws org.apache.hadoop.hbase.NotServingRegionException */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan1.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan1.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan1.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan1.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan2.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan2.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan2.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java similarity index 99% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index 0f493337698..13b6a965f1e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -74,7 +74,7 @@ public abstract class TestTableInputFormatScanBase { 
@BeforeClass public static void setUpBeforeClass() throws Exception { // test intermittently fails under hadoop2 (2.0.2-alpha) if shortcircuit-read (scr) is on. - // this turns it off for this test. TODO: Figure out why scr breaks recovery. + // this turns it off for this test. TODO: Figure out why scr breaks recovery. System.setProperty("hbase.tests.use.shortcircuit.reads", "false"); // switch TIF to log at DEBUG level @@ -172,7 +172,7 @@ public abstract class TestTableInputFormatScanBase { /** * Tests an MR Scan initialized from properties set in the Configuration. - * + * * @throws IOException * @throws ClassNotFoundException * @throws InterruptedException diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java similarity index 99% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java index 303a144d16f..506bf4f4d5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java @@ -4,9 +4,9 @@ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with the License. You may * obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software distributed under the * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. 
See the License for the specific language governing permissions and @@ -44,7 +44,7 @@ public class TestTableMapReduceUtil { public void testInitTableMapperJob1() throws Exception { Configuration configuration = new Configuration(); Job job = new Job(configuration, "tableName"); - // test + // test TableMapReduceUtil.initTableMapperJob("Table", new Scan(), Import.Importer.class, Text.class, Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java similarity index 96% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index 5e630822b74..028df983357 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -32,10 +32,10 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.TestTableSnapshotScanner; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -233,17 +233,6 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa } } - public static void blockUntilSplitFinished(HBaseTestingUtility util, TableName tableName, - int expectedRegionSize) throws Exception { - for (int i = 0; i < 100; i++) { - List<HRegionInfo> hRegionInfoList = util.getAdmin().getTableRegions(tableName); - if (hRegionInfoList.size() >= expectedRegionSize) { - break; - } - Thread.sleep(1000); - } - } - @Test public void testNoDuplicateResultsWhenSplitting() throws Exception { setupCluster(); @@ -263,7 +252,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa // split to 2 regions admin.split(tableName, Bytes.toBytes("eee")); - blockUntilSplitFinished(UTIL, tableName, 2); + TestTableSnapshotScanner.blockUntilSplitFinished(UTIL, tableName, 2); Path rootDir = FSUtils.getRootDir(UTIL.getConfiguration()); FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java rename to
hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java similarity index 99% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java index 9d8b8f0b0e3..a9da98b531d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.ParsedLine; import org.apache.hadoop.hbase.util.Bytes; /** - * * Just shows a simple example of how the attributes can be extracted and added * to the puts */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java similarity index 99% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index e1cb8baf21c..69c4c7cabb4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -688,7 +688,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { htable1.put(put); put.addColumn(famName, qualifierName, ts + 3, Bytes.toBytes("v3")); htable1.put(put); - + Scan scan = new Scan(); scan.setMaxVersions(100); ResultScanner scanner1 = htable1.getScanner(scan); @@ -697,7 +697,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { assertEquals(1, res1.length); assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size()); - + for (int i = 0; i < NB_RETRIES; i++) { scan = new Scan(); 
scan.setMaxVersions(100); @@ -720,14 +720,14 @@ public class TestReplicationSmallTests extends TestReplicationBase { fail("Waited too much time for normal batch replication"); } } - + try { - // Disabling replication and modifying the particular version of the cell to validate the feature. + // Disabling replication and modifying the particular version of the cell to validate the feature. admin.disablePeer(PEER_ID); Put put2 = new Put(Bytes.toBytes("r1")); put2.addColumn(famName, qualifierName, ts +2, Bytes.toBytes("v99")); htable2.put(put2); - + scan = new Scan(); scan.setMaxVersions(100); scanner1 = htable2.getScanner(scan); @@ -735,7 +735,7 @@ public class TestReplicationSmallTests extends TestReplicationBase { scanner1.close(); assertEquals(1, res1.length); assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size()); - + String[] args = new String[] {"--versions=100", PEER_ID, tableName.getNameAsString()}; runVerifyReplication(args, 0, 1); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotNoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotNoCluster.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotNoCluster.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotNoCluster.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java similarity index 100% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java similarity index 93% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java rename to hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index ad832e36410..6b5cbe2dd42 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -19,12 +19,10 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; import java.io.InterruptedIOException; import java.lang.reflect.Constructor; -import java.net.InetAddress; import java.security.SecureRandom; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Locale; import java.util.Properties; import java.util.Random; import java.util.concurrent.atomic.AtomicReference; @@ -53,13 +51,12 @@ import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.hadoop.hbase.security.HBaseKerberosUtils; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessControlClient; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; import org.apache.hadoop.hbase.util.test.LoadTestDataGeneratorWithACL; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ToolRunner; /** @@ -82,12 +79,6 @@ public class LoadTestTool extends AbstractHBaseTool { /** Table name to use of not overridden on the command line */ protected static final String DEFAULT_TABLE_NAME = "cluster_test"; - /** Column family used by the test */ - public static byte[] DEFAULT_COLUMN_FAMILY = Bytes.toBytes("test_cf"); - - /** Column families used by the test */ - public static final byte[][] DEFAULT_COLUMN_FAMILIES = { DEFAULT_COLUMN_FAMILY }; - /** The default data size if not specified */ protected static final int DEFAULT_DATA_SIZE = 64; @@ -114,19 +105,11 @@ public class LoadTestTool extends AbstractHBaseTool { protected static final String OPT_USAGE_COMPRESSION = "Compression type, " + "one of " + Arrays.toString(Compression.Algorithm.values()); - public static final String OPT_DATA_BLOCK_ENCODING_USAGE = - "Encoding algorithm (e.g. prefix " - + "compression) to use for data blocks in the test column family, " - + "one of " + Arrays.toString(DataBlockEncoding.values()) + "."; - public static final String OPT_BLOOM = "bloom"; public static final String OPT_COMPRESSION = "compression"; public static final String OPT_DEFERRED_LOG_FLUSH = "deferredlogflush"; public static final String OPT_DEFERRED_LOG_FLUSH_USAGE = "Enable deferred log flush."; - public static final String OPT_DATA_BLOCK_ENCODING = - HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase(Locale.ROOT); - public static final String OPT_INMEMORY = "in_memory"; public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + "inmemory as far as possible. 
Not guaranteed that reads are always served from inmemory"; @@ -335,7 +318,7 @@ public class LoadTestTool extends AbstractHBaseTool { addOptNoArg(OPT_INIT_ONLY, "Initialize the test table only, don't do any loading"); addOptWithArg(OPT_BLOOM, OPT_USAGE_BLOOM); addOptWithArg(OPT_COMPRESSION, OPT_USAGE_COMPRESSION); - addOptWithArg(OPT_DATA_BLOCK_ENCODING, OPT_DATA_BLOCK_ENCODING_USAGE); + addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); addOptWithArg(OPT_MAX_READ_ERRORS, "The maximum number of read errors " + "to tolerate before terminating all reader threads. The default is " + MultiThreadedReader.DEFAULT_MAX_ERRORS + "."); @@ -393,7 +376,7 @@ public class LoadTestTool extends AbstractHBaseTool { families[i] = Bytes.toBytes(list[i]); } } else { - families = DEFAULT_COLUMN_FAMILIES; + families = HFileTestUtil.DEFAULT_COLUMN_FAMILIES; } isWrite = cmd.hasOption(OPT_WRITE); @@ -526,7 +509,7 @@ public class LoadTestTool extends AbstractHBaseTool { } private void parseColumnFamilyOptions(CommandLine cmd) { - String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING); + String dataBlockEncodingStr = cmd.getOptionValue(HFileTestUtil.OPT_DATA_BLOCK_ENCODING); dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null : DataBlockEncoding.valueOf(dataBlockEncodingStr); @@ -604,7 +587,7 @@ public class LoadTestTool extends AbstractHBaseTool { LOG.error(exp); return EXIT_FAILURE; } - userOwner = User.create(loginAndReturnUGI(conf, superUser)); + userOwner = User.create(HBaseKerberosUtils.loginAndReturnUGI(conf, superUser)); } else { superUser = clazzAndArgs[1]; userNames = clazzAndArgs[2]; @@ -642,7 +625,7 @@ public class LoadTestTool extends AbstractHBaseTool { User user = null; for (String userStr : users) { if (User.isHBaseSecurityEnabled(conf)) { - user = User.create(loginAndReturnUGI(conf, userStr)); + user = User.create(HBaseKerberosUtils.loginAndReturnUGI(conf, userStr)); } else { user = User.createUserForTesting(conf, userStr, new String[0]); } @@ -806,27 +789,6 @@ public class LoadTestTool extends AbstractHBaseTool { } } - public static byte[] generateData(final Random r, int length) { - byte [] b = new byte [length]; - int i = 0; - - for(i = 0; i < (length-8); i += 8) { - b[i] = (byte) (65 + r.nextInt(26)); - b[i+1] = b[i]; - b[i+2] = b[i]; - b[i+3] = b[i]; - b[i+4] = b[i]; - b[i+5] = b[i]; - b[i+6] = b[i]; - b[i+7] = b[i]; - } - - byte a = (byte) (65 + r.nextInt(26)); - for(; i < length; i++) { - b[i] = a; - } - return b; - } public static void main(String[] args) { new LoadTestTool().doStaticMain(args); } @@ -950,19 +912,4 @@ public class LoadTestTool extends AbstractHBaseTool { LOG.debug("Added authentication properties to config successfully."); } - public static UserGroupInformation loginAndReturnUGI(Configuration conf, String username) - throws IOException { - String hostname = InetAddress.getLocalHost().getHostName(); - String keyTabFileConfKey = "hbase." + username + ".keytab.file"; - String keyTabFileLocation = conf.get(keyTabFileConfKey); - String principalConfKey = "hbase." 
+ username + ".kerberos.principal"; - String principal = SecurityUtil.getServerPrincipal(conf.get(principalConfKey), hostname); - if (keyTabFileLocation == null || principal == null) { - LOG.warn("Principal or key tab file null for : " + principalConfKey + ", " - + keyTabFileConfKey); - } - UserGroupInformation ugi = - UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTabFileLocation); - return ugi; - } } diff --git a/hbase-mapreduce/src/test/resources/hbase-site.xml b/hbase-mapreduce/src/test/resources/hbase-site.xml new file mode 100644 index 00000000000..64a19644358 --- /dev/null +++ b/hbase-mapreduce/src/test/resources/hbase-site.xml @@ -0,0 +1,161 @@ + + + + + + hbase.regionserver.msginterval + 1000 + Interval between messages from the RegionServer to HMaster + in milliseconds. Default is 15. Set this value low if you want unit + tests to be responsive. + + + + hbase.defaults.for.version.skip + true + + + hbase.server.thread.wakefrequency + 1000 + Time to sleep in between searches for work (in milliseconds). + Used as sleep interval by service threads such as hbase:meta scanner and log roller. + + + + hbase.master.event.waiting.time + 50 + Time to sleep between checks to see if a table event took place. + + + + hbase.regionserver.handler.count + 5 + + + hbase.regionserver.metahandler.count + 6 + + + hbase.ipc.server.read.threadpool.size + 3 + + + hbase.master.info.port + -1 + The port for the hbase master web UI + Set to -1 if you do not want the info server to run. + + + + hbase.master.port + 0 + Always have masters and regionservers come up on port '0' so we don't clash over + default ports. + + + + hbase.regionserver.port + 0 + Always have masters and regionservers come up on port '0' so we don't clash over + default ports. + + + + hbase.ipc.client.fallback-to-simple-auth-allowed + true + + + + hbase.regionserver.info.port + -1 + The port for the hbase regionserver web UI + Set to -1 if you do not want the info server to run. + + + + hbase.regionserver.info.port.auto + true + Info server auto port bind. Enables automatic port + search if hbase.regionserver.info.port is already in use. + Enabled for testing to run multiple tests on one machine. + + + + hbase.regionserver.safemode + false + + Turn on/off safe mode in region server. Always on for production, always off + for tests. + + + + hbase.hregion.max.filesize + 67108864 + + Maximum desired file size for an HRegion. If filesize exceeds + value + (value / 2), the HRegion is split in two. Default: 256M. + + Keep the maximum filesize small so we split more often in tests. + + + + hadoop.log.dir + ${user.dir}/../logs + + + hbase.zookeeper.property.clientPort + 21818 + Property from ZooKeeper's config zoo.cfg. + The port at which the clients will connect. + + + + hbase.defaults.for.version.skip + true + + Set to true to skip the 'hbase.defaults.for.version'. + Setting this to true can be useful in contexts other than + the other side of a maven generation; i.e. running in an + ide. You'll want to set this boolean to true to avoid + seeing the RuntimeException complaint: "hbase-default.xml file + seems to be for and old version of HBase (@@@VERSION@@@), this + version is X.X.X-SNAPSHOT" + + + + hbase.table.sanity.checks + false + Skip sanity checks in tests + + + + hbase.procedure.fail.on.corruption + true + + Enable replay sanity checks on procedure tests. 
+ + + + hbase.hconnection.threads.keepalivetime + 3 + + diff --git a/hbase-mapreduce/src/test/resources/hbase-site2.xml b/hbase-mapreduce/src/test/resources/hbase-site2.xml new file mode 100644 index 00000000000..8bef31a4d77 --- /dev/null +++ b/hbase-mapreduce/src/test/resources/hbase-site2.xml @@ -0,0 +1,146 @@ + + + + + + hbase.custom.config + 1000 + + + hbase.regionserver.msginterval + 1000 + Interval between messages from the RegionServer to HMaster + in milliseconds. Default is 15. Set this value low if you want unit + tests to be responsive. + + + + hbase.defaults.for.version.skip + true + + + hbase.server.thread.wakefrequency + 1000 + Time to sleep in between searches for work (in milliseconds). + Used as sleep interval by service threads such as hbase:meta scanner and log roller. + + + + hbase.master.event.waiting.time + 50 + Time to sleep between checks to see if a table event took place. + + + + hbase.regionserver.handler.count + 5 + + + hbase.master.info.port + -1 + The port for the hbase master web UI + Set to -1 if you do not want the info server to run. + + + + hbase.master.port + 0 + Always have masters and regionservers come up on port '0' so we don't clash over + default ports. + + + + hbase.regionserver.port + 0 + Always have masters and regionservers come up on port '0' so we don't clash over + default ports. + + + + hbase.ipc.client.fallback-to-simple-auth-allowed + true + + + + hbase.regionserver.info.port + -1 + The port for the hbase regionserver web UI + Set to -1 if you do not want the info server to run. + + + + hbase.regionserver.info.port.auto + true + Info server auto port bind. Enables automatic port + search if hbase.regionserver.info.port is already in use. + Enabled for testing to run multiple tests on one machine. + + + + hbase.regionserver.safemode + false + + Turn on/off safe mode in region server. Always on for production, always off + for tests. + + + + hbase.hregion.max.filesize + 67108864 + + Maximum desired file size for an HRegion. If filesize exceeds + value + (value / 2), the HRegion is split in two. Default: 256M. + + Keep the maximum filesize small so we split more often in tests. + + + + hadoop.log.dir + ${user.dir}/../logs + + + hbase.zookeeper.property.clientPort + 21818 + Property from ZooKeeper's config zoo.cfg. + The port at which the clients will connect. + + + + hbase.defaults.for.version.skip + true + + Set to true to skip the 'hbase.defaults.for.version'. + Setting this to true can be useful in contexts other than + the other side of a maven generation; i.e. running in an + ide. 
You'll want to set this boolean to true to avoid + seeing the RuntimeException complaint: "hbase-default.xml file + seems to be for and old version of HBase (@@@VERSION@@@), this + version is X.X.X-SNAPSHOT" + + + + hbase.table.sanity.checks + false + Skip sanity checks in tests + + + diff --git a/hbase-mapreduce/src/test/resources/hdfs-site.xml b/hbase-mapreduce/src/test/resources/hdfs-site.xml new file mode 100644 index 00000000000..03be0c72c6d --- /dev/null +++ b/hbase-mapreduce/src/test/resources/hdfs-site.xml @@ -0,0 +1,32 @@ + + + + + + + + dfs.namenode.fs-limits.min-block-size + 0 + + \ No newline at end of file diff --git a/hbase-mapreduce/src/test/resources/log4j.properties b/hbase-mapreduce/src/test/resources/log4j.properties new file mode 100644 index 00000000000..c322699ced2 --- /dev/null +++ b/hbase-mapreduce/src/test/resources/log4j.properties @@ -0,0 +1,68 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Define some default values that can be overridden by system properties +hbase.root.logger=INFO,console +hbase.log.dir=. +hbase.log.file=hbase.log + +# Define the root logger to the system property "hbase.root.logger". +log4j.rootLogger=${hbase.root.logger} + +# Logging Threshold +log4j.threshold=ALL + +# +# Daily Rolling File Appender +# +log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender +log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file} + +# Rollver at midnight +log4j.appender.DRFA.DatePattern=.yyyy-MM-dd + +# 30-day backup +#log4j.appender.DRFA.MaxBackupIndex=30 +log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout +# Debugging Pattern format +log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n + + +# +# console +# Add "console" to rootlogger above if you want to use this +# +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.target=System.err +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n + +# Custom Logging levels + +#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG + +log4j.logger.org.apache.hadoop=WARN +log4j.logger.org.apache.zookeeper=ERROR +log4j.logger.org.apache.hadoop.hbase=DEBUG + +#These settings are workarounds against spurious logs from the minicluster. +#See HBASE-4709 +log4j.logger.org.apache.hadoop.metrics2.impl.MetricsConfig=WARN +log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSinkAdapter=WARN +log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=WARN +log4j.logger.org.apache.hadoop.metrics2.util.MBeans=WARN +# Enable this to get detailed connection error/retry logging. 
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE diff --git a/hbase-mapreduce/src/test/resources/mapred-queues.xml b/hbase-mapreduce/src/test/resources/mapred-queues.xml new file mode 100644 index 00000000000..2e33ae19dd1 --- /dev/null +++ b/hbase-mapreduce/src/test/resources/mapred-queues.xml @@ -0,0 +1,75 @@ + + + + + + + + + + default + + + + + + + running + + + * + + + * + + + + diff --git a/hbase-mapreduce/src/test/resources/mapred-site.xml b/hbase-mapreduce/src/test/resources/mapred-site.xml new file mode 100644 index 00000000000..787ffb75511 --- /dev/null +++ b/hbase-mapreduce/src/test/resources/mapred-site.xml @@ -0,0 +1,34 @@ + + + + + + mapred.map.child.java.opts + -Djava.awt.headless=true + + + + mapred.reduce.child.java.opts + -Djava.awt.headless=true + + + diff --git a/hbase-server/src/test/resources/org/apache/hadoop/hbase/PerformanceEvaluation_Counter.properties b/hbase-mapreduce/src/test/resources/org/apache/hadoop/hbase/PerformanceEvaluation_Counter.properties similarity index 90% rename from hbase-server/src/test/resources/org/apache/hadoop/hbase/PerformanceEvaluation_Counter.properties rename to hbase-mapreduce/src/test/resources/org/apache/hadoop/hbase/PerformanceEvaluation_Counter.properties index 6fca96ab0e8..802eb898c1d 100644 --- a/hbase-server/src/test/resources/org/apache/hadoop/hbase/PerformanceEvaluation_Counter.properties +++ b/hbase-mapreduce/src/test/resources/org/apache/hadoop/hbase/PerformanceEvaluation_Counter.properties @@ -20,9 +20,9 @@ CounterGroupName= HBase Performance Evaluation ELAPSED_TIME.name= Elapsed time in milliseconds -ROWS.name= Row count +ROWS.name= Row count # ResourceBundle properties file for Map-Reduce counters CounterGroupName= HBase Performance Evaluation ELAPSED_TIME.name= Elapsed time in milliseconds -ROWS.name= Row count +ROWS.name= Row count diff --git a/hbase-server/src/test/resources/org/apache/hadoop/hbase/mapreduce/exportedTableIn94Format b/hbase-mapreduce/src/test/resources/org/apache/hadoop/hbase/mapreduce/exportedTableIn94Format similarity index 100% rename from hbase-server/src/test/resources/org/apache/hadoop/hbase/mapreduce/exportedTableIn94Format rename to hbase-mapreduce/src/test/resources/org/apache/hadoop/hbase/mapreduce/exportedTableIn94Format diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 7035756dd13..d09e324e962 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -210,6 +210,16 @@ org.apache.hbase hbase-server + + org.apache.hbase + hbase-mapreduce + + + org.apache.hbase + hbase-mapreduce + test-jar + test + org.apache.hbase hbase-hadoop-compat diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java index 3559ee0f01b..6ed170e09a9 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java @@ -220,8 +220,8 @@ public class PerformanceEvaluation extends Configured implements Tool { /** * This class works as the InputSplit of Performance Evaluation - * MapReduce InputFormat, and the Record Value of RecordReader. - * Each map task will only read one record from a PeInputSplit, + * MapReduce InputFormat, and the Record Value of RecordReader. + * Each map task will only read one record from a PeInputSplit, * the record value is the PeInputSplit itself. 
*/ public static class PeInputSplit extends InputSplit implements Writable { @@ -950,7 +950,7 @@ public class PerformanceEvaluation extends Configured implements Tool { static abstract class TableTest extends Test { protected Table table; - + public TableTest(Configuration conf, TestOptions options, Status status) { super(conf, options, status); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java index bcd433c6403..d520113ae12 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.util.FSUtils; *
<p>
* This also allows one to run the scan from an * online or offline hbase cluster. The snapshot files can be exported by using the - {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, + org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, * to a pure-hdfs cluster, and this scanner can be used to * run the scan directly over the snapshot files. The snapshot should not be deleted while there * are open scanners reading from snapshot files. @@ -60,7 +60,7 @@ import org.apache.hadoop.hbase.util.FSUtils; * snapshot files, the job has to be run as the HBase user or the user must have group or other * privileges in the filesystem (See HBASE-8369). Note that, given other users access to read from * snapshot/data files will completely circumvent the access control enforced by HBase. - * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat + * See org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat. */ @InterfaceAudience.Public public class TableSnapshotScanner extends AbstractClientScanner { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java index 3322e6c8770..535a34d7037 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableSnapshotScanner.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.Arrays; +import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -29,8 +30,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -45,7 +46,7 @@ import org.junit.experimental.categories.Category; @Category({LargeTests.class, ClientTests.class}) public class TestTableSnapshotScanner { - private static final Log LOG = LogFactory.getLog(TestTableSnapshotInputFormat.class); + private static final Log LOG = LogFactory.getLog(TestTableSnapshotScanner.class); private final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static final int NUM_REGION_SERVERS = 2; private static final byte[][] FAMILIES = {Bytes.toBytes("f1"), Bytes.toBytes("f2")}; @@ -55,6 +56,17 @@ public class TestTableSnapshotScanner { private FileSystem fs; private Path rootDir; + public static void blockUntilSplitFinished(HBaseTestingUtility util, TableName tableName, + int expectedRegionSize) throws Exception { + for (int i = 0; i < 100; i++) { + List<HRegionInfo> hRegionInfoList = util.getAdmin().getTableRegions(tableName); + if (hRegionInfoList.size() >= expectedRegionSize) { + break; + } + Thread.sleep(1000); + } + } + public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(NUM_REGION_SERVERS, true); @@ -129,7 +141,7 @@ public class TestTableSnapshotScanner { // split to 2 regions admin.split(tableName, Bytes.toBytes("eee")); - TestTableSnapshotInputFormat.blockUntilSplitFinished(UTIL, tableName, 2); + blockUntilSplitFinished(UTIL, tableName,
2); Path rootDir = FSUtils.getRootDir(UTIL.getConfiguration()); FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index b6ad2c9d687..b5b7a0c54e6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileTestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java index f64188790b2..a81d2680b6c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java @@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; -import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.TableNamespaceManager; @@ -336,7 +335,7 @@ public class TestNamespaceAuditor { byte[] columnFamily = Bytes.toBytes("info"); HTableDescriptor tableDescOne = new HTableDescriptor(tableTwo); tableDescOne.addFamily(new HColumnDescriptor(columnFamily)); - ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions); + ADMIN.createTable(tableDescOne, Bytes.toBytes("0"), Bytes.toBytes("9"), initialRegions); Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); try (Table table = connection.getTable(tableTwo)) { UTIL.loadNumericRows(table, Bytes.toBytes("info"), 1000, 1999); @@ -354,7 +353,7 @@ public class TestNamespaceAuditor { hris = ADMIN.getTableRegions(tableTwo); assertEquals(initialRegions - 1, hris.size()); Collections.sort(hris); - ADMIN.split(tableTwo, Bytes.toBytes("500")); + ADMIN.split(tableTwo, Bytes.toBytes("3")); // Not much we can do here until we have split return a Future. 
Threads.sleep(5000); hris = ADMIN.getTableRegions(tableTwo); @@ -383,8 +382,7 @@ public class TestNamespaceAuditor { Collections.sort(hris); // verify that we cannot split HRegionInfo hriToSplit2 = hris.get(1); - ADMIN.split(tableTwo, - TableInputFormatBase.getSplitKey(hriToSplit2.getStartKey(), hriToSplit2.getEndKey(), true)); + ADMIN.split(tableTwo, Bytes.toBytes("6")); Thread.sleep(2000); assertEquals(initialRegions, ADMIN.getTableRegions(tableTwo).size()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java index 0aa39f6fe31..477c8708ee1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java @@ -42,7 +42,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.PerformanceEvaluation; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HTable; @@ -62,9 +61,11 @@ import org.junit.rules.TestName; public class TestHRegionFileSystem { private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final Log LOG = LogFactory.getLog(TestHRegionFileSystem.class); + + public static final byte[] FAMILY_NAME = Bytes.toBytes("info"); private static final byte[][] FAMILIES = { - Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-A")), - Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-B")) }; + Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), + Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; private static final TableName TABLE_NAME = TableName.valueOf("TestTable"); @Rule diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java index 07bb2b73e59..94991e1b1fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java @@ -17,15 +17,22 @@ */ package org.apache.hadoop.hbase.security; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.classification.InterfaceAudience; - import org.apache.hadoop.hbase.shaded.com.google.common.base.Strings; +import org.apache.hadoop.security.UserGroupInformation; + +import java.io.IOException; +import java.net.InetAddress; @InterfaceAudience.Private public class HBaseKerberosUtils { + private static final Log LOG = LogFactory.getLog(HBaseKerberosUtils.class); + public static final String KRB_PRINCIPAL = "hbase.regionserver.kerberos.principal"; public static final String MASTER_KRB_PRINCIPAL = "hbase.master.kerberos.principal"; public static final String KRB_KEYTAB_FILE = "hbase.regionserver.keytab.file"; @@ -81,4 +88,21 @@ public class HBaseKerberosUtils { conf.set(KRB_PRINCIPAL, System.getProperty(KRB_PRINCIPAL)); conf.set(MASTER_KRB_PRINCIPAL, System.getProperty(KRB_PRINCIPAL)); } + + public static UserGroupInformation 
loginAndReturnUGI(Configuration conf, String username) + throws IOException { + String hostname = InetAddress.getLocalHost().getHostName(); + String keyTabFileConfKey = "hbase." + username + ".keytab.file"; + String keyTabFileLocation = conf.get(keyTabFileConfKey); + String principalConfKey = "hbase." + username + ".kerberos.principal"; + String principal = org.apache.hadoop.security.SecurityUtil + .getServerPrincipal(conf.get(principalConfKey), hostname); + if (keyTabFileLocation == null || principal == null) { + LOG.warn("Principal or key tab file null for : " + principalConfKey + ", " + + keyTabFileConfKey); + } + UserGroupInformation ugi = + UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTabFileLocation); + return ugi; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java index 236994abead..0487bf4bd68 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java @@ -22,6 +22,8 @@ import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.fail; import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -29,6 +31,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; @@ -50,6 +53,17 @@ import org.apache.hadoop.hbase.regionserver.StoreFile; */ public class HFileTestUtil { + public static final String OPT_DATA_BLOCK_ENCODING_USAGE = + "Encoding algorithm (e.g. prefix " + + "compression) to use for data blocks in the test column family, " + + "one of " + Arrays.toString(DataBlockEncoding.values()) + "."; + public static final String OPT_DATA_BLOCK_ENCODING = + HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase(Locale.ROOT); + /** Column family used by the test */ + public static byte[] DEFAULT_COLUMN_FAMILY = Bytes.toBytes("test_cf"); + /** Column families used by the test */ + public static final byte[][] DEFAULT_COLUMN_FAMILIES = { DEFAULT_COLUMN_FAMILY }; + /** * Create an HFile with the given number of rows between a given * start key and end key @ family:qualifier. The value will be the key value. 
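As an aside for reviewers: the `loginAndReturnUGI` helper added above is consumed later in this patch by `MultiThreadedReaderWithACL` and `MultiThreadedUpdaterWithACL`. Below is a minimal sketch of the intended call pattern; the `reader1` user, the `TestTable` table, and the assumption that `hbase.reader1.keytab.file` and `hbase.reader1.kerberos.principal` are set in the configuration are illustrative, not part of the change.

[source,java]
----
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.security.UserGroupInformation;

public class SecureReadSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    // loginAndReturnUGI derives its conf keys from the user name, so this call
    // expects hbase.reader1.keytab.file and hbase.reader1.kerberos.principal.
    UserGroupInformation ugi = HBaseKerberosUtils.loginAndReturnUGI(conf, "reader1");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // Everything inside run() executes as the Kerberos identity above.
        try (Connection connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(TableName.valueOf("TestTable"))) {
          table.get(new Get(Bytes.toBytes("row-0")));
        }
        return null;
      }
    });
  }
}
----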
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java index 2ea01bb5024..0b3c612e22f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithTags.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.util.MultiThreadedAction.DefaultDataGenerator; +import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; @InterfaceAudience.Private public class LoadTestDataGeneratorWithTags extends DefaultDataGenerator { @@ -74,7 +75,7 @@ public class LoadTestDataGeneratorWithTags extends DefaultDataGenerator { List<Tag> tags; for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); - byte[] tag = LoadTestTool.generateData(random, + byte[] tag = LoadTestDataGenerator.generateData(random, minTagLength + random.nextInt(maxTagLength - minTagLength)); tags = new ArrayList<>(); for (int n = 0; n < numTags; n++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index 1d2e9a6b506..6550baa7aff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -49,7 +49,7 @@ import org.apache.hadoop.util.StringUtils; /** * Common base class for reader and writer parts of multi-thread HBase load - * test ({@link LoadTestTool}). + * test (See LoadTestTool).
*/ public abstract class MultiThreadedAction { private static final Log LOG = LogFactory.getLog(MultiThreadedAction.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java index 1e7e341b5c7..e9511753721 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.security.HBaseKerberosUtils; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; import org.apache.hadoop.security.UserGroupInformation; @@ -121,7 +122,7 @@ public class MultiThreadedReaderWithACL extends MultiThreadedReader { UserGroupInformation realUserUgi; if(!users.containsKey(userNames[mod])) { if(User.isHBaseSecurityEnabled(conf)) { - realUserUgi = LoadTestTool.loginAndReturnUGI(conf, userNames[mod]); + realUserUgi = HBaseKerberosUtils.loginAndReturnUGI(conf, userNames[mod]); } else { realUserUgi = UserGroupInformation.createRemoteUser(userNames[mod]); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java index 40e23fb111f..9d9bb63cffa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdaterWithACL.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.security.HBaseKerberosUtils; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; import org.apache.hadoop.security.UserGroupInformation; @@ -138,7 +139,7 @@ public class MultiThreadedUpdaterWithACL extends MultiThreadedUpdater { try { if (!users.containsKey(userNames[mod])) { if (User.isHBaseSecurityEnabled(conf)) { - realUserUgi = LoadTestTool.loginAndReturnUGI(conf, userNames[mod]); + realUserUgi = HBaseKerberosUtils.loginAndReturnUGI(conf, userNames[mod]); } else { realUserUgi = UserGroupInformation.createRemoteUser(userNames[mod]); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java index 6beb2e61655..7972855d0c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java @@ -81,7 +81,7 @@ public class RestartMetaTest extends AbstractHBaseTool { // start the writers LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator( minColDataSize, maxColDataSize, minColsPerKey, maxColsPerKey, - LoadTestTool.DEFAULT_COLUMN_FAMILY); + HFileTestUtil.DEFAULT_COLUMN_FAMILY); MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, TABLE_NAME); writer.setMultiPut(true); writer.start(startKey, endKey, numThreads); @@ -101,7 +101,7 @@ public class 
RestartMetaTest extends AbstractHBaseTool { // create tables if needed HBaseTestingUtility.createPreSplitLoadTestTable(conf, TABLE_NAME, - LoadTestTool.DEFAULT_COLUMN_FAMILY, Compression.Algorithm.NONE, + HFileTestUtil.DEFAULT_COLUMN_FAMILY, Compression.Algorithm.NONE, DataBlockEncoding.NONE); LOG.debug("Loading data....\n\n"); @@ -143,8 +143,8 @@ public class RestartMetaTest extends AbstractHBaseTool { @Override protected void addOptions() { addOptWithArg(OPT_NUM_RS, "Number of Region Servers"); - addOptWithArg(LoadTestTool.OPT_DATA_BLOCK_ENCODING, - LoadTestTool.OPT_DATA_BLOCK_ENCODING_USAGE); + addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, + HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java index bf7bf45ddb6..2deba0021c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/test/LoadTestDataGenerator.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hbase.util.test; import java.io.IOException; +import java.util.Random; import java.util.Set; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -56,6 +57,28 @@ public abstract class LoadTestDataGenerator { this.kvGenerator = new LoadTestKVGenerator(minValueSize, maxValueSize); } + public static byte[] generateData(final Random r, int length) { + byte [] b = new byte [length]; + int i = 0; + + for(i = 0; i < (length-8); i += 8) { + b[i] = (byte) (65 + r.nextInt(26)); + b[i+1] = b[i]; + b[i+2] = b[i]; + b[i+3] = b[i]; + b[i+4] = b[i]; + b[i+5] = b[i]; + b[i+6] = b[i]; + b[i+7] = b[i]; + } + + byte a = (byte) (65 + r.nextInt(26)); + for(; i < length; i++) { + b[i] = a; + } + return b; + } + /** * initialize the LoadTestDataGenerator * diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml index aa47947299d..3761d59ac9f 100644 --- a/hbase-spark/pom.xml +++ b/hbase-spark/pom.xml @@ -491,6 +491,10 @@ test test-jar + + org.apache.hbase + hbase-mapreduce + com.google.protobuf protobuf-java diff --git a/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java b/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java index 93cd939c10b..bfacbe8511d 100644 --- a/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java +++ b/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java @@ -27,7 +27,6 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; diff --git a/pom.xml b/pom.xml index be0be9970ae..f973cd7fdc9 100644 --- a/pom.xml +++ b/pom.xml @@ -62,6 +62,7 @@ hbase-replication + hbase-mapreduce hbase-resource-bundle hbase-server hbase-thrift @@ -1581,6 +1582,18 @@ test-jar test + + hbase-mapreduce + org.apache.hbase + ${project.version} + + + hbase-mapreduce + org.apache.hbase + ${project.version} + test-jar + test + hbase-endpoint org.apache.hbase diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index 6181b13e346..f96cd6c8d3d 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ 
-2478,7 +2478,7 @@ void rename(Admin admin, String oldTableName, TableName newTableName) { RegionServer Grouping (A.K.A `rsgroup`) is an advanced feature for partitioning regionservers into distinctive groups for strict isolation. It should only be used by users who are sophisticated enough to understand the -full implications and have a sufficient background in managing HBase clusters. +full implications and have a sufficient background in managing HBase clusters. It was developed by Yahoo! and they run it at scale on their large grid cluster. See link:http://www.slideshare.net/HBaseCon/keynote-apache-hbase-at-yahoo-scale[HBase at Yahoo! Scale]. @@ -2491,20 +2491,20 @@ rsgroup at a time. By default, all tables and regionservers belong to the APIs. A custom balancer implementation tracks assignments per rsgroup and makes sure to move regions to the relevant regionservers in that rsgroup. The rsgroup information is stored in a regular HBase table, and a zookeeper-based read-only -cache is used at cluster bootstrap time. +cache is used at cluster bootstrap time. -To enable, add the following to your hbase-site.xml and restart your Master: +To enable, add the following to your hbase-site.xml and restart your Master: [source,xml] ---- - <property> - <name>hbase.coprocessor.master.classes</name> - <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value> - </property> - <property> - <name>hbase.master.loadbalancer.class</name> - <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value> - </property> + <property> + <name>hbase.coprocessor.master.classes</name> + <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value> + </property> + <property> + <name>hbase.master.loadbalancer.class</name> + <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value> + </property> ---- Then use the shell _rsgroup_ commands to create and manipulate RegionServer @@ -2514,7 +2514,7 @@ rsgroup commands available in the hbase shell type: [source, bash] ---- hbase(main):008:0> help 'rsgroup' - Took 0.5610 seconds + Took 0.5610 seconds ---- High level, you create a rsgroup that is other than the `default` group using @@ -2531,8 +2531,8 @@ Here is example using a few of the rsgroup commands. To add a group, do as foll [source, bash] ---- - hbase(main):008:0> add_rsgroup 'my_group' - Took 0.5610 seconds + hbase(main):008:0> add_rsgroup 'my_group' + Took 0.5610 seconds ---- @@ -2556,11 +2556,11 @@ ERROR: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registere ==== Add a server (specified by hostname + port) to the just-made group using the -_move_servers_rsgroup_ command as follows, with a fuller session sketched below: +_move_servers_rsgroup_ command as follows, with a fuller session sketched below: [source, bash] ---- - hbase(main):010:0> move_servers_rsgroup 'my_group',['k.att.net:51129'] + hbase(main):010:0> move_servers_rsgroup 'my_group',['k.att.net:51129'] ---- .Hostname and Port vs ServerName
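To round out the rsgroup walkthrough above: once servers are in the group, tables can be moved onto it and the assignment inspected. A hedged sketch of such a follow-on shell session; the table name is illustrative, and the commands assume the rsgroup endpoint enabled earlier.

[source, bash]
----
hbase(main):011:0> move_tables_rsgroup 'my_group',['myTable']
hbase(main):012:0> get_rsgroup 'my_group'
hbase(main):013:0> balance_rsgroup 'my_group'
----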