HBASE-14184 Fix indention and type-o in JavaHBaseContext (Ted Malaska)

This commit is contained in:
tedyu 2015-08-07 11:02:24 -07:00
parent f06daaf010
commit e53d2481ee
1 changed file with 288 additions and 293 deletions

View File

@ -17,22 +17,15 @@
package org.apache.hadoop.hbase.spark
import org.apache.hadoop.hbase.TableName
import org.apache.spark.api.java.JavaSparkContext
import org.apache.hadoop.conf.Configuration
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.java.function.VoidFunction
import org.apache.spark.api.java.function.Function
import org.apache.hadoop.hbase.client.Connection
import org.apache.spark.streaming.api.java.JavaDStream
import org.apache.spark.api.java.function.FlatMapFunction
import scala.collection.JavaConversions._
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.Get
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{Connection, Delete, Get, Put, Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
import org.apache.spark.api.java.function.{FlatMapFunction, Function, VoidFunction}
import org.apache.spark.streaming.api.java.JavaDStream
import scala.collection.JavaConversions._
import scala.reflect.ClassTag
/**
@ -64,8 +57,9 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
f: VoidFunction[(java.util.Iterator[T], Connection)]) = {
hbaseContext.foreachPartition(javaRdd.rdd,
(it:Iterator[T], conn:Connection) =>
{ f.call((it, conn)) })
(it: Iterator[T], conn: Connection) => {
f.call((it, conn))
})
}
/**
@ -155,16 +149,16 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
* The complexity of managing the HConnection is
* removed from the developer
*
* @param javaDdd Original JavaRDD with data to iterate over
* @param javaRdd Original JavaRDD with data to iterate over
* @param tableName The name of the table to put into
* @param f Function to convert a value in the JavaRDD
* to a HBase Put
*/
def bulkPut[T](javaDdd: JavaRDD[T],
def bulkPut[T](javaRdd: JavaRDD[T],
tableName: TableName,
f: Function[(T), Put]) {
hbaseContext.bulkPut(javaDdd.rdd, tableName, (t:T) => f.call(t))
hbaseContext.bulkPut(javaRdd.rdd, tableName, (t: T) => f.call(t))
}
/**
@ -218,17 +212,17 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
* The complexity of managing the HConnection is
* removed from the developer
*
* @param javaDstream Original DStream with data to iterate over
* @param javaDStream Original DStream with data to iterate over
* @param tableName The name of the table to delete from
* @param f Function to convert a value in the JavaDStream to a
* HBase Delete
* @param batchSize The number of deletes to be sent at once
*/
def streamBulkDelete[T](javaDstream: JavaDStream[T],
def streamBulkDelete[T](javaDStream: JavaDStream[T],
tableName: TableName,
f: Function[T, Delete],
batchSize: Integer) = {
hbaseContext.streamBulkDelete(javaDstream.dstream, tableName,
hbaseContext.streamBulkDelete(javaDStream.dstream, tableName,
(t: T) => f.call(t),
batchSize)
}
@ -247,7 +241,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
* @param convertResult This will convert the HBase Result object to
* what ever the user wants to put in the resulting
* JavaRDD
* return new JavaRDD that is created by the Get to HBase
* @return New JavaRDD that is created by the Get to HBase
*/
def bulkGet[T, U](tableName: TableName,
batchSize: Integer,
@ -259,7 +253,9 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
batchSize,
javaRdd.rdd,
(t: T) => makeGet.call(t),
(r:Result) => {convertResult.call(r)})(fakeClassTag[U]))(fakeClassTag[U])
(r: Result) => {
convertResult.call(r)
})(fakeClassTag[U]))(fakeClassTag[U])
}
@ -279,7 +275,7 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
* @param convertResult This will convert the HBase Result object to
* what ever the user wants to put in the resulting
* JavaDStream
* return new JavaDStream that is created by the Get to HBase
* @return New JavaDStream that is created by the Get to HBase
*/
def streamBulkGet[T, U](tableName: TableName,
batchSize: Integer,
@ -297,11 +293,11 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
* This function will use the native HBase TableInputFormat with the
* given scan object to generate a new JavaRDD
*
* @param tableName the name of the table to scan
* @param scans the HBase scan object to use to read data from HBase
* @param f function to convert a Result object from HBase into
* what the user wants in the final generated JavaRDD
* @return new JavaRDD with results from scan
* @param tableName The name of the table to scan
* @param scans The HBase scan object to use to read data from HBase
* @param f Function to convert a Result object from HBase into
* What the user wants in the final generated JavaRDD
* @return New JavaRDD with results from scan
*/
def hbaseRDD[U](tableName: TableName,
scans: Scan,
@ -318,10 +314,9 @@ class JavaHBaseContext(@transient jsc: JavaSparkContext,
* A overloaded version of HBaseContext hbaseRDD that define the
* type of the resulting JavaRDD
*
* @param tableName the name of the table to scan
* @param scans the HBase scan object to use to read data from HBase
* @param tableName The name of the table to scan
* @param scans The HBase scan object to use to read data from HBase
* @return New JavaRDD with results from scan
*
*/
def hbaseRDD(tableName: TableName,
scans: Scan):