HBASE-819 Remove DOS-style ^M carriage returns from all code where found (Jonathan Gray via Jim Kellerman)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@685009 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
6cab23c4a0
commit
3d6b96aa48
1962
CHANGES.txt
1962
CHANGES.txt
File diff suppressed because it is too large
Load Diff
|
@ -1,235 +1,235 @@
|
|||
/**
|
||||
* Copyright 2007 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase;
|
||||
|
||||
import org.apache.hadoop.hbase.ipc.HRegionInterface;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* HConstants holds a bunch of HBase-related constants
|
||||
*/
|
||||
public interface HConstants {
|
||||
|
||||
/** long constant for zero */
|
||||
static final Long ZERO_L = Long.valueOf(0L);
|
||||
|
||||
static final String NINES = "99999999999999";
|
||||
static final String ZEROES = "00000000000000";
|
||||
|
||||
// For migration
|
||||
|
||||
/** name of version file */
|
||||
static final String VERSION_FILE_NAME = "hbase.version";
|
||||
|
||||
/**
|
||||
* Current version of file system
|
||||
* Version 4 supports only one kind of bloom filter
|
||||
*/
|
||||
public static final String FILE_SYSTEM_VERSION = "4";
|
||||
|
||||
// Configuration parameters
|
||||
|
||||
// TODO: URL for hbase master like hdfs URLs with host and port.
|
||||
// Like jdbc URLs? URLs could be used to refer to table cells?
|
||||
// jdbc:mysql://[host][,failoverhost...][:port]/[database]
|
||||
// jdbc:mysql://[host][,failoverhost...][:port]/[database][?propertyName1][=propertyValue1][&propertyName2][=propertyValue2]...
|
||||
|
||||
// Key into HBaseConfiguration for the hbase.master address.
|
||||
// TODO: Support 'local': i.e. default of all running in single
|
||||
// process. Same for regionserver. TODO: Is having HBase homed
|
||||
// on port 60k OK?
|
||||
|
||||
/** Parameter name for master address */
|
||||
static final String MASTER_ADDRESS = "hbase.master";
|
||||
|
||||
/** default host address */
|
||||
static final String DEFAULT_HOST = "0.0.0.0";
|
||||
|
||||
/** default port that the master listens on */
|
||||
static final int DEFAULT_MASTER_PORT = 60000;
|
||||
|
||||
/** Default master address */
|
||||
static final String DEFAULT_MASTER_ADDRESS = DEFAULT_HOST + ":" +
|
||||
DEFAULT_MASTER_PORT;
|
||||
|
||||
/** default port for master web api */
|
||||
static final int DEFAULT_MASTER_INFOPORT = 60010;
|
||||
|
||||
/** Parameter name for hbase.regionserver address. */
|
||||
static final String REGIONSERVER_ADDRESS = "hbase.regionserver";
|
||||
|
||||
/** Default region server address */
|
||||
static final String DEFAULT_REGIONSERVER_ADDRESS = DEFAULT_HOST + ":60020";
|
||||
|
||||
/** default port for region server web api */
|
||||
static final int DEFAULT_REGIONSERVER_INFOPORT = 60030;
|
||||
|
||||
/** Parameter name for what region server interface to use. */
|
||||
static final String REGION_SERVER_CLASS = "hbase.regionserver.class";
|
||||
|
||||
/** Parameter name for what region server implementation to use. */
|
||||
static final String REGION_SERVER_IMPL= "hbase.regionserver.impl";
|
||||
|
||||
/** Default region server interface class name. */
|
||||
static final String DEFAULT_REGION_SERVER_CLASS = HRegionInterface.class.getName();
|
||||
|
||||
/** Parameter name for how often threads should wake up */
|
||||
static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
|
||||
|
||||
/** Parameter name for HBase instance root directory */
|
||||
static final String HBASE_DIR = "hbase.rootdir";
|
||||
|
||||
/** Used to construct the name of the log directory for a region server */
|
||||
static final String HREGION_LOGDIR_NAME = "log";
|
||||
|
||||
/** Name of old log file for reconstruction */
|
||||
static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log";
|
||||
|
||||
/** Default maximum file size */
|
||||
static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
|
||||
|
||||
/** Default size of a reservation block */
|
||||
static final int DEFAULT_SIZE_RESERVATION_BLOCK = 1024 * 1024 * 5;
|
||||
|
||||
// Always store the location of the root table's HRegion.
|
||||
// This HRegion is never split.
|
||||
|
||||
// region name = table + startkey + regionid. This is the row key.
|
||||
// each row in the root and meta tables describes exactly 1 region
|
||||
// Do we ever need to know all the information that we are storing?
|
||||
|
||||
// Note that the name of the root table starts with "-" and the name of the
|
||||
// meta table starts with "." Why? it's a trick. It turns out that when we
|
||||
// store region names in memory, we use a SortedMap. Since "-" sorts before
|
||||
// "." (and since no other table name can start with either of these
|
||||
// characters, the root region will always be the first entry in such a Map,
|
||||
// followed by all the meta regions (which will be ordered by their starting
|
||||
// row key as well), followed by all user tables. So when the Master is
|
||||
// choosing regions to assign, it will always choose the root region first,
|
||||
// followed by the meta regions, followed by user regions. Since the root
|
||||
// and meta regions always need to be on-line, this ensures that they will
|
||||
// be the first to be reassigned if the server(s) they are being served by
|
||||
// should go down.
|
||||
|
||||
/** The root table's name.*/
|
||||
static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
|
||||
|
||||
/** The META table's name. */
|
||||
static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");
|
||||
|
||||
// Defines for the column names used in both ROOT and META HBase 'meta' tables.
|
||||
|
||||
/** The ROOT and META column family (string) */
|
||||
static final String COLUMN_FAMILY_STR = "info:";
|
||||
|
||||
/** The META historian column family (string) */
|
||||
static final String COLUMN_FAMILY_HISTORIAN_STR = "historian:";
|
||||
|
||||
/** The ROOT and META column family */
|
||||
static final byte [] COLUMN_FAMILY = Bytes.toBytes(COLUMN_FAMILY_STR);
|
||||
|
||||
/** The META historian column family */
|
||||
static final byte [] COLUMN_FAMILY_HISTORIAN = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR);
|
||||
|
||||
/** Array of meta column names */
|
||||
static final byte[][] COLUMN_FAMILY_ARRAY = new byte[][] {COLUMN_FAMILY};
|
||||
|
||||
/** ROOT/META column family member - contains HRegionInfo */
|
||||
static final byte [] COL_REGIONINFO =
|
||||
Bytes.toBytes(COLUMN_FAMILY_STR + "regioninfo");
|
||||
|
||||
/** Array of column - contains HRegionInfo */
|
||||
static final byte[][] COL_REGIONINFO_ARRAY = new byte[][] {COL_REGIONINFO};
|
||||
|
||||
/** ROOT/META column family member - contains HServerAddress.toString() */
|
||||
static final byte[] COL_SERVER = Bytes.toBytes(COLUMN_FAMILY_STR + "server");
|
||||
|
||||
/** ROOT/META column family member - contains server start code (a long) */
|
||||
static final byte [] COL_STARTCODE =
|
||||
Bytes.toBytes(COLUMN_FAMILY_STR + "serverstartcode");
|
||||
|
||||
/** the lower half of a split region */
|
||||
static final byte [] COL_SPLITA = Bytes.toBytes(COLUMN_FAMILY_STR + "splitA");
|
||||
|
||||
/** the upper half of a split region */
|
||||
static final byte [] COL_SPLITB = Bytes.toBytes(COLUMN_FAMILY_STR + "splitB");
|
||||
|
||||
/** All the columns in the catalog -ROOT- and .META. tables.
|
||||
*/
|
||||
static final byte[][] ALL_META_COLUMNS = {COL_REGIONINFO, COL_SERVER,
|
||||
COL_STARTCODE, COL_SPLITA, COL_SPLITB};
|
||||
|
||||
// Other constants
|
||||
|
||||
/**
|
||||
* An empty instance.
|
||||
*/
|
||||
static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
|
||||
|
||||
/**
|
||||
* Used by scanners, etc when they want to start at the beginning of a region
|
||||
*/
|
||||
static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
|
||||
|
||||
/**
|
||||
* Last row in a table.
|
||||
*/
|
||||
static final byte [] EMPTY_END_ROW = EMPTY_START_ROW;
|
||||
|
||||
/**
|
||||
* Used by scanners and others when they're trying to detect the end of a
|
||||
* table
|
||||
*/
|
||||
static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY;
|
||||
|
||||
/** When we encode strings, we always specify UTF8 encoding */
|
||||
static final String UTF8_ENCODING = "UTF-8";
|
||||
|
||||
/**
|
||||
* Timestamp to use when we want to refer to the latest cell.
|
||||
* This is the timestamp sent by clients when no timestamp is specified on
|
||||
* commit.
|
||||
*/
|
||||
static final long LATEST_TIMESTAMP = Long.MAX_VALUE;
|
||||
|
||||
/**
|
||||
* Define for 'return-all-versions'.
|
||||
*/
|
||||
static final int ALL_VERSIONS = Integer.MAX_VALUE;
|
||||
|
||||
/**
|
||||
* Unlimited time-to-live.
|
||||
*/
|
||||
static final int FOREVER = -1;
|
||||
|
||||
public static final String HBASE_CLIENT_RETRIES_NUMBER_KEY =
|
||||
"hbase.client.retries.number";
|
||||
public static final int DEFAULT_CLIENT_RETRIES = 5;
|
||||
|
||||
public static final String NAME = "NAME";
|
||||
public static final String VERSIONS = "VERSIONS";
|
||||
public static final String IN_MEMORY = "IN_MEMORY";
|
||||
|
||||
/**
|
||||
* This is a retry backoff multiplier table similar to the BSD TCP syn
|
||||
* backoff table, a bit more aggressive than simple exponential backoff.
|
||||
*/
|
||||
public static int RETRY_BACKOFF[] = { 1, 1, 1, 1, 2, 4, 8, 16, 32, 64 };
|
||||
/**
|
||||
* Copyright 2007 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase;
|
||||
|
||||
import org.apache.hadoop.hbase.ipc.HRegionInterface;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* HConstants holds a bunch of HBase-related constants
|
||||
*/
|
||||
public interface HConstants {
|
||||
|
||||
/** long constant for zero */
|
||||
static final Long ZERO_L = Long.valueOf(0L);
|
||||
|
||||
static final String NINES = "99999999999999";
|
||||
static final String ZEROES = "00000000000000";
|
||||
|
||||
// For migration
|
||||
|
||||
/** name of version file */
|
||||
static final String VERSION_FILE_NAME = "hbase.version";
|
||||
|
||||
/**
|
||||
* Current version of file system
|
||||
* Version 4 supports only one kind of bloom filter
|
||||
*/
|
||||
public static final String FILE_SYSTEM_VERSION = "4";
|
||||
|
||||
// Configuration parameters
|
||||
|
||||
// TODO: URL for hbase master like hdfs URLs with host and port.
|
||||
// Like jdbc URLs? URLs could be used to refer to table cells?
|
||||
// jdbc:mysql://[host][,failoverhost...][:port]/[database]
|
||||
// jdbc:mysql://[host][,failoverhost...][:port]/[database][?propertyName1][=propertyValue1][&propertyName2][=propertyValue2]...
|
||||
|
||||
// Key into HBaseConfiguration for the hbase.master address.
|
||||
// TODO: Support 'local': i.e. default of all running in single
|
||||
// process. Same for regionserver. TODO: Is having HBase homed
|
||||
// on port 60k OK?
|
||||
|
||||
/** Parameter name for master address */
|
||||
static final String MASTER_ADDRESS = "hbase.master";
|
||||
|
||||
/** default host address */
|
||||
static final String DEFAULT_HOST = "0.0.0.0";
|
||||
|
||||
/** default port that the master listens on */
|
||||
static final int DEFAULT_MASTER_PORT = 60000;
|
||||
|
||||
/** Default master address */
|
||||
static final String DEFAULT_MASTER_ADDRESS = DEFAULT_HOST + ":" +
|
||||
DEFAULT_MASTER_PORT;
|
||||
|
||||
/** default port for master web api */
|
||||
static final int DEFAULT_MASTER_INFOPORT = 60010;
|
||||
|
||||
/** Parameter name for hbase.regionserver address. */
|
||||
static final String REGIONSERVER_ADDRESS = "hbase.regionserver";
|
||||
|
||||
/** Default region server address */
|
||||
static final String DEFAULT_REGIONSERVER_ADDRESS = DEFAULT_HOST + ":60020";
|
||||
|
||||
/** default port for region server web api */
|
||||
static final int DEFAULT_REGIONSERVER_INFOPORT = 60030;
|
||||
|
||||
/** Parameter name for what region server interface to use. */
|
||||
static final String REGION_SERVER_CLASS = "hbase.regionserver.class";
|
||||
|
||||
/** Parameter name for what region server implementation to use. */
|
||||
static final String REGION_SERVER_IMPL= "hbase.regionserver.impl";
|
||||
|
||||
/** Default region server interface class name. */
|
||||
static final String DEFAULT_REGION_SERVER_CLASS = HRegionInterface.class.getName();
|
||||
|
||||
/** Parameter name for how often threads should wake up */
|
||||
static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";
|
||||
|
||||
/** Parameter name for HBase instance root directory */
|
||||
static final String HBASE_DIR = "hbase.rootdir";
|
||||
|
||||
/** Used to construct the name of the log directory for a region server */
|
||||
static final String HREGION_LOGDIR_NAME = "log";
|
||||
|
||||
/** Name of old log file for reconstruction */
|
||||
static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log";
|
||||
|
||||
/** Default maximum file size */
|
||||
static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
|
||||
|
||||
/** Default size of a reservation block */
|
||||
static final int DEFAULT_SIZE_RESERVATION_BLOCK = 1024 * 1024 * 5;
|
||||
|
||||
// Always store the location of the root table's HRegion.
|
||||
// This HRegion is never split.
|
||||
|
||||
// region name = table + startkey + regionid. This is the row key.
|
||||
// each row in the root and meta tables describes exactly 1 region
|
||||
// Do we ever need to know all the information that we are storing?
|
||||
|
||||
// Note that the name of the root table starts with "-" and the name of the
|
||||
// meta table starts with "." Why? it's a trick. It turns out that when we
|
||||
// store region names in memory, we use a SortedMap. Since "-" sorts before
|
||||
// "." (and since no other table name can start with either of these
|
||||
// characters, the root region will always be the first entry in such a Map,
|
||||
// followed by all the meta regions (which will be ordered by their starting
|
||||
// row key as well), followed by all user tables. So when the Master is
|
||||
// choosing regions to assign, it will always choose the root region first,
|
||||
// followed by the meta regions, followed by user regions. Since the root
|
||||
// and meta regions always need to be on-line, this ensures that they will
|
||||
// be the first to be reassigned if the server(s) they are being served by
|
||||
// should go down.
|
||||
|
||||
/** The root table's name.*/
|
||||
static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-");
|
||||
|
||||
/** The META table's name. */
|
||||
static final byte [] META_TABLE_NAME = Bytes.toBytes(".META.");
|
||||
|
||||
// Defines for the column names used in both ROOT and META HBase 'meta' tables.
|
||||
|
||||
/** The ROOT and META column family (string) */
|
||||
static final String COLUMN_FAMILY_STR = "info:";
|
||||
|
||||
/** The META historian column family (string) */
|
||||
static final String COLUMN_FAMILY_HISTORIAN_STR = "historian:";
|
||||
|
||||
/** The ROOT and META column family */
|
||||
static final byte [] COLUMN_FAMILY = Bytes.toBytes(COLUMN_FAMILY_STR);
|
||||
|
||||
/** The META historian column family */
|
||||
static final byte [] COLUMN_FAMILY_HISTORIAN = Bytes.toBytes(COLUMN_FAMILY_HISTORIAN_STR);
|
||||
|
||||
/** Array of meta column names */
|
||||
static final byte[][] COLUMN_FAMILY_ARRAY = new byte[][] {COLUMN_FAMILY};
|
||||
|
||||
/** ROOT/META column family member - contains HRegionInfo */
|
||||
static final byte [] COL_REGIONINFO =
|
||||
Bytes.toBytes(COLUMN_FAMILY_STR + "regioninfo");
|
||||
|
||||
/** Array of column - contains HRegionInfo */
|
||||
static final byte[][] COL_REGIONINFO_ARRAY = new byte[][] {COL_REGIONINFO};
|
||||
|
||||
/** ROOT/META column family member - contains HServerAddress.toString() */
|
||||
static final byte[] COL_SERVER = Bytes.toBytes(COLUMN_FAMILY_STR + "server");
|
||||
|
||||
/** ROOT/META column family member - contains server start code (a long) */
|
||||
static final byte [] COL_STARTCODE =
|
||||
Bytes.toBytes(COLUMN_FAMILY_STR + "serverstartcode");
|
||||
|
||||
/** the lower half of a split region */
|
||||
static final byte [] COL_SPLITA = Bytes.toBytes(COLUMN_FAMILY_STR + "splitA");
|
||||
|
||||
/** the upper half of a split region */
|
||||
static final byte [] COL_SPLITB = Bytes.toBytes(COLUMN_FAMILY_STR + "splitB");
|
||||
|
||||
/** All the columns in the catalog -ROOT- and .META. tables.
|
||||
*/
|
||||
static final byte[][] ALL_META_COLUMNS = {COL_REGIONINFO, COL_SERVER,
|
||||
COL_STARTCODE, COL_SPLITA, COL_SPLITB};
|
||||
|
||||
// Other constants
|
||||
|
||||
/**
|
||||
* An empty instance.
|
||||
*/
|
||||
static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
|
||||
|
||||
/**
|
||||
* Used by scanners, etc when they want to start at the beginning of a region
|
||||
*/
|
||||
static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
|
||||
|
||||
/**
|
||||
* Last row in a table.
|
||||
*/
|
||||
static final byte [] EMPTY_END_ROW = EMPTY_START_ROW;
|
||||
|
||||
/**
|
||||
* Used by scanners and others when they're trying to detect the end of a
|
||||
* table
|
||||
*/
|
||||
static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY;
|
||||
|
||||
/** When we encode strings, we always specify UTF8 encoding */
|
||||
static final String UTF8_ENCODING = "UTF-8";
|
||||
|
||||
/**
|
||||
* Timestamp to use when we want to refer to the latest cell.
|
||||
* This is the timestamp sent by clients when no timestamp is specified on
|
||||
* commit.
|
||||
*/
|
||||
static final long LATEST_TIMESTAMP = Long.MAX_VALUE;
|
||||
|
||||
/**
|
||||
* Define for 'return-all-versions'.
|
||||
*/
|
||||
static final int ALL_VERSIONS = Integer.MAX_VALUE;
|
||||
|
||||
/**
|
||||
* Unlimited time-to-live.
|
||||
*/
|
||||
static final int FOREVER = -1;
|
||||
|
||||
public static final String HBASE_CLIENT_RETRIES_NUMBER_KEY =
|
||||
"hbase.client.retries.number";
|
||||
public static final int DEFAULT_CLIENT_RETRIES = 5;
|
||||
|
||||
public static final String NAME = "NAME";
|
||||
public static final String VERSIONS = "VERSIONS";
|
||||
public static final String IN_MEMORY = "IN_MEMORY";
|
||||
|
||||
/**
|
||||
* This is a retry backoff multiplier table similar to the BSD TCP syn
|
||||
* backoff table, a bit more aggressive than simple exponential backoff.
|
||||
*/
|
||||
public static int RETRY_BACKOFF[] = { 1, 1, 1, 1, 2, 4, 8, 16, 32, 64 };
|
||||
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -1,210 +1,210 @@
|
|||
/**
|
||||
* Copyright 2007 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.ipc;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.hbase.filter.RowFilterInterface;
|
||||
import org.apache.hadoop.hbase.io.BatchUpdate;
|
||||
import org.apache.hadoop.hbase.io.Cell;
|
||||
import org.apache.hadoop.hbase.io.RowResult;
|
||||
|
||||
import org.apache.hadoop.ipc.VersionedProtocol;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.NotServingRegionException;
|
||||
|
||||
/**
|
||||
* Clients interact with HRegionServers using a handle to the HRegionInterface.
|
||||
*/
|
||||
public interface HRegionInterface extends VersionedProtocol {
|
||||
/**
|
||||
* Protocol version.
|
||||
* Upped to 3 when we went from Text to byte arrays for row and column names.
|
||||
*/
|
||||
public static final long versionID = 3L;
|
||||
|
||||
/**
|
||||
* Get metainfo about an HRegion
|
||||
*
|
||||
* @param regionName name of the region
|
||||
* @return HRegionInfo object for region
|
||||
* @throws NotServingRegionException
|
||||
*/
|
||||
public HRegionInfo getRegionInfo(final byte [] regionName)
|
||||
throws NotServingRegionException;
|
||||
|
||||
/**
|
||||
* Retrieve a single value from the specified region for the specified row
|
||||
* and column keys
|
||||
*
|
||||
* @param regionName name of region
|
||||
* @param row row key
|
||||
* @param column column key
|
||||
* @return alue for that region/row/column
|
||||
* @throws IOException
|
||||
*/
|
||||
public Cell get(final byte [] regionName, final byte [] row, final byte [] column)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Get the specified number of versions of the specified row and column
|
||||
*
|
||||
* @param regionName region name
|
||||
* @param row row key
|
||||
* @param column column key
|
||||
* @param numVersions number of versions to return
|
||||
* @return array of values
|
||||
* @throws IOException
|
||||
*/
|
||||
public Cell[] get(final byte [] regionName, final byte [] row,
|
||||
final byte [] column, final int numVersions)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Get the specified number of versions of the specified row and column with
|
||||
* the specified timestamp.
|
||||
*
|
||||
* @param regionName region name
|
||||
* @param row row key
|
||||
* @param column column key
|
||||
* @param timestamp timestamp
|
||||
* @param numVersions number of versions to return
|
||||
* @return array of values
|
||||
* @throws IOException
|
||||
*/
|
||||
public Cell[] get(final byte [] regionName, final byte [] row,
|
||||
final byte [] column, final long timestamp, final int numVersions)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Return all the data for the row that matches <i>row</i> exactly,
|
||||
* or the one that immediately preceeds it.
|
||||
*
|
||||
* @param regionName region name
|
||||
* @param row row key
|
||||
* @return map of values
|
||||
* @throws IOException
|
||||
*/
|
||||
public RowResult getClosestRowBefore(final byte [] regionName,
|
||||
final byte [] row)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Get selected columns for the specified row at a given timestamp.
|
||||
*
|
||||
* @param regionName region name
|
||||
* @param row row key
|
||||
* @return map of values
|
||||
* @throws IOException
|
||||
*/
|
||||
public RowResult getRow(final byte [] regionName, final byte [] row,
|
||||
final byte[][] columns, final long ts)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Applies a batch of updates via one RPC
|
||||
*
|
||||
* @param regionName name of the region to update
|
||||
* @param b BatchUpdate
|
||||
* @throws IOException
|
||||
*/
|
||||
public void batchUpdate(final byte [] regionName, final BatchUpdate b)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Delete all cells that match the passed row and column and whose
|
||||
* timestamp is equal-to or older than the passed timestamp.
|
||||
*
|
||||
* @param regionName region name
|
||||
* @param row row key
|
||||
* @param column column key
|
||||
* @param timestamp Delete all entries that have this timestamp or older
|
||||
* @throws IOException
|
||||
*/
|
||||
public void deleteAll(byte [] regionName, byte [] row, byte [] column,
|
||||
long timestamp)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Delete all cells that match the passed row and whose
|
||||
* timestamp is equal-to or older than the passed timestamp.
|
||||
*
|
||||
* @param regionName region name
|
||||
* @param row row key
|
||||
* @param timestamp Delete all entries that have this timestamp or older
|
||||
* @throws IOException
|
||||
*/
|
||||
public void deleteAll(byte [] regionName, byte [] row, long timestamp)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Delete all cells for a row with matching column family with timestamps
|
||||
* less than or equal to <i>timestamp</i>.
|
||||
*
|
||||
* @param regionName The name of the region to operate on
|
||||
* @param row The row to operate on
|
||||
* @param family The column family to match
|
||||
* @param timestamp Timestamp to match
|
||||
*/
|
||||
public void deleteFamily(byte [] regionName, byte [] row, byte [] family,
|
||||
long timestamp)
|
||||
throws IOException;
|
||||
|
||||
|
||||
//
|
||||
// remote scanner interface
|
||||
//
|
||||
|
||||
/**
|
||||
* Opens a remote scanner with a RowFilter.
|
||||
*
|
||||
* @param regionName name of region to scan
|
||||
* @param columns columns to scan. If column name is a column family, all
|
||||
* columns of the specified column family are returned. Its also possible
|
||||
* to pass a regex for column family name. A column name is judged to be
|
||||
* regex if it contains at least one of the following characters:
|
||||
* <code>\+|^&*$[]]}{)(</code>.
|
||||
* @param startRow starting row to scan
|
||||
* @param timestamp only return values whose timestamp is <= this value
|
||||
* @param filter RowFilter for filtering results at the row-level.
|
||||
*
|
||||
* @return scannerId scanner identifier used in other calls
|
||||
* @throws IOException
|
||||
*/
|
||||
public long openScanner(final byte [] regionName, final byte [][] columns,
|
||||
final byte [] startRow, long timestamp, RowFilterInterface filter)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Get the next set of values
|
||||
* @param scannerId clientId passed to openScanner
|
||||
* @return map of values
|
||||
* @throws IOException
|
||||
*/
|
||||
public RowResult next(long scannerId) throws IOException;
|
||||
|
||||
/**
|
||||
* Close a scanner
|
||||
*
|
||||
* @param scannerId the scanner id returned by openScanner
|
||||
* @throws IOException
|
||||
*/
|
||||
public void close(long scannerId) throws IOException;
|
||||
/**
|
||||
* Copyright 2007 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.ipc;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.hbase.filter.RowFilterInterface;
|
||||
import org.apache.hadoop.hbase.io.BatchUpdate;
|
||||
import org.apache.hadoop.hbase.io.Cell;
|
||||
import org.apache.hadoop.hbase.io.RowResult;
|
||||
|
||||
import org.apache.hadoop.ipc.VersionedProtocol;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.NotServingRegionException;
|
||||
|
||||
/**
|
||||
* Clients interact with HRegionServers using a handle to the HRegionInterface.
|
||||
*/
|
||||
public interface HRegionInterface extends VersionedProtocol {
|
||||
/**
|
||||
* Protocol version.
|
||||
* Upped to 3 when we went from Text to byte arrays for row and column names.
|
||||
*/
|
||||
public static final long versionID = 3L;
|
||||
|
||||
/**
|
||||
* Get metainfo about an HRegion
|
||||
*
|
||||
* @param regionName name of the region
|
||||
* @return HRegionInfo object for region
|
||||
* @throws NotServingRegionException
|
||||
*/
|
||||
public HRegionInfo getRegionInfo(final byte [] regionName)
|
||||
throws NotServingRegionException;
|
||||
|
||||
/**
|
||||
* Retrieve a single value from the specified region for the specified row
|
||||
* and column keys
|
||||
*
|
||||
* @param regionName name of region
|
||||
* @param row row key
|
||||
* @param column column key
|
||||
* @return alue for that region/row/column
|
||||
* @throws IOException
|
||||
*/
|
||||
public Cell get(final byte [] regionName, final byte [] row, final byte [] column)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Get the specified number of versions of the specified row and column
|
||||
*
|
||||
* @param regionName region name
|
||||
* @param row row key
|
||||
* @param column column key
|
||||
* @param numVersions number of versions to return
|
||||
* @return array of values
|
||||
* @throws IOException
|
||||
*/
|
||||
public Cell[] get(final byte [] regionName, final byte [] row,
|
||||
final byte [] column, final int numVersions)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Get the specified number of versions of the specified row and column with
|
||||
* the specified timestamp.
|
||||
*
|
||||
* @param regionName region name
|
||||
* @param row row key
|
||||
* @param column column key
|
||||
* @param timestamp timestamp
|
||||
* @param numVersions number of versions to return
|
||||
* @return array of values
|
||||
* @throws IOException
|
||||
*/
|
||||
public Cell[] get(final byte [] regionName, final byte [] row,
|
||||
final byte [] column, final long timestamp, final int numVersions)
|
||||
throws IOException;
|
||||
|
||||
/**
 * Return all the data for the row that matches <i>row</i> exactly,
 * or the one that immediately precedes it.
 *
 * @param regionName region name
 * @param row row key
 * @return map of values
 * @throws IOException
 */
public RowResult getClosestRowBefore(final byte [] regionName,
    final byte [] row)
    throws IOException;
|
||||
/**
 * Get selected columns for the specified row at a given timestamp.
 *
 * @param regionName region name
 * @param row row key
 * @param columns columns to fetch
 * @param ts timestamp; only cells at or before this time are considered
 * @return map of values
 * @throws IOException
 */
public RowResult getRow(final byte [] regionName, final byte [] row,
    final byte[][] columns, final long ts)
    throws IOException;
|
||||
/**
 * Applies a batch of updates via one RPC.
 *
 * @param regionName name of the region to update
 * @param b BatchUpdate holding the edits to apply
 * @throws IOException
 */
public void batchUpdate(final byte [] regionName, final BatchUpdate b)
    throws IOException;
|
||||
/**
 * Delete all cells that match the passed row and column and whose
 * timestamp is equal-to or older than the passed timestamp.
 *
 * @param regionName region name
 * @param row row key
 * @param column column key
 * @param timestamp Delete all entries that have this timestamp or older
 * @throws IOException
 */
public void deleteAll(byte [] regionName, byte [] row, byte [] column,
    long timestamp)
    throws IOException;
|
||||
/**
 * Delete all cells that match the passed row and whose
 * timestamp is equal-to or older than the passed timestamp.
 *
 * @param regionName region name
 * @param row row key
 * @param timestamp Delete all entries that have this timestamp or older
 * @throws IOException
 */
public void deleteAll(byte [] regionName, byte [] row, long timestamp)
    throws IOException;
|
||||
/**
 * Delete all cells for a row with matching column family with timestamps
 * less than or equal to <i>timestamp</i>.
 *
 * @param regionName The name of the region to operate on
 * @param row The row to operate on
 * @param family The column family to match
 * @param timestamp Timestamp to match
 * @throws IOException
 */
public void deleteFamily(byte [] regionName, byte [] row, byte [] family,
    long timestamp)
    throws IOException;
|
||||
|
||||
//
|
||||
// remote scanner interface
|
||||
//
|
||||
|
||||
/**
 * Opens a remote scanner with a RowFilter.
 *
 * @param regionName name of region to scan
 * @param columns columns to scan. If column name is a column family, all
 * columns of the specified column family are returned. It's also possible
 * to pass a regex for column family name. A column name is judged to be
 * regex if it contains at least one of the following characters:
 * <code>\+|^&amp;*$[]]}{)(</code>.
 * @param startRow starting row to scan
 * @param timestamp only return values whose timestamp is &lt;= this value
 * @param filter RowFilter for filtering results at the row-level.
 *
 * @return scannerId scanner identifier used in other calls
 * @throws IOException
 */
public long openScanner(final byte [] regionName, final byte [][] columns,
    final byte [] startRow, long timestamp, RowFilterInterface filter)
    throws IOException;
|
||||
/**
 * Get the next set of values from an open scanner.
 *
 * @param scannerId clientId passed to openScanner
 * @return map of values
 * @throws IOException
 */
public RowResult next(long scannerId) throws IOException;
|
||||
/**
 * Close a scanner.
 *
 * @param scannerId the scanner id returned by openScanner
 * @throws IOException
 */
public void close(long scannerId) throws IOException;
}
|
|
@ -1,151 +1,151 @@
|
|||
/**
|
||||
* Copyright 2007 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.hadoop.hbase.HBaseClusterTestCase;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.io.BatchUpdate;
|
||||
import org.apache.hadoop.hbase.io.Cell;
|
||||
import org.apache.hadoop.hbase.io.RowResult;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* Test batch updates
|
||||
*/
|
||||
public class TestBatchUpdate extends HBaseClusterTestCase {
|
||||
private static final String CONTENTS_STR = "contents:";
|
||||
private static final byte [] CONTENTS = Bytes.toBytes(CONTENTS_STR);
|
||||
private static final String SMALLFAM_STR = "smallfam:";
|
||||
private static final byte [] SMALLFAM = Bytes.toBytes(SMALLFAM_STR);
|
||||
private static final int SMALL_LENGTH = 1;
|
||||
private static final int NB_BATCH_ROWS = 10;
|
||||
private byte[] value;
|
||||
private byte[] smallValue;
|
||||
|
||||
private HTableDescriptor desc = null;
|
||||
private HTable table = null;
|
||||
|
||||
/**
|
||||
* @throws UnsupportedEncodingException
|
||||
*/
|
||||
public TestBatchUpdate() throws UnsupportedEncodingException {
|
||||
super();
|
||||
value = "abcd".getBytes(HConstants.UTF8_ENCODING);
|
||||
smallValue = "a".getBytes(HConstants.UTF8_ENCODING);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
this.desc = new HTableDescriptor("test");
|
||||
desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
|
||||
desc.addFamily(new HColumnDescriptor(SMALLFAM,
|
||||
HColumnDescriptor.DEFAULT_VERSIONS,
|
||||
HColumnDescriptor.DEFAULT_COMPRESSION,
|
||||
HColumnDescriptor.DEFAULT_IN_MEMORY,
|
||||
HColumnDescriptor.DEFAULT_BLOCKCACHE, SMALL_LENGTH,
|
||||
HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_BLOOMFILTER));
|
||||
HBaseAdmin admin = new HBaseAdmin(conf);
|
||||
admin.createTable(desc);
|
||||
table = new HTable(conf, desc.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws IOException
|
||||
*/
|
||||
public void testBatchUpdate() throws IOException {
|
||||
BatchUpdate bu = new BatchUpdate("row1");
|
||||
bu.put(CONTENTS, value);
|
||||
bu.delete(CONTENTS);
|
||||
table.commit(bu);
|
||||
|
||||
bu = new BatchUpdate("row2");
|
||||
bu.put(CONTENTS, value);
|
||||
table.commit(bu);
|
||||
|
||||
byte [][] columns = { CONTENTS };
|
||||
Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
|
||||
for (RowResult r : scanner) {
|
||||
for(Map.Entry<byte [], Cell> e: r.entrySet()) {
|
||||
System.out.println(r.getRow() + ": row: " + e.getKey() + " value: " +
|
||||
new String(e.getValue().getValue(), HConstants.UTF8_ENCODING));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testBatchUpdateMaxLength() {
|
||||
// Test for a single good value
|
||||
BatchUpdate batchUpdate = new BatchUpdate("row1");
|
||||
batchUpdate.put(SMALLFAM, value);
|
||||
try {
|
||||
table.commit(batchUpdate);
|
||||
fail("Value is too long, should throw exception");
|
||||
} catch (IOException e) {
|
||||
// This is expected
|
||||
}
|
||||
// Try to see if it's still inserted
|
||||
try {
|
||||
Cell cell = table.get("row1", SMALLFAM_STR);
|
||||
assertNull(cell);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail("This is unexpected");
|
||||
}
|
||||
// Try to put a good value
|
||||
batchUpdate = new BatchUpdate("row1");
|
||||
batchUpdate.put(SMALLFAM, smallValue);
|
||||
try {
|
||||
table.commit(batchUpdate);
|
||||
} catch (IOException e) {
|
||||
fail("Value is long enough, should not throw exception");
|
||||
}
|
||||
}
|
||||
|
||||
public void testRowsBatchUpdate() {
|
||||
ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
|
||||
for(int i = 0; i < NB_BATCH_ROWS; i++) {
|
||||
BatchUpdate batchUpdate = new BatchUpdate("row"+i);
|
||||
batchUpdate.put(CONTENTS, value);
|
||||
rowsUpdate.add(batchUpdate);
|
||||
}
|
||||
try {
|
||||
table.commit(rowsUpdate);
|
||||
|
||||
byte [][] columns = { CONTENTS };
|
||||
Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
|
||||
int nbRows = 0;
|
||||
for(RowResult row : scanner)
|
||||
nbRows++;
|
||||
assertEquals(NB_BATCH_ROWS, nbRows);
|
||||
} catch (IOException e) {
|
||||
fail("This is unexpected : " + e);
|
||||
}
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Copyright 2007 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.hadoop.hbase.HBaseClusterTestCase;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.io.BatchUpdate;
|
||||
import org.apache.hadoop.hbase.io.Cell;
|
||||
import org.apache.hadoop.hbase.io.RowResult;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* Test batch updates
|
||||
*/
|
||||
public class TestBatchUpdate extends HBaseClusterTestCase {
|
||||
private static final String CONTENTS_STR = "contents:";
|
||||
private static final byte [] CONTENTS = Bytes.toBytes(CONTENTS_STR);
|
||||
private static final String SMALLFAM_STR = "smallfam:";
|
||||
private static final byte [] SMALLFAM = Bytes.toBytes(SMALLFAM_STR);
|
||||
private static final int SMALL_LENGTH = 1;
|
||||
private static final int NB_BATCH_ROWS = 10;
|
||||
private byte[] value;
|
||||
private byte[] smallValue;
|
||||
|
||||
private HTableDescriptor desc = null;
|
||||
private HTable table = null;
|
||||
|
||||
/**
|
||||
* @throws UnsupportedEncodingException
|
||||
*/
|
||||
public TestBatchUpdate() throws UnsupportedEncodingException {
|
||||
super();
|
||||
value = "abcd".getBytes(HConstants.UTF8_ENCODING);
|
||||
smallValue = "a".getBytes(HConstants.UTF8_ENCODING);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
this.desc = new HTableDescriptor("test");
|
||||
desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
|
||||
desc.addFamily(new HColumnDescriptor(SMALLFAM,
|
||||
HColumnDescriptor.DEFAULT_VERSIONS,
|
||||
HColumnDescriptor.DEFAULT_COMPRESSION,
|
||||
HColumnDescriptor.DEFAULT_IN_MEMORY,
|
||||
HColumnDescriptor.DEFAULT_BLOCKCACHE, SMALL_LENGTH,
|
||||
HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_BLOOMFILTER));
|
||||
HBaseAdmin admin = new HBaseAdmin(conf);
|
||||
admin.createTable(desc);
|
||||
table = new HTable(conf, desc.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws IOException
|
||||
*/
|
||||
public void testBatchUpdate() throws IOException {
|
||||
BatchUpdate bu = new BatchUpdate("row1");
|
||||
bu.put(CONTENTS, value);
|
||||
bu.delete(CONTENTS);
|
||||
table.commit(bu);
|
||||
|
||||
bu = new BatchUpdate("row2");
|
||||
bu.put(CONTENTS, value);
|
||||
table.commit(bu);
|
||||
|
||||
byte [][] columns = { CONTENTS };
|
||||
Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
|
||||
for (RowResult r : scanner) {
|
||||
for(Map.Entry<byte [], Cell> e: r.entrySet()) {
|
||||
System.out.println(r.getRow() + ": row: " + e.getKey() + " value: " +
|
||||
new String(e.getValue().getValue(), HConstants.UTF8_ENCODING));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testBatchUpdateMaxLength() {
|
||||
// Test for a single good value
|
||||
BatchUpdate batchUpdate = new BatchUpdate("row1");
|
||||
batchUpdate.put(SMALLFAM, value);
|
||||
try {
|
||||
table.commit(batchUpdate);
|
||||
fail("Value is too long, should throw exception");
|
||||
} catch (IOException e) {
|
||||
// This is expected
|
||||
}
|
||||
// Try to see if it's still inserted
|
||||
try {
|
||||
Cell cell = table.get("row1", SMALLFAM_STR);
|
||||
assertNull(cell);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail("This is unexpected");
|
||||
}
|
||||
// Try to put a good value
|
||||
batchUpdate = new BatchUpdate("row1");
|
||||
batchUpdate.put(SMALLFAM, smallValue);
|
||||
try {
|
||||
table.commit(batchUpdate);
|
||||
} catch (IOException e) {
|
||||
fail("Value is long enough, should not throw exception");
|
||||
}
|
||||
}
|
||||
|
||||
public void testRowsBatchUpdate() {
|
||||
ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
|
||||
for(int i = 0; i < NB_BATCH_ROWS; i++) {
|
||||
BatchUpdate batchUpdate = new BatchUpdate("row"+i);
|
||||
batchUpdate.put(CONTENTS, value);
|
||||
rowsUpdate.add(batchUpdate);
|
||||
}
|
||||
try {
|
||||
table.commit(rowsUpdate);
|
||||
|
||||
byte [][] columns = { CONTENTS };
|
||||
Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);
|
||||
int nbRows = 0;
|
||||
for(RowResult row : scanner)
|
||||
nbRows++;
|
||||
assertEquals(NB_BATCH_ROWS, nbRows);
|
||||
} catch (IOException e) {
|
||||
fail("This is unexpected : " + e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue