HBASE-468 Move HStoreKey back to o.a.h.h
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@630605 13f79535-47bb-0310-9956-ffa450edef68
parent 4c9a4b4cb0
commit d280d4d0ec
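
Note: this commit moves HStoreKey from org.apache.hadoop.hbase.regionserver back to org.apache.hadoop.hbase, undoing that part of HBASE-419. Every hunk below either swaps `import org.apache.hadoop.hbase.regionserver.HStoreKey;` for `import org.apache.hadoop.hbase.HStoreKey;` or drops the import entirely where the file already sits in org.apache.hadoop.hbase. A minimal sketch of client code after the move; the three-argument constructor is an assumption for illustration, not something this diff shows:

```java
import org.apache.hadoop.io.Text;
// Before this commit: import org.apache.hadoop.hbase.regionserver.HStoreKey;
import org.apache.hadoop.hbase.HStoreKey;

public class HStoreKeyExample {
  public static void main(String[] args) {
    // An HStoreKey names a cell: row, column ("family:qualifier"), timestamp.
    // Constructor form assumed for illustration.
    HStoreKey key = new HStoreKey(new Text("row1"), new Text("info:server"),
        System.currentTimeMillis());
    System.out.println(key);
  }
}
```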

@@ -64,6 +64,7 @@ Hbase Change Log
    HBASE-419 Move RegionServer and related classes into regionserver package
    HBASE-457 Factor Master into Master, RegionManager, and ServerManager
    HBASE-464 HBASE-419 introduced javadoc errors
+   HBASE-468 Move HStoreKey back to o.a.h.h

 Branch 0.1

@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * A non-instantiable class that has a static method capable of compacting
@@ -355,7 +354,6 @@ class HMerge implements HConstants {
         oldRegion2
     };
     for(int r = 0; r < regionsToDelete.length; r++) {
-      long lockid = Math.abs(rand.nextLong());
       BatchUpdate b = new BatchUpdate(regionsToDelete[r]);
       b.delete(COL_REGIONINFO);
       b.delete(COL_SERVER);

@@ -26,7 +26,6 @@ import java.util.Map;
 import java.util.SortedMap;

 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * HScannerInterface iterates through a set of rows. It's implemented by
|
@ -17,12 +17,10 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.regionserver;
|
||||
package org.apache.hadoop.hbase;
|
||||
|
||||
import org.apache.hadoop.hbase.io.TextSequence;
|
||||
import org.apache.hadoop.hbase.InvalidColumnNameException;
|
||||
import org.apache.hadoop.io.*;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
|
||||
import java.io.*;
|
||||
import java.nio.ByteBuffer;
|
||||
|
@ -227,6 +225,7 @@ public class HStoreKey implements WritableComparable {
|
|||
|
||||
// Comparable
|
||||
|
||||
/** {@inheritDoc} */
|
||||
public int compareTo(Object o) {
|
||||
HStoreKey other = (HStoreKey)o;
|
||||
int result = this.row.compareTo(other.row);
|
||||
|
@ -286,6 +285,7 @@ public class HStoreKey implements WritableComparable {
|
|||
* Extracts the column family name from a column
|
||||
* For example, returns 'info' if the specified column was 'info:server'
|
||||
* @param col name of column
|
||||
* @param withColon set to true if colon separator should be returned
|
||||
* @return column famile as a TextSequence based on the passed
|
||||
* <code>col</code>. If <code>col</code> is reused, make a new Text of
|
||||
* the result by calling {@link TextSequence#toText()}.
|
|

@@ -34,6 +34,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hbase.master.HMasterInterface;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -42,10 +43,8 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-
 import org.apache.hadoop.hbase.regionserver.HRegionInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * Provides administrative functions for HBase
@@ -118,7 +117,6 @@ public class HBaseAdmin implements HConstants {
    *
    * @throws IllegalArgumentException if the table name is reserved
    * @throws MasterNotRunningException if master is not running
-   * @throws NoServerForRegionException if root region is not being served
    * @throws TableExistsException if table already exists (If concurrent
    * threads, the table may have been created between test-for-existence
    * and attempt-at-creation).
@@ -155,7 +153,6 @@ public class HBaseAdmin implements HConstants {
    *
    * @throws IllegalArgumentException if the table name is reserved
    * @throws MasterNotRunningException if master is not running
-   * @throws NoServerForRegionException if root region is not being served
    * @throws TableExistsException if table already exists (If concurrent
    * threads, the table may have been created between test-for-existence
    * and attempt-at-creation).

@@ -39,6 +39,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hbase.master.HMasterInterface;
 import org.apache.hadoop.hbase.util.SoftSortedMap;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -46,14 +47,12 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NoServerForRegionException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;

 import org.apache.hadoop.hbase.regionserver.HRegionInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * A non-instantiable class that manages connections to multiple tables in
@@ -130,7 +129,7 @@ public class HConnectionManager implements HConstants {

   private Map<Text, SoftSortedMap<Text, HRegionLocation>>
     cachedRegionLocations = new ConcurrentHashMap<Text,
-      SoftSortedMap<Text, HRegionLocation>>();;
+      SoftSortedMap<Text, HRegionLocation>>();

   /**
    * constructor
@@ -300,7 +299,7 @@ public class HConnectionManager implements HConstants {
         continue;
       }
       finally {
-        if (scannerId != -1L) {
+        if (scannerId != -1L && server != null) {
           server.close(scannerId);
         }
       }
@@ -309,11 +308,13 @@ public class HConnectionManager implements HConstants {
     return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
   }

+  /** {@inheritDoc} */
   public HRegionLocation locateRegion(Text tableName, Text row)
   throws IOException{
     return locateRegion(tableName, row, true);
   }

+  /** {@inheritDoc} */
   public HRegionLocation relocateRegion(Text tableName, Text row)
   throws IOException{
     return locateRegion(tableName, row, false);

@@ -47,12 +47,12 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.NotServingRegionException;

-import org.apache.hadoop.hbase.regionserver.HStoreKey;
 import org.apache.hadoop.hbase.regionserver.HRegionInterface;

 /**
@@ -93,6 +93,7 @@ public class HTable implements HConstants {
    * Find region location hosting passed row using cached info
    * @param row Row to find.
    * @return Location of row.
+   * @throws IOException
    */
   public HRegionLocation getRegionLocation(Text row) throws IOException {
     return this.connection.locateRegion(this.tableName, row);
@@ -744,6 +745,8 @@ public class HTable implements HConstants {

   /**
    * Commit a BatchUpdate to the table.
+   * @param batchUpdate
+   * @throws IOException
    */
   public synchronized void commit(final BatchUpdate batchUpdate)
   throws IOException {
@@ -1024,6 +1027,9 @@ public class HTable implements HConstants {
     return null;
   }

+  /**
+   * Does nothing anymore
+   */
   @Deprecated
   public void close() {
     // do nothing...

@@ -31,7 +31,7 @@ import java.util.TreeMap;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Shell;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;

@@ -34,7 +34,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.util.ReflectionUtils;

-import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.HStoreKey;

 /**
  * A Writable Map.
@@ -167,6 +167,7 @@ public class HbaseMapWritable implements Map<Writable, Writable>, Writable,
     return b;
   }

+  /** {@inheritDoc} */
   public void write(DataOutput out) throws IOException {
     // Write out the number of entries in the map
     out.writeInt(instance.size());
@@ -180,6 +181,7 @@ public class HbaseMapWritable implements Map<Writable, Writable>, Writable,
     }
   }

+  /** {@inheritDoc} */
   public void readFields(DataInput in) throws IOException {
     // First clear the map. Otherwise we will just accumulate
     // entries every time this method is called.

@@ -25,7 +25,7 @@ import java.util.ArrayList;
 import java.util.Map;

 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.Text;
@@ -85,7 +85,7 @@ public class GroupingTableMap extends TableMap<Text,MapWritable> {
    * Pass the new key and value to reduce.
    * If any of the grouping columns are not found in the value, the record is skipped.
    *
-   * @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.regionserver.HStoreKey, org.apache.hadoop.io.MapWritable, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)
+   * @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.io.MapWritable, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)
    */
   @Override
   public void map(@SuppressWarnings("unused") HStoreKey key,

@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.mapred;

 import java.io.IOException;

-import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.OutputCollector;
@@ -41,7 +41,7 @@ public class IdentityTableMap extends TableMap<Text, MapWritable> {
   /**
    * Pass the key, value to reduce
    *
-   * @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.regionserver.HStoreKey, org.apache.hadoop.io.MapWritable, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)
+   * @see org.apache.hadoop.hbase.mapred.TableMap#map(org.apache.hadoop.hbase.HStoreKey, org.apache.hadoop.io.MapWritable, org.apache.hadoop.mapred.OutputCollector, org.apache.hadoop.mapred.Reporter)
    */
   @Override
   public void map(HStoreKey key, MapWritable value,

@@ -38,7 +38,7 @@ import org.apache.hadoop.mapred.Reporter;

 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.client.HTable;

@@ -84,6 +84,7 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {
       }
     }

+    /** {@inheritDoc} */
     public void close() throws IOException {
       this.m_scanner.close();
     }
@@ -147,6 +148,7 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {

   }

+  /** {@inheritDoc} */
   public RecordReader<HStoreKey, MapWritable> getRecordReader(
       InputSplit split,
       @SuppressWarnings("unused") JobConf job,
@@ -178,6 +180,7 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {
     return splits;
   }

+  /** {@inheritDoc} */
   public void configure(JobConf job) {
     Path[] tableNames = job.getInputPaths();
     m_tableName = new Text(tableNames[0].getName());
@@ -194,6 +197,7 @@ implements InputFormat<HStoreKey, MapWritable>, JobConfigurable {
     }
   }

+  /** {@inheritDoc} */
   public void validateInput(JobConf job) throws IOException {
     // expecting exactly one path
     Path[] tableNames = job.getInputPaths();

@@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.mapred;
 import java.io.IOException;

 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.io.MapWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
@@ -37,6 +37,8 @@ import org.apache.hadoop.mapred.Reporter;
  * Scan an HBase table to sort by a specified sort column.
  * If the column does not exist, the record is not passed to Reduce.
  *
+ * @param <K> WritableComparable key class
+ * @param <V> Writable value class
  */
 @SuppressWarnings("unchecked")
 public abstract class TableMap<K extends WritableComparable, V extends Writable>

@@ -60,6 +60,7 @@ import org.apache.hadoop.ipc.Server;

 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.Leases;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -80,7 +81,6 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;

 import org.apache.hadoop.hbase.regionserver.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * HMaster is the "master server" for a HBase.

@@ -27,9 +27,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;

 /*

@@ -29,6 +29,7 @@ import java.util.regex.Pattern;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.io.Text;

 /**
@@ -200,7 +201,7 @@ public abstract class HAbstractScanner implements HInternalScannerInterface {
    * @return true if a match was found
    * @throws IOException
    *
-   * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.regionserver.HStoreKey, java.util.SortedMap)
+   * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.SortedMap)
    */
   public boolean next(HStoreKey key, SortedMap<Text, byte []> results)
   throws IOException {

@@ -35,6 +35,7 @@ import java.util.concurrent.locks.ReentrantLock;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -44,6 +45,7 @@ import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.SequenceFile.Reader;

 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -501,7 +503,7 @@ public class HLog implements HConstants {
    */
   public static void splitLog(Path rootDir, Path srcDir, FileSystem fs,
     Configuration conf) throws IOException {
-    Path logfiles[] = fs.listPaths(new Path[] { srcDir });
+    FileStatus logfiles[] = fs.listStatus(srcDir);
     LOG.info("splitting " + logfiles.length + " log(s) in " +
       srcDir.toString());
     Map<Text, SequenceFile.Writer> logWriters =
@@ -513,14 +515,15 @@ public class HLog implements HConstants {
           logfiles[i]);
       }
       // Check for empty file.
-      if (fs.getFileStatus(logfiles[i]).getLen() <= 0) {
+      if (logfiles[i].getLen() <= 0) {
         LOG.info("Skipping " + logfiles[i].toString() +
           " because zero length");
         continue;
       }
       HLogKey key = new HLogKey();
       HLogEdit val = new HLogEdit();
-      SequenceFile.Reader in = new SequenceFile.Reader(fs, logfiles[i], conf);
+      SequenceFile.Reader in =
+        new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
       try {
         int count = 0;
         for (; in.next(key, val); count++) {
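
The HLog hunks above also migrate from the deprecated fs.listPaths(...) to FileSystem.listStatus(Path), whose FileStatus results carry the file length, removing the per-file fs.getFileStatus() call in the empty-log check. A hedged sketch of the resulting pattern (variable names follow the hunks; the rest of splitLog is elided):

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;

public class ListStatusSketch {
  // Open a reader for each non-empty log file under srcDir, as splitLog does.
  static void readLogs(FileSystem fs, Path srcDir, Configuration conf)
      throws IOException {
    FileStatus[] logfiles = fs.listStatus(srcDir);
    for (int i = 0; i < logfiles.length; i++) {
      if (logfiles[i].getLen() <= 0) {
        continue; // length comes with the listing; no extra namenode round trip
      }
      SequenceFile.Reader in =
          new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
      try {
        // ... iterate key/value pairs as in the hunk above ...
      } finally {
        in.close();
      }
    }
  }
}
```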

@@ -50,6 +50,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HScannerInterface;
@@ -104,6 +105,11 @@ public class HRegion implements HConstants {
    * Merge two HRegions. They must be available on the current
    * HRegionServer. Returns a brand-new active HRegion, also
    * running on the current HRegionServer.
+   *
+   * @param srcA
+   * @param srcB
+   * @return new merged HRegion
+   * @throws IOException
    */
   public static HRegion closeAndMerge(final HRegion srcA, final HRegion srcB)
   throws IOException {
@@ -538,6 +544,7 @@ public class HRegion implements HConstants {
 //////////////////////////////////////////////////////////////////////////////

   /**
+   * @param midkey
    * @return returns size of largest HStore. Also returns whether store is
    * splitable or not (Its not splitable if region has a store that has a
    * reference store file).
@@ -769,6 +776,8 @@ public class HRegion implements HConstants {
    * Note that no locking is necessary at this level because compaction only
    * conflicts with a region split, and that cannot happen because the region
    * server does them sequentially and not in parallel.
+   *
+   * @throws IOException
    */
   public boolean compactStores() throws IOException {
     if (this.closed.get()) {
@@ -1612,6 +1621,7 @@ public class HRegion implements HConstants {
     return regionInfo.getRegionName().toString();
   }

+  /** @return Path of region base directory */
   public Path getBaseDir() {
     return this.basedir;
   }

@@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HMsg;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.Leases;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.RegionServerRunningException;
@@ -1590,6 +1591,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     return this.serverInfo;
   }

+  /** @return the info server */
   public InfoServer getInfoServer() {
     return infoServer;
   }

@@ -39,6 +39,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
@@ -60,6 +61,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;


@@ -221,11 +223,12 @@ public class HStore implements HConstants {
   }

   /**
    * Find the key that matches <i>row</i> exactly, or the one that immediately
    * preceeds it.
+   * @param row
+   * @param timestamp
+   * @return the key that matches <i>row</i> exactly, or the one that
+   * immediately preceeds it.
    */
-  public Text getRowKeyAtOrBefore(final Text row, long timestamp)
-  throws IOException{
+  public Text getRowKeyAtOrBefore(final Text row, long timestamp) {
     this.lock.readLock().lock();

     Text key_memcache = null;
@@ -246,17 +249,17 @@ public class HStore implements HConstants {
       return key_snapshot;
     } else if (key_memcache != null && key_snapshot == null) {
       return key_memcache;
-    } else {
-      // if either is a precise match, return the original row.
-      if ( (key_memcache != null && key_memcache.equals(row))
+    } else if ( (key_memcache != null && key_memcache.equals(row))
         || (key_snapshot != null && key_snapshot.equals(row)) ) {
+      // if either is a precise match, return the original row.
       return row;
-      }
+    } else if (key_memcache != null) {
       // no precise matches, so return the one that is closer to the search
       // key (greatest)
       return key_memcache.compareTo(key_snapshot) > 0 ?
         key_memcache : key_snapshot;
     }
     return null;
   } finally {
     this.lock.readLock().unlock();
   }
@@ -869,10 +872,11 @@ public class HStore implements HConstants {
     }
     // Look first at info files. If a reference, these contain info we need
     // to create the HStoreFile.
-    Path infofiles[] = fs.listPaths(new Path[] {infodir});
+    FileStatus infofiles[] = fs.listStatus(infodir);
     ArrayList<HStoreFile> results = new ArrayList<HStoreFile>(infofiles.length);
     ArrayList<Path> mapfiles = new ArrayList<Path>(infofiles.length);
-    for (Path p: infofiles) {
+    for (int i = 0; i < infofiles.length; i++) {
+      Path p = infofiles[i].getPath();
       Matcher m = REF_NAME_PARSER.matcher(p.getName());
       /*
        * * * * * N O T E * * * * *
@@ -912,11 +916,12 @@ public class HStore implements HConstants {

     // List paths by experience returns fully qualified names -- at least when
     // running on a mini hdfs cluster.
-    Path datfiles[] = fs.listPaths(new Path[] {mapdir});
+    FileStatus datfiles[] = fs.listStatus(mapdir);
     for (int i = 0; i < datfiles.length; i++) {
+      Path p = datfiles[i].getPath();
       // If does not have sympathetic info file, delete.
-      if (!mapfiles.contains(fs.makeQualified(datfiles[i]))) {
-        fs.delete(datfiles[i]);
+      if (!mapfiles.contains(fs.makeQualified(p))) {
+        fs.delete(p);
       }
     }
     return results;
@@ -1817,8 +1822,11 @@ public class HStore implements HConstants {
   }

   /**
-   * Find the key that matches <i>row</i> exactly, or the one that immediately
+   * @return the key that matches <i>row</i> exactly, or the one that immediately
    * preceeds it.
+   * @param row
+   * @param timestamp
+   * @throws IOException
    */
   public Text getRowKeyAtOrBefore(final Text row, final long timestamp)
   throws IOException{
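
The getRowKeyAtOrBefore hunks above restructure the memcache/snapshot resolution into a flat if/else-if chain: a candidate from only one source wins outright, an exact match in either source returns the original row, and otherwise the greater (closest preceding) candidate is chosen. A small hedged illustration of that rule alone, outside the HStore class (the helper's name and shape are mine, not the diff's):

```java
import org.apache.hadoop.io.Text;

public class AtOrBeforeSketch {
  // Mirrors the branch logic in HStore.getRowKeyAtOrBefore().
  static Text resolve(Text row, Text keyMemcache, Text keySnapshot) {
    if (keyMemcache == null && keySnapshot != null) {
      return keySnapshot;
    } else if (keyMemcache != null && keySnapshot == null) {
      return keyMemcache;
    } else if ((keyMemcache != null && keyMemcache.equals(row))
        || (keySnapshot != null && keySnapshot.equals(row))) {
      return row; // a precise match in either source returns the original row
    } else if (keyMemcache != null) {
      // no precise match: return the candidate closest to the search key
      return keyMemcache.compareTo(keySnapshot) > 0 ? keyMemcache : keySnapshot;
    }
    return null;
  }
}
```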

@@ -48,6 +48,7 @@ import org.onelab.filter.Key;

 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HStoreKey;

 /**
  * A HStore data file. HStores usually have one or more of these files. They
@@ -523,18 +524,34 @@ public class HStoreFile implements HConstants {
       ((encodedRegionName != null) ? "." + encodedRegionName : "");
   }

+  /**
+   * @param dir
+   * @param encodedRegionName
+   * @param colFamily
+   * @return path for map file directory
+   */
   public static Path getMapDir(Path dir, String encodedRegionName, Text colFamily) {
     return new Path(dir, new Path(encodedRegionName,
       new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
   }

-  /** @return the info directory path */
+  /**
+   * @param dir
+   * @param encodedRegionName
+   * @param colFamily
+   * @return the info directory path
+   */
   public static Path getInfoDir(Path dir, String encodedRegionName, Text colFamily) {
     return new Path(dir, new Path(encodedRegionName,
       new Path(colFamily.toString(), HSTORE_INFO_DIR)));
   }

-  /** @return the bloom filter directory path */
+  /**
+   * @param dir
+   * @param encodedRegionName
+   * @param colFamily
+   * @return the bloom filter directory path
+   */
   public static Path getFilterDir(Path dir, String encodedRegionName, Text colFamily) {
     return new Path(dir, new Path(encodedRegionName,
       new Path(colFamily.toString(), HSTORE_FILTER_DIR)));
@@ -723,6 +740,14 @@ public class HStoreFile implements HConstants {
     bloomFilter = filter;
   }

+  /**
+   * @param fs
+   * @param dirName
+   * @param conf
+   * @param filter
+   * @param blockCacheEnabled
+   * @throws IOException
+   */
   public Reader(FileSystem fs, String dirName, Configuration conf,
       final Filter filter, final boolean blockCacheEnabled)
   throws IOException {

@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.util.JenkinsHash;
 import org.apache.hadoop.io.Text;
 import org.mortbay.servlet.MultiPartResponse;

@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;

@@ -58,12 +58,12 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.MasterNotRunningException;

 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * Perform a file system upgrade to convert older file layouts to that

@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;

 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
 import org.apache.hadoop.hbase.regionserver.HRegionInterface;

 /**

@@ -330,6 +330,7 @@ public abstract class HBaseTestCase extends TestCase {
   public static interface Incommon {
     /**
+     * @param row
      * @return update id
      * @throws IOException
      */
     public long startUpdate(Text row) throws IOException;

@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.client.HTable;

 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * Utility class to build a table of multiple regions.

@@ -48,8 +48,8 @@ import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.log4j.Logger;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.BatchUpdate;

-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * Script used evaluating HBase performance and scalability. Runs a HBase
@@ -386,9 +386,9 @@ public class PerformanceEvaluation implements HConstants {
     @Override
     void testRow(@SuppressWarnings("unused") final int i) throws IOException {
       Text row = getRandomRow(this.rand, this.totalRows);
-      long lockid = table.startUpdate(row);
-      table.put(lockid, COLUMN_NAME, generateValue(this.rand));
-      table.commit(lockid);
+      BatchUpdate b = new BatchUpdate(row);
+      b.put(COLUMN_NAME, generateValue(this.rand));
+      table.commit(b);
     }

     @Override
@@ -460,9 +460,9 @@ public class PerformanceEvaluation implements HConstants {

     @Override
     void testRow(final int i) throws IOException {
-      long lockid = table.startUpdate(format(i));
-      table.put(lockid, COLUMN_NAME, generateValue(this.rand));
-      table.commit(lockid);
+      BatchUpdate b = new BatchUpdate(format(i));
+      b.put(COLUMN_NAME, generateValue(this.rand));
+      table.commit(b);
     }

     @Override
@@ -535,7 +535,7 @@ public class PerformanceEvaluation implements HConstants {
     return totalElapsedTime;
   }

-  private void runNIsOne(final String cmd) throws IOException {
+  private void runNIsOne(final String cmd) {
     Status status = new Status() {
       @SuppressWarnings("unused")
       public void setStatus(String msg) throws IOException {
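
The PerformanceEvaluation hunks above continue the client API migration away from the lock-based startUpdate/put/commit(lockid) sequence toward BatchUpdate, which buffers the puts for one row and commits them in a single call. A minimal sketch of the new idiom, with the table and column assumed from the surrounding test:

```java
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.BatchUpdate;

public class BatchUpdateSketch {
  // One BatchUpdate per row: buffer the puts client-side, then commit once.
  static void writeRow(HTable table, Text row, Text column, byte[] value)
      throws IOException {
    BatchUpdate b = new BatchUpdate(row);
    b.put(column, value);
    table.commit(b);
  }
}
```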

@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase;
 import org.apache.hadoop.io.Text;

 import junit.framework.TestCase;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * Test comparing HBase objects.

@@ -27,7 +27,7 @@ import java.util.TreeMap;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
+import org.apache.hadoop.hbase.io.BatchUpdate;

 /**
  * Test HBase Master and Region servers, client API
@@ -97,12 +97,12 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
     // Write out a bunch of values

     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
-      long writeid = table.startUpdate(new Text("row_" + k));
-      table.put(writeid, CONTENTS_BASIC,
+      BatchUpdate b = new BatchUpdate(new Text("row_" + k));
+      b.put(CONTENTS_BASIC,
         (CONTENTSTR + k).getBytes(HConstants.UTF8_ENCODING));
-      table.put(writeid, new Text(ANCHORNUM + k),
+      b.put(new Text(ANCHORNUM + k),
         (ANCHORSTR + k).getBytes(HConstants.UTF8_ENCODING));
-      table.commit(writeid);
+      table.commit(b);
     }
     System.out.println("Write " + NUM_VALS + " rows. Elapsed time: "
       + ((System.currentTimeMillis() - startTime) / 1000.0));

@@ -31,7 +31,6 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
 import org.apache.hadoop.hbase.io.BatchUpdate;

 /** test the scanner API at all levels */

@@ -25,10 +25,10 @@ import java.util.Map;
 import java.util.TreeMap;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
 import org.apache.hadoop.hbase.HColumnDescriptor;

 /**

@@ -30,10 +30,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;

 /**
  * Tests HTable

@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.MultiRegionTable;

@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
-import org.apache.hadoop.hbase.regionserver.HStoreKey;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.MultiRegionTable;

@@ -31,6 +31,7 @@ import org.apache.hadoop.io.MapFile;
 import org.apache.hadoop.io.Text;

 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;

@@ -30,8 +30,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;

-
 /**
@@ -148,7 +148,10 @@ public class TestGet2 extends HBaseTestCase {
     }
   }

-  /** For HADOOP-2443 */
+  /**
+   * For HADOOP-2443
+   * @throws IOException
+   */
   public void testGetClosestRowBefore() throws IOException{

     HRegion region = null;
@@ -156,7 +159,6 @@ public class TestGet2 extends HBaseTestCase {

     try {
       HTableDescriptor htd = createTableDescriptor(getName());
-      HRegionInfo hri = new HRegionInfo(htd, null, null);
       region = createNewHRegion(htd, null, null);
       region_incommon = new HRegionIncommon(region);

@@ -30,6 +30,7 @@ import org.apache.hadoop.io.SequenceFile.Reader;

 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.StaticTestEnvironment;

 /** JUnit test case for HLog */

@@ -29,6 +29,7 @@ import junit.framework.TestCase;
 import org.apache.hadoop.io.Text;

 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HStoreKey;

 /** memcache test case */
 public class TestHMemcache extends TestCase {

@@ -32,9 +32,9 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.log4j.Logger;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HScannerInterface;

@@ -33,6 +33,7 @@ import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.hbase.HBaseTestCase;

 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 /**
  * Test HStoreFile

@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;

 import org.apache.hadoop.hbase.HBaseClusterTestCase;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.LocalHBaseCluster;

@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.StaticTestEnvironment;

 /**

@@ -28,6 +28,7 @@ import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.MultiRegionTable;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;

@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.HScannerInterface;