Compare commits
30 Commits
Author | SHA1
---|---
Joe Schaefer | 76bd7a4357
Jean-Daniel Cryans | 15aaec0a67
Michael Stack | e3e678fe30
Michael Stack | 00949213b8
Jean-Daniel Cryans | 3a2c6f5e90
Jim Kellerman | d7540b9af4
Jean-Daniel Cryans | 6e6c79d93e
Jim Kellerman | cfb9cb2946
Jim Kellerman | 38b5c1f5c1
Jim Kellerman | d86e9a58b2
Michael Stack | e68aeb8d70
Michael Stack | 03bb977770
Michael Stack | 54e73589cb
Michael Stack | cfa79319e5
Michael Stack | 1ef1955aa8
Michael Stack | 1f66d54b5d
Michael Stack | 8dcc95d790
Michael Stack | 06d1a00f83
Michael Stack | 8b7d4da0f9
Jim Kellerman | 2d65f47517
Michael Stack | 6720a3f1ed
Michael Stack | 47e17e0007
Jim Kellerman | cc6c62bc20
Jim Kellerman | 4e41ef9c69
Jean-Daniel Cryans | 9c218f0b04
Michael Stack | 7c3e3cab20
Michael Stack | 0a0f8ea3c0
Michael Stack | aaf96533a2
Michael Stack | 55512dd3e9
Michael Stack | 1c0cc7dba8
CHANGES.txt

@@ -1,5 +1,47 @@
HBase Change Log

Release 0.18.2 - Unreleased

BUG FIXES
HBASE-602 HBase Crash when network card has a IPv6 address
HBASE-927 We don't recover if HRS hosting -ROOT-/.META. goes down -
    (back port from trunk)
HBASE-1052 Stopping a HRegionServer with unflushed cache causes data loss
    from org.apache.hadoop.hbase.DroppedSnapshotException
HBASE-981 hbase.io.index.interval doesn't seem to have an effect;
    interval is 128 rather than the configured 32
HBASE-1070 Up default index interval in TRUNK and branch
HBASE-1079 Dumb NPE in ServerCallable hides the RetriesExhausted exception

IMPROVEMENTS
HBASE-1046 Narrow getClosestRowBefore by passing column family (backport)
HBASE-1069 Show whether HRegion major compacts or not in INFO level


Release 0.18.1 - Released October 27, 2008

BUG FIXES
HBASE-891 HRS.validateValuesLength throws IOE, gets caught in the retries
HBASE-906 [shell] Truncates output
HBASE-912 PE is broken when other tables exist
HBASE-918 Region balancing during startup makes cluster unstable
HBASE-921 region close and open processed out of order; makes for
    disagreement between master and regionserver on region state
HBASE-925 HRS NPE on way out if no master to connect to
HBASE-928 NPE throwing RetriesExhaustedException
HBASE-576 Investigate IPC performance; partial.
HBASE-924 Update hadoop in lib on 0.18 hbase branch to 0.18.1
HBASE-930 RegionServer stuck: HLog: Could not append. Requesting close of
    log java.io.IOException: Could not get block locations. Aborting...
HBASE-933 missing hbase.regions.slop in hbase-default.xml for 0.18 branch
    (Rong-en Fan via Stack)
HBASE-926 If no master, regionservers should hang out rather than fail on
    connection and shut themselves down
HBASE-946 Row with 55k deletes timesout scanner lease

IMPROVEMENTS
HBASE-920 Make region balancing sloppier


Release 0.18.0 - September 21st, 2008

INCOMPATIBLE CHANGES
@@ -84,11 +84,11 @@ module Formatter
end

def dump(str)
# Remove double-quotes added by 'dump'.
if str.instance_of? Fixnum
return
end
return str.dump.slice(1, str.length)
# Remove double-quotes added by 'dump'.
return str.dump[1..-2]
end

def output(width, str)
@@ -18,7 +18,7 @@
-->

<project name="hbase" default="jar">
<property name="version" value="0.18.0"/>
<property name="version" value="0.18.2-dev"/>
<property name="Name" value="HBase"/>
<property name="final.name" value="hbase-${version}"/>
<property name="year" value="2008"/>
@@ -263,6 +263,13 @@
HStoreFiles in a region. Default: 1 day.
</description>
</property>
<property>
<name>hbase.regions.slop</name>
<value>0.1</value>
<description>Rebalance if regionserver has average + (average * slop) regions.
Default is 10% slop.
</description>
</property>
<property>
<name>hbase.regionserver.nbreservationblocks</name>
<value>4</value>

@@ -272,12 +279,13 @@
</property>
<property>
<name>hbase.io.index.interval</name>
<value>32</value>
<value>128</value>
<description>The interval at which we record offsets in hbase
store files/mapfiles. Default for stock mapfiles is 128. Index
files are read into memory. If there are many of them, could prove
a burden. If so play with the hadoop io.map.index.skip property and
skip every nth index member when reading back the index into memory.
Downside to high index interval is lowered access times.
</description>
</property>
<property>
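The hbase.regions.slop description above amounts to a single threshold check. The sketch below is illustrative only and not part of this changeset; the variable names and values are assumptions for the example.

```java
// Illustrative sketch of the rebalance condition described by hbase.regions.slop.
// The inputs (avgLoad, regionsOnServer) are hypothetical values for this example.
public class SlopThresholdExample {
  public static void main(String[] args) {
    double slop = 0.1;          // hbase.regions.slop default (10%)
    double avgLoad = 10.0;      // average regions per region server
    int regionsOnServer = 12;   // regions hosted by the server being checked

    // Rebalance if the server carries more than average + (average * slop) regions.
    double threshold = avgLoad + (avgLoad * slop);
    System.out.println("threshold=" + threshold
        + ", rebalance=" + (regionsOnServer > threshold)); // threshold=11.0, rebalance=true
  }
}
```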
Binary file not shown.
Binary file not shown.
@@ -30,6 +30,7 @@ import java.net.InetSocketAddress;
* HServerAddress is a "label" for a HBase server that combines the host
* name and port number.
*/
@SuppressWarnings("unchecked")
public class HServerAddress implements WritableComparable {
private InetSocketAddress address;
String stringValue;
@@ -28,6 +28,7 @@ import org.apache.hadoop.io.WritableComparable;
/**
* This class encapsulates metrics for determining the load on a HRegionServer
*/
@SuppressWarnings("unchecked")
public class HServerLoad implements WritableComparable {
private int numberOfRequests; // number of requests since last report
private int numberOfRegions; // number of regions being served
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.WritableComparator;
/**
* A Key for a stored row.
*/
public class HStoreKey implements WritableComparable {
public class HStoreKey implements WritableComparable<HStoreKey> {
/**
* Colon character in UTF-8
*/

@@ -332,7 +332,14 @@ public class HStoreKey implements WritableComparable {
@Override
public boolean equals(Object obj) {
return compareTo(obj) == 0;
HStoreKey other = (HStoreKey)obj;
// Do a quick check.
if (this.row.length != other.row.length ||
this.column.length != other.column.length ||
this.timestamp != other.timestamp) {
return false;
}
return compareTo(other) == 0;
}

@Override

@@ -345,7 +352,7 @@ public class HStoreKey implements WritableComparable {
// Comparable

public int compareTo(Object o) {
public int compareTo(final HStoreKey o) {
return compareTo(this.regionInfo, this, (HStoreKey)o);
}

@@ -509,8 +516,7 @@ public class HStoreKey implements WritableComparable {
*/
public static int compareTwoRowKeys(HRegionInfo regionInfo,
byte[] rowA, byte[] rowB) {
if(regionInfo != null && (regionInfo.isMetaRegion() ||
regionInfo.isRootRegion())) {
if (regionInfo != null && regionInfo.isMetaRegion()) {
byte[][] keysA = stripStartKeyMeta(rowA);
byte[][] KeysB = stripStartKeyMeta(rowB);
int rowCompare = Bytes.compareTo(keysA[0], KeysB[0]);
@@ -37,23 +37,7 @@ import org.apache.hadoop.io.WritableComparable;
* HTableDescriptor contains the name of an HTable, and its
* column families.
*/
public class HTableDescriptor implements WritableComparable {
/** Table descriptor for <core>-ROOT-</code> catalog table */
public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
HConstants.ROOT_TABLE_NAME,
new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY,
1, HColumnDescriptor.CompressionType.NONE, false, false,
Integer.MAX_VALUE, HConstants.FOREVER, false) });

/** Table descriptor for <code>.META.</code> catalog table */
public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1,
HColumnDescriptor.CompressionType.NONE, false, false,
Integer.MAX_VALUE, HConstants.FOREVER, false),
new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE,
false, false, Integer.MAX_VALUE, HConstants.FOREVER, false) });
public class HTableDescriptor implements WritableComparable<HTableDescriptor> {

// Changes prior to version 3 were not recorded here.
// Version 3 adds metadata as a map where keys and values are byte[].

@@ -63,18 +47,35 @@ public class HTableDescriptor implements WritableComparable {
private String nameAsString = "";

// Table metadata
protected Map<ImmutableBytesWritable,ImmutableBytesWritable> values =
new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();

//TODO: Why can't the following be private? They are only used within this class.
protected Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();

public static final String FAMILIES = "FAMILIES";

public static final ImmutableBytesWritable FAMILIES_KEY =
new ImmutableBytesWritable(Bytes.toBytes(FAMILIES));
public static final String MAX_FILESIZE = "MAX_FILESIZE";
public static final ImmutableBytesWritable MAX_FILESIZE_KEY =
new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
public static final String READONLY = "READONLY";
public static final ImmutableBytesWritable READONLY_KEY =
new ImmutableBytesWritable(Bytes.toBytes(READONLY));
public static final String MEMCACHE_FLUSHSIZE = "MEMCACHE_FLUSHSIZE";
public static final ImmutableBytesWritable MEMCACHE_FLUSHSIZE_KEY =
new ImmutableBytesWritable(Bytes.toBytes(MEMCACHE_FLUSHSIZE));
public static final String IS_ROOT = "IS_ROOT";
public static final ImmutableBytesWritable IS_ROOT_KEY =
new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
public static final String IS_META = "IS_META";
public static final ImmutableBytesWritable IS_META_KEY =
new ImmutableBytesWritable(Bytes.toBytes(IS_META));

// The below are ugly but better than creating them each time till we
// replace booleans being saved as Strings with plain booleans. Need a
// migration script to do this. TODO.
private static final ImmutableBytesWritable FALSE =
new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));
private static final ImmutableBytesWritable TRUE =
new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));

public static final boolean DEFAULT_IN_MEMORY = false;

@@ -82,9 +83,8 @@ public class HTableDescriptor implements WritableComparable {
public static final int DEFAULT_MEMCACHE_FLUSH_SIZE = 1024*1024*64;

private transient Boolean meta = null;

// End TODO:
private volatile Boolean meta = null;
private volatile Boolean root = null;

// Key is hash of the family name.
private final Map<Integer, HColumnDescriptor> families =

@@ -96,6 +96,7 @@ public class HTableDescriptor implements WritableComparable {
*/
protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) {
this.name = name.clone();
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(name);
for(HColumnDescriptor descriptor : families) {
this.families.put(Bytes.mapKey(descriptor.getName()), descriptor);

@@ -109,6 +110,7 @@ public class HTableDescriptor implements WritableComparable {
protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families,
Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
this.name = name.clone();
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(name);
for(HColumnDescriptor descriptor : families) {
this.families.put(Bytes.mapKey(descriptor.getName()), descriptor);

@@ -150,9 +152,9 @@ public class HTableDescriptor implements WritableComparable {
*/
public HTableDescriptor(final byte [] name) {
super();
setMetaFlags(this.name);
this.name = this.isMetaRegion() ? name: isLegalTableName(name);
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(this.name);
}

/**

@@ -162,8 +164,7 @@ public class HTableDescriptor implements WritableComparable {
* Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
* @param desc The descriptor.
*/
public HTableDescriptor(final HTableDescriptor desc)
{
public HTableDescriptor(final HTableDescriptor desc) {
super();
this.name = desc.name.clone();
this.nameAsString = Bytes.toString(this.name);

@@ -190,16 +191,16 @@ public class HTableDescriptor implements WritableComparable {
/** @return true if this is the root region */
public boolean isRootRegion() {
String value = getValue(IS_ROOT);
if (value != null)
return Boolean.valueOf(value);
return false;
if (this.root == null) {
this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
}
return this.root.booleanValue();
}

/** @param isRoot true if this is the root region */
protected void setRootRegion(boolean isRoot) {
values.put(new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT)),
new ImmutableBytesWritable(Bytes.toBytes(Boolean.toString(isRoot))));
// TODO: Make the value a boolean rather than String of boolean.
values.put(IS_ROOT_KEY, isRoot? TRUE: FALSE);
}

/** @return true if this is a meta region (part of the root or meta tables) */

@@ -211,16 +212,25 @@ public class HTableDescriptor implements WritableComparable {
}

private synchronized Boolean calculateIsMetaRegion() {
String value = getValue(IS_META);
return (value != null)? new Boolean(value): Boolean.FALSE;
byte [] value = getValue(IS_META_KEY);
return (value != null)? new Boolean(Bytes.toString(value)): Boolean.FALSE;
}

private boolean isSomething(final ImmutableBytesWritable key,
final boolean valueIfNull) {
byte [] value = getValue(key);
if (value != null) {
// TODO: Make value be a boolean rather than String of boolean.
return Boolean.valueOf(Bytes.toString(value)).booleanValue();
}
return valueIfNull;
}

/**
* @param isMeta true if this is a meta region (part of the root or meta
* tables) */
protected void setMetaRegion(boolean isMeta) {
values.put(new ImmutableBytesWritable(Bytes.toBytes(IS_META)),
new ImmutableBytesWritable(Bytes.toBytes(Boolean.toString(isMeta))));
values.put(IS_META_KEY, isMeta? TRUE: FALSE);
}

/** @return true if table is the meta table */

@@ -263,7 +273,11 @@ public class HTableDescriptor implements WritableComparable {
* @return The value.
*/
public byte[] getValue(byte[] key) {
ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
return getValue(new ImmutableBytesWritable(key));
}

private byte[] getValue(final ImmutableBytesWritable key) {
ImmutableBytesWritable ibw = values.get(key);
if (ibw == null)
return null;
return ibw.get();

@@ -292,8 +306,25 @@ public class HTableDescriptor implements WritableComparable {
* @param value The value.
*/
public void setValue(byte[] key, byte[] value) {
values.put(new ImmutableBytesWritable(key),
new ImmutableBytesWritable(value));
setValue(new ImmutableBytesWritable(key), value);
}

/*
* @param key The key.
* @param value The value.
*/
private void setValue(final ImmutableBytesWritable key,
final byte[] value) {
values.put(key, new ImmutableBytesWritable(value));
}

/*
* @param key The key.
* @param value The value.
*/
private void setValue(final ImmutableBytesWritable key,
final ImmutableBytesWritable value) {
values.put(key, value);
}

/**

@@ -311,7 +342,7 @@ public class HTableDescriptor implements WritableComparable {
public boolean isInMemory() {
String value = getValue(HConstants.IN_MEMORY);
if (value != null)
return Boolean.valueOf(value);
return Boolean.valueOf(value).booleanValue();
return DEFAULT_IN_MEMORY;
}

@@ -327,18 +358,15 @@ public class HTableDescriptor implements WritableComparable {
* @return true if all columns in the table should be read only
*/
public boolean isReadOnly() {
String value = getValue(READONLY);
if (value != null)
return Boolean.valueOf(value);
return DEFAULT_READONLY;
return isSomething(READONLY_KEY, DEFAULT_READONLY);
}

/**
* @param readOnly True if all of the columns in the table should be read
* only.
*/
public void setReadOnly(boolean readOnly) {
setValue(READONLY, Boolean.toString(readOnly));
public void setReadOnly(final boolean readOnly) {
setValue(READONLY_KEY, readOnly? TRUE: FALSE);
}

/** @return name of table */

@@ -353,9 +381,9 @@ public class HTableDescriptor implements WritableComparable {
/** @return max hregion size for table */
public long getMaxFileSize() {
String value = getValue(MAX_FILESIZE);
byte [] value = getValue(MAX_FILESIZE_KEY);
if (value != null)
return Long.valueOf(value);
return Long.valueOf(Bytes.toString(value)).longValue();
return HConstants.DEFAULT_MAX_FILE_SIZE;
}

@@ -364,16 +392,16 @@ public class HTableDescriptor implements WritableComparable {
* before a split is triggered.
*/
public void setMaxFileSize(long maxFileSize) {
setValue(MAX_FILESIZE, Long.toString(maxFileSize));
setValue(MAX_FILESIZE_KEY, Bytes.toBytes(Long.toString(maxFileSize)));
}

/**
* @return memory cache flush size for each hregion
*/
public int getMemcacheFlushSize() {
String value = getValue(MEMCACHE_FLUSHSIZE);
byte [] value = getValue(MEMCACHE_FLUSHSIZE_KEY);
if (value != null)
return Integer.valueOf(value);
return Integer.valueOf(Bytes.toString(value)).intValue();
return DEFAULT_MEMCACHE_FLUSH_SIZE;
}

@@ -381,7 +409,8 @@ public class HTableDescriptor implements WritableComparable {
* @param memcacheFlushSize memory cache flush size for each hregion
*/
public void setMemcacheFlushSize(int memcacheFlushSize) {
setValue(MEMCACHE_FLUSHSIZE, Integer.toString(memcacheFlushSize));
setValue(MEMCACHE_FLUSHSIZE_KEY,
Bytes.toBytes(Integer.toString(memcacheFlushSize)));
}

/**

@@ -447,7 +476,7 @@ public class HTableDescriptor implements WritableComparable {
@Override
public boolean equals(Object obj) {
return compareTo(obj) == 0;
return compareTo((HTableDescriptor)obj) == 0;
}

@Override

@@ -513,8 +542,7 @@ public class HTableDescriptor implements WritableComparable {
// Comparable

public int compareTo(Object o) {
HTableDescriptor other = (HTableDescriptor) o;
public int compareTo(final HTableDescriptor other) {
int result = Bytes.compareTo(this.name, other.name);
if (result == 0) {
result = families.size() - other.families.size();

@@ -576,4 +604,21 @@ public class HTableDescriptor implements WritableComparable {
public static Path getTableDir(Path rootdir, final byte [] tableName) {
return new Path(rootdir, Bytes.toString(tableName));
}

/** Table descriptor for <core>-ROOT-</code> catalog table */
public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
HConstants.ROOT_TABLE_NAME,
new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY,
1, HColumnDescriptor.CompressionType.NONE, false, false,
Integer.MAX_VALUE, HConstants.FOREVER, false) });

/** Table descriptor for <code>.META.</code> catalog table */
public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1,
HColumnDescriptor.CompressionType.NONE, false, false,
Integer.MAX_VALUE, HConstants.FOREVER, false),
new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE,
false, false, Integer.MAX_VALUE, HConstants.FOREVER, false) });
}
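The HTableDescriptor hunks above move table metadata to a map keyed by ImmutableBytesWritable, with boolean flags still stored as the bytes of "true"/"false" (see the TRUE/FALSE constants and isSomething). The sketch below is illustrative only and uses simplified stand-in types, not the HBase classes.

```java
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

// Simplified sketch of the metadata-flag pattern shown above; the real class
// keys on ImmutableBytesWritable, this example keys on String for brevity.
public class FlagMapSketch {
  private final Map<String, byte[]> values = new HashMap<>();

  void setFlag(String key, boolean flag) {
    // Booleans are stored as the UTF-8 bytes of "true"/"false".
    values.put(key, Boolean.toString(flag).getBytes(StandardCharsets.UTF_8));
  }

  boolean isSomething(String key, boolean valueIfNull) {
    byte[] value = values.get(key);
    return value == null
        ? valueIfNull
        : Boolean.parseBoolean(new String(value, StandardCharsets.UTF_8));
  }

  public static void main(String[] args) {
    FlagMapSketch sketch = new FlagMapSketch();
    sketch.setFlag("READONLY", true);
    System.out.println(sketch.isSomething("READONLY", false)); // true
    System.out.println(sketch.isSomething("IS_ROOT", false));  // false (default)
  }
}
```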
@@ -86,7 +86,7 @@ public class RegionHistorian implements HConstants {
* Get the RegionHistorian Singleton instance.
* @return The region historian
*/
public static RegionHistorian getInstance() {
public synchronized static RegionHistorian getInstance() {
if (historian == null) {
historian = new RegionHistorian();
}
@@ -0,0 +1,42 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;

/**
* Thrown when a value is longer than the specified LENGTH
*/
@SuppressWarnings("serial")
public class ValueOverMaxLengthException extends DoNotRetryIOException {

/**
* default constructor
*/
public ValueOverMaxLengthException() {
super();
}

/**
* @param message
*/
public ValueOverMaxLengthException(String message) {
super(message);
}

}
@@ -486,9 +486,10 @@ public class HConnectionManager implements HConstants {
HRegionInterface server =
getHRegionConnection(metaLocation.getServerAddress());

// query the root region for the location of the meta region
// Query the root region for the location of the meta region
RowResult regionInfoRow = server.getClosestRowBefore(
metaLocation.getRegionInfo().getRegionName(), metaKey);
metaLocation.getRegionInfo().getRegionName(), metaKey,
HConstants.COLUMN_FAMILY);

if (regionInfoRow == null) {
throw new TableNotFoundException(Bytes.toString(tableName));
@@ -37,8 +37,9 @@ public interface HRegionInterface extends VersionedProtocol {
/**
* Protocol version.
* Upped to 4 when we removed overloaded methods from the protocol.
* Upped to 5 when we changed getClosestRowBefore signature.
*/
public static final long versionID = 4L;
public static final long versionID = 5L;

/**
* Get metainfo about an HRegion

@@ -72,11 +73,12 @@ public interface HRegionInterface extends VersionedProtocol {
*
* @param regionName region name
* @param row row key
* @param columnFamily Column family to look for row in.
* @return map of values
* @throws IOException
*/
public RowResult getClosestRowBefore(final byte [] regionName,
final byte [] row)
final byte [] row, final byte [] columnFamily)
throws IOException;

/**
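The HRegionInterface hunk above adds a column-family argument to getClosestRowBefore and bumps the protocol version to 5. Below is a hedged caller-side sketch: the helper class and method name are invented for illustration, while the call itself mirrors the HConnectionManager hunk earlier in this diff.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.ipc.HRegionInterface;

// Illustrative only: invoking the new three-argument getClosestRowBefore.
// The proxy, region name and row key are supplied by the caller.
public class ClosestRowBeforeExample {
  static RowResult lookup(HRegionInterface server, byte[] regionName, byte[] row)
      throws IOException {
    // The column family argument is new in protocol version 5; narrowing the
    // search to one family is what HBASE-1046 backports.
    return server.getClosestRowBefore(regionName, row, HConstants.COLUMN_FAMILY);
  }
}
```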
@@ -22,9 +22,7 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.fs.FileSystem; //TODO: remove
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.HStoreFile; //TODO: remove
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
@@ -121,7 +121,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
final int metaRescanInterval;

// A Sleeper that sleeps for threadWakeFrequency
protected final Sleeper sleeper;
private final Sleeper sleeper;

// Default access so accesible from unit tests. MASTER is name of the webapp
// and the attribute name used stuffing this instance into web context.

@@ -587,10 +587,10 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
for (int tries = 0; tries < numRetries; tries++) {
try {
// We can not access meta regions if they have not already been
// assigned and scanned. If we timeout waiting, just shutdown.
if (regionManager.waitForMetaRegionsOrClose()) {
break;
// We can not create a table unless meta regions have already been
// assigned and scanned.
if (!regionManager.areAllMetaRegionsOnline()) {
throw new NotAllMetaRegionsOnlineException();
}
createTable(newRegion);
LOG.info("created table " + desc.getNameAsString());
@@ -62,7 +62,7 @@ class MetaScanner extends BaseScanner {
boolean scanSuccessful = false;
while (!master.closed.get() && !regionManager.isInitialRootScanComplete() &&
regionManager.getRootRegionLocation() == null) {
master.sleeper.sleep();
sleep();
}
if (master.closed.get()) {
return scanSuccessful;
@@ -19,7 +19,6 @@
*/
package org.apache.hadoop.hbase.master;

import java.util.Map; //TODO: remove
import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
@@ -0,0 +1,44 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.DoNotRetryIOException;

/**
* Thrown when an operation requires the root and all meta regions to be online
*/
@SuppressWarnings("serial")
public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException {
/**
* default constructor
*/
public NotAllMetaRegionsOnlineException() {
super();
}

/**
* @param message
*/
public NotAllMetaRegionsOnlineException(String message) {
super(message);
}

}
@@ -33,19 +33,21 @@ import org.apache.hadoop.hbase.HRegionInfo;
* necessary.
*/
class ProcessRegionClose extends ProcessRegionStatusChange {
protected final boolean offlineRegion;
protected final boolean offlineRegion;
protected final boolean reassignRegion;

/**
* @param master
* @param regionInfo Region to operate on
* @param offlineRegion if true, set the region to offline in meta
* delete the region files from disk.
* @param reassignRegion if true, region is to be reassigned
*/
public ProcessRegionClose(HMaster master, HRegionInfo regionInfo,
boolean offlineRegion) {
boolean offlineRegion, boolean reassignRegion) {

super(master, regionInfo);
this.offlineRegion = offlineRegion;
this.reassignRegion = reassignRegion;
}

@Override

@@ -56,32 +58,35 @@ class ProcessRegionClose extends ProcessRegionStatusChange {
@Override
protected boolean process() throws IOException {
Boolean result =
new RetryableMetaOperation<Boolean>(this.metaRegion, this.master) {
public Boolean call() throws IOException {
LOG.info("region closed: " + regionInfo.getRegionNameAsString());
Boolean result = null;
if (offlineRegion) {
result =
new RetryableMetaOperation<Boolean>(this.metaRegion, this.master) {
public Boolean call() throws IOException {
LOG.info("region closed: " + regionInfo.getRegionNameAsString());

// Mark the Region as unavailable in the appropriate meta table

if (!metaRegionAvailable()) {
// We can't proceed unless the meta region we are going to update
// is online. metaRegionAvailable() has put this operation on the
// is online. metaRegionAvailable() will put this operation on the
// delayedToDoQueue, so return true so the operation is not put
// back on the toDoQueue

if (metaRegionAvailable()) {
// offline the region in meta and then note that we've offlined
// the region.
HRegion.offlineRegionInMETA(server, metaRegionName,
regionInfo);
master.regionManager.regionOfflined(regionInfo.getRegionName());
}
return true;
}
}.doWithRetries();

if (offlineRegion) {
// offline the region in meta and then note that we've offlined the
// region.
HRegion.offlineRegionInMETA(server, metaRegionName,
regionInfo);
master.regionManager.regionOfflined(regionInfo.getRegionName());
}
return true;
}
}.doWithRetries();
result = result == null ? true : result;

} else if (reassignRegion) {
// we are reassigning the region eventually, so set it unassigned
master.regionManager.setUnassigned(regionInfo);
}
return result == null ? true : result;
}
}
@@ -96,13 +96,8 @@ class ProcessRegionOpen extends ProcessRegionStatusChange {
regionInfo.getRegionName(), regionInfo.getStartKey());
if (!master.regionManager.isInitialMetaScanComplete()) {
// Put it on the queue to be scanned for the first time.
try {
LOG.debug("Adding " + m.toString() + " to regions to scan");
master.regionManager.addMetaRegionToScan(m);
} catch (InterruptedException e) {
throw new RuntimeException(
"Putting into metaRegionsToScan was interrupted.", e);
}
LOG.debug("Adding " + m.toString() + " to regions to scan");
master.regionManager.addMetaRegionToScan(m);
} else {
// Add it to the online meta regions
LOG.debug("Adding to onlineMetaRegions: " + m.toString());
@@ -20,7 +20,6 @@
package org.apache.hadoop.hbase.master;

import java.io.IOException;
import java.io.UnsupportedEncodingException; //TODO: remove
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;

@@ -46,6 +45,8 @@ import org.apache.hadoop.hbase.io.RowResult;
class ProcessServerShutdown extends RegionServerOperation {
private HServerAddress deadServer;
private String deadServerName;
private final boolean rootRegionServer;
private boolean rootRegionReassigned = false;
private Path oldLogDir;
private boolean logSplit;
private boolean rootRescanned;

@@ -65,20 +66,18 @@ class ProcessServerShutdown extends RegionServerOperation {
/**
* @param master
* @param serverInfo
* @param rootRegionServer
*/
public ProcessServerShutdown(HMaster master, HServerInfo serverInfo) {
public ProcessServerShutdown(HMaster master, HServerInfo serverInfo,
boolean rootRegionServer) {
super(master);
this.deadServer = serverInfo.getServerAddress();
this.deadServerName = this.deadServer.toString();
this.rootRegionServer = rootRegionServer;
this.logSplit = false;
this.rootRescanned = false;
StringBuilder dirName = new StringBuilder("log_");
dirName.append(deadServer.getBindAddress());
dirName.append("_");
dirName.append(serverInfo.getStartCode());
dirName.append("_");
dirName.append(deadServer.getPort());
this.oldLogDir = new Path(master.rootdir, dirName.toString());
this.oldLogDir =
new Path(master.rootdir, HLog.getHLogDirectoryName(serverInfo));
}

@Override

@@ -250,6 +249,17 @@ class ProcessServerShutdown extends RegionServerOperation {
logSplit = true;
}

if (this.rootRegionServer && !this.rootRegionReassigned) {
// The server that died was serving the root region. Now that the log
// has been split, get it reassigned.
master.regionManager.reassignRootRegion();
// avoid multiple root region reassignment
this.rootRegionReassigned = true;
// When we call rootAvailable below, it will put us on the delayed
// to do queue to allow some time to pass during which the root
// region will hopefully get reassigned.
}

if (!rootAvailable()) {
// Return true so that worker does not put this request back on the
// toDoQueue.
@@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.util.Writables;
class RegionManager implements HConstants {
protected static final Log LOG = LogFactory.getLog(RegionManager.class);

private volatile AtomicReference<HServerAddress> rootRegionLocation =
private AtomicReference<HServerAddress> rootRegionLocation =
new AtomicReference<HServerAddress>(null);

final Lock splitLogLock = new ReentrantLock();

@@ -118,14 +118,16 @@ class RegionManager implements HConstants {
private final int maxAssignInOneGo;

private final HMaster master;

private final RegionHistorian historian;
private final float slop;

RegionManager(HMaster master) {
this.master = master;
this.historian = RegionHistorian.getInstance();
this.maxAssignInOneGo = this.master.getConfiguration().
getInt("hbase.regions.percheckin", 10);
this.slop = this.master.getConfiguration().getFloat("hbase.regions.slop",
(float)0.1);

// The root region
rootScannerThread = new RootScanner(master, this);

@@ -158,6 +160,17 @@ class RegionManager implements HConstants {
}
}

void unsetRootRegion() {
rootRegionLocation.set(null);
}

void reassignRootRegion() {
unsetRootRegion();
if (!master.shutdownRequested) {
unassignedRegions.put(HRegionInfo.ROOT_REGIONINFO, ZERO_L);
}
}

/*
* Assigns regions to region servers attempting to balance the load across
* all region servers

@@ -178,16 +191,24 @@ class RegionManager implements HConstants {
// worked on elsewhere.
Set<HRegionInfo> regionsToAssign = regionsAwaitingAssignment();
if (regionsToAssign.size() == 0) {
// There are no regions waiting to be assigned. This is an opportunity
// for us to check if this server is overloaded.
double avgLoad = master.serverManager.getAverageLoad();
if (avgLoad > 2.0 && thisServersLoad.getNumberOfRegions() > avgLoad) {
if (LOG.isDebugEnabled()) {
LOG.debug("Server " + serverName + " is overloaded. Server load: " +
thisServersLoad.getNumberOfRegions() + " avg: " + avgLoad);
// There are no regions waiting to be assigned.
if (allRegionsAssigned()) {
// We only do load balancing once all regions are assigned.
// This prevents churn while the cluster is starting up.
double avgLoad = master.serverManager.getAverageLoad();
double avgLoadWithSlop = avgLoad +
((this.slop != 0)? avgLoad * this.slop: avgLoad);
if (avgLoad > 2.0 &&
thisServersLoad.getNumberOfRegions() > avgLoadWithSlop) {
if (LOG.isDebugEnabled()) {
LOG.debug("Server " + serverName +
" is overloaded. Server load: " +
thisServersLoad.getNumberOfRegions() + " avg: " + avgLoad +
", slop: " + this.slop);
}
unassignSomeRegions(thisServersLoad, avgLoad, mostLoadedRegions,
returnMsgs);
}
unassignSomeRegions(thisServersLoad, avgLoad, mostLoadedRegions,
returnMsgs);
}
} else {
// if there's only one server, just give it all the regions

@@ -483,10 +504,16 @@ class RegionManager implements HConstants {
* Block until meta regions are online or we're shutting down.
* @return true if we found meta regions, false if we're closing.
*/
public boolean waitForMetaRegionsOrClose() {
return metaScannerThread.waitForMetaRegionsOrClose();
public boolean areAllMetaRegionsOnline() {
boolean result = false;
if (rootRegionLocation.get() != null &&
numberOfMetaRegions.get() == onlineMetaRegions.size()) {
result = true;
}
return result;
}

/**
* Search our map of online meta regions to find the first meta region that
* should contain a pointer to <i>newRegion</i>.

@@ -513,16 +540,25 @@ class RegionManager implements HConstants {
* Get a set of all the meta regions that contain info about a given table.
* @param tableName Table you need to know all the meta regions for
* @return set of MetaRegion objects that contain the table
* @throws NotAllMetaRegionsOnlineException
*/
public Set<MetaRegion> getMetaRegionsForTable(byte [] tableName) {
public Set<MetaRegion> getMetaRegionsForTable(byte [] tableName)
throws NotAllMetaRegionsOnlineException {
byte [] firstMetaRegion = null;
Set<MetaRegion> metaRegions = new HashSet<MetaRegion>();

if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
if (rootRegionLocation.get() == null) {
throw new NotAllMetaRegionsOnlineException(
Bytes.toString(HConstants.ROOT_TABLE_NAME));
}
metaRegions.add(new MetaRegion(rootRegionLocation.get(),
HRegionInfo.ROOT_REGIONINFO.getRegionName()));

} else {
if (!areAllMetaRegionsOnline()) {
throw new NotAllMetaRegionsOnlineException();
}
synchronized (onlineMetaRegions) {
if (onlineMetaRegions.size() == 1) {
firstMetaRegion = onlineMetaRegions.firstKey();

@@ -582,9 +618,9 @@ class RegionManager implements HConstants {
* @return list of MetaRegion objects
*/
public List<MetaRegion> getListOfOnlineMetaRegions() {
List<MetaRegion> regions = new ArrayList<MetaRegion>();
List<MetaRegion> regions = null;
synchronized(onlineMetaRegions) {
regions.addAll(onlineMetaRegions.values());
regions = new ArrayList<MetaRegion>(onlineMetaRegions.values());
}
return regions;
}

@@ -795,9 +831,8 @@ class RegionManager implements HConstants {
/**
* Add a meta region to the scan queue
* @param m MetaRegion that needs to get scanned
* @throws InterruptedException
*/
public void addMetaRegionToScan(MetaRegion m) throws InterruptedException {
public void addMetaRegionToScan(MetaRegion m) {
metaScannerThread.addMetaRegionToScan(m);
}

@@ -842,6 +877,15 @@ class RegionManager implements HConstants {
return metaScannerThread.isInitialScanComplete();
}

/**
* @return true if the initial meta scan is complete and there are no
* unassigned or pending regions
*/
public boolean allRegionsAssigned() {
return isInitialMetaScanComplete() && unassignedRegions.size() == 0 &&
pendingRegions.size() == 0;
}

/**
* Get the root region location.
* @return HServerAddress describing root region server.
@@ -69,12 +69,6 @@ abstract class RegionServerOperation implements Delayed, HConstants {
protected boolean metaTableAvailable() {
boolean available = true;
if (LOG.isDebugEnabled()) {
LOG.debug("numberOfMetaRegions: " +
master.regionManager.numMetaRegions() +
", onlineMetaRegions.size(): " +
master.regionManager.numOnlineMetaRegions());
}
if (master.regionManager.numMetaRegions() !=
master.regionManager.numOnlineMetaRegions()) {
// We can't proceed because not all of the meta regions are online.

@@ -83,6 +77,10 @@ abstract class RegionServerOperation implements Delayed, HConstants {
// in the run queue, put this request on the delay queue to give
// other threads the opportunity to get the meta regions on-line.
if (LOG.isDebugEnabled()) {
LOG.debug("numberOfMetaRegions: " +
master.regionManager.numMetaRegions() +
", onlineMetaRegions.size(): " +
master.regionManager.numOnlineMetaRegions());
LOG.debug("Requeuing because not all meta regions are online");
}
available = false;
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Sleeper;

/**
* Uses Callable pattern so that operations against meta regions do not need

@@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
*/
abstract class RetryableMetaOperation<T> implements Callable<T> {
protected final Log LOG = LogFactory.getLog(this.getClass());
protected final Sleeper sleeper;
protected final MetaRegion m;
protected final HMaster master;

@@ -47,6 +49,7 @@ abstract class RetryableMetaOperation<T> implements Callable<T> {
protected RetryableMetaOperation(MetaRegion m, HMaster master) {
this.m = m;
this.master = master;
this.sleeper = new Sleeper(master.threadWakeFrequency, master.closed);
}

protected T doWithRetries()

@@ -89,7 +92,7 @@ abstract class RetryableMetaOperation<T> implements Callable<T> {
} catch (Exception e) {
throw new RuntimeException(e);
}
master.sleeper.sleep();
sleeper.sleep();
}
return null;
}
@@ -124,10 +124,17 @@ class ServerManager implements HConstants {
// The startup message was from a known server with the same name.
// Timeout the old one right away.
HServerAddress root = master.getRootRegionLocation();
boolean rootServer = false;
if (root != null && root.equals(storedInfo.getServerAddress())) {
master.regionManager.unassignRootRegion();
master.regionManager.unsetRootRegion();
rootServer = true;
}
try {
master.toDoQueue.put(
new ProcessServerShutdown(master, storedInfo, rootServer));
} catch (InterruptedException e) {
LOG.error("Insertion into toDoQueue was interrupted", e);
}
master.delayedToDoQueue.put(new ProcessServerShutdown(master, storedInfo));
}

// record new server

@@ -285,16 +292,18 @@ class ServerManager implements HConstants {
serversToServerInfo.put(serverName, serverInfo);

HServerLoad load = serversToLoad.get(serverName);
if (load != null && !load.equals(serverInfo.getLoad())) {
// We have previous information about the load on this server
// and the load on this server has changed
synchronized (loadToServers) {
Set<String> servers = loadToServers.get(load);
if (load != null) {
if (!load.equals(serverInfo.getLoad())) {
// We have previous information about the load on this server
// and the load on this server has changed
synchronized (loadToServers) {
Set<String> servers = loadToServers.get(load);

// Note that servers should never be null because loadToServers
// and serversToLoad are manipulated in pairs
servers.remove(serverName);
loadToServers.put(load, servers);
// Note that servers should never be null because loadToServers
// and serversToLoad are manipulated in pairs
servers.remove(serverName);
loadToServers.put(load, servers);
}
}
}

@@ -499,19 +508,17 @@ class ServerManager implements HConstants {
// the ProcessRegionClose going on asynchronously.
master.regionManager.noLongerUnassigned(region);

if (!reassignRegion) {
// either the region is being offlined or deleted. we want to do those
// operations asynchronously, so we'll creating a todo item for that.
try {
master.toDoQueue.put(new ProcessRegionClose(master, region,
offlineRegion));
} catch (InterruptedException e) {
throw new RuntimeException(
"Putting into toDoQueue was interrupted.", e);
}
} else {
// we are reassigning the region eventually, so set it unassigned
master.regionManager.setUnassigned(region);
// NOTE: we cannot put the region into unassignedRegions as that
// changes the ordering of the messages we've received. In
// this case, a close could be processed before an open
// resulting in the master not agreeing on the region's
// state.
try {
master.toDoQueue.put(new ProcessRegionClose(master, region,
offlineRegion, reassignRegion));
} catch (InterruptedException e) {
throw new RuntimeException(
"Putting into toDoQueue was interrupted.", e);
}
}
}

@@ -670,10 +677,12 @@ class ServerManager implements HConstants {
LOG.info(server + " lease expired");
// Remove the server from the known servers list and update load info
HServerInfo info = serversToServerInfo.remove(server);
boolean rootServer = false;
if (info != null) {
HServerAddress root = master.getRootRegionLocation();
if (root != null && root.equals(info.getServerAddress())) {
master.regionManager.unassignRootRegion();
master.regionManager.unsetRootRegion();
rootServer = true;
}
String serverName = info.getServerAddress().toString();
HServerLoad load = serversToLoad.remove(serverName);

@@ -687,17 +696,16 @@ class ServerManager implements HConstants {
}
}
deadServers.add(server);
try {
master.toDoQueue.put(
new ProcessServerShutdown(master, info, rootServer));
} catch (InterruptedException e) {
LOG.error("insert into toDoQueue was interrupted", e);
}
}
synchronized (serversToServerInfo) {
serversToServerInfo.notifyAll();
}

// NOTE: If the server was serving the root region, we cannot reassign it
// here because the new server will start serving the root region before
// the ProcessServerShutdown operation has a chance to split the log file.
if (info != null) {
master.delayedToDoQueue.put(new ProcessServerShutdown(master, info));
}
}
}
@@ -48,7 +48,7 @@ public class BeforeThisStoreKey extends HStoreKey {
}

@Override
public int compareTo(Object o) {
public int compareTo(final HStoreKey o) {
int result = this.beforeThisKey.compareTo(o);
return result == 0? -1: result;
}
@@ -0,0 +1,38 @@
/**
* Copyright 2008 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;

/**
* Thrown when we fail close of the write-ahead-log file.
* Package private. Only used inside this package.
*/
class FailedLogCloseException extends IOException {
private static final long serialVersionUID = 1759152841462990925L;

public FailedLogCloseException() {
super();
}

public FailedLogCloseException(String arg0) {
super(arg0);
}
}
@@ -178,7 +178,7 @@ class Flusher extends Thread implements FlushRequester {
// is required. Currently the only way to do this is a restart of
// the server. Abort because hdfs is probably bad (HBASE-644 is a case
// where hdfs was bad but passed the hdfs check).
LOG.fatal("Replay of hlog required. Forcing server restart", ex);
LOG.fatal("Replay of hlog required. Forcing server shutdown", ex);
server.abort();
return false;
} catch (IOException ex) {
@@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.Collections;
import java.util.Map;
import java.util.SortedMap;

@@ -39,6 +41,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RemoteExceptionHandler;

@@ -168,7 +171,7 @@ public class HLog implements HConstants {
}

/*
* Accessor for tests.
* Accessor for tests. Not part of public API. Hence no javadoc.
* @return Current state of the monotonically increasing file id.
*/
public long getFilenum() {

@@ -226,9 +229,10 @@ public class HLog implements HConstants {
* cacheFlushLock and then completeCacheFlush could be called which would wait
* for the lock on this and consequently never release the cacheFlushLock
*
* @throws FailedLogCloseException
* @throws IOException
*/
public void rollWriter() throws IOException {
public void rollWriter() throws FailedLogCloseException, IOException {
this.cacheFlushLock.lock();
try {
if (closed) {

@@ -237,7 +241,17 @@ public class HLog implements HConstants {
synchronized (updateLock) {
if (this.writer != null) {
// Close the current writer, get a new one.
this.writer.close();
try {
this.writer.close();
} catch (IOException e) {
// Failed close of log file. Means we're losing edits. For now,
// shut ourselves down to minimize loss. Alternative is to try and
// keep going. See HBASE-930.
FailedLogCloseException flce =
new FailedLogCloseException("#" + this.filenum);
flce.initCause(e);
throw flce;
}
Path p = computeFilename(old_filenum);
if (LOG.isDebugEnabled()) {
LOG.debug("Closing current log writer " + FSUtils.getPath(p));
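rollWriter can now surface the new FailedLogCloseException (introduced earlier in this diff). The sketch below is an assumption about how a caller might treat that as fatal, in the spirit of the HBASE-930 comment above; the helper class and the abortHook parameter are invented for illustration and are not part of this changeset.

```java
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;

// Illustrative only: a caller that treats a failed WAL close as fatal.
// FailedLogCloseException is package-private, so this sketch lives in the
// same package; 'abortHook' stands in for whatever shutdown path the
// caller uses.
class LogRollHelper {
  static void rollOrAbort(HLog log, Runnable abortHook) throws IOException {
    try {
      log.rollWriter();
    } catch (FailedLogCloseException e) {
      // The old writer could not be closed, so edits may already be lost;
      // shut down rather than keep appending to a bad log.
      abortHook.run();
      throw e;
    }
  }
}
```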
@@ -608,85 +622,90 @@ public class HLog implements HConstants {
LOG.debug("Splitting " + i + " of " + logfiles.length + ": " +
logfiles[i].getPath());
}
// Check for empty file.
if (logfiles[i].getLen() <= 0) {
LOG.info("Skipping " + logfiles[i].toString() +
" because zero length");
continue;
}
// Check for possibly empty file. With appends, currently Hadoop reports
// a zero length even if the file has been sync'd. Revisit if
// HADOOP-4751 is committed.
boolean possiblyEmpty = logfiles[i].getLen() <= 0;
HLogKey key = new HLogKey();
HLogEdit val = new HLogEdit();
SequenceFile.Reader in =
new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
try {
int count = 0;
for (; in.next(key, val); count++) {
byte [] tableName = key.getTablename();
byte [] regionName = key.getRegionName();
SequenceFile.Writer w = logWriters.get(regionName);
if (w == null) {
Path logfile = new Path(
HRegion.getRegionDir(
HTableDescriptor.getTableDir(rootDir, tableName),
HRegionInfo.encodeRegionName(regionName)),
HREGION_OLDLOGFILE_NAME);
Path oldlogfile = null;
SequenceFile.Reader old = null;
if (fs.exists(logfile)) {
LOG.warn("Old log file " + logfile +
" already exists. Copying existing file to new file");
oldlogfile = new Path(logfile.toString() + ".old");
fs.rename(logfile, oldlogfile);
old = new SequenceFile.Reader(fs, oldlogfile, conf);
}
w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
HLogEdit.class, getCompressionType(conf));
// Use copy of regionName; regionName object is reused inside in
// HStoreKey.getRegionName so its content changes as we iterate.
logWriters.put(regionName, w);
if (LOG.isDebugEnabled()) {
LOG.debug("Creating new log file writer for path " + logfile +
" and region " + regionName);
}

if (old != null) {
// Copy from existing log file
HLogKey oldkey = new HLogKey();
HLogEdit oldval = new HLogEdit();
for (; old.next(oldkey, oldval); count++) {
if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
LOG.debug("Copied " + count + " edits");
}
w.append(oldkey, oldval);
SequenceFile.Reader in =
new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
try {
int count = 0;
for (; in.next(key, val); count++) {
byte [] tableName = key.getTablename();
byte [] regionName = key.getRegionName();
SequenceFile.Writer w = logWriters.get(regionName);
if (w == null) {
Path logfile = new Path(
HRegion.getRegionDir(
HTableDescriptor.getTableDir(rootDir, tableName),
HRegionInfo.encodeRegionName(regionName)),
HREGION_OLDLOGFILE_NAME);
Path oldlogfile = null;
SequenceFile.Reader old = null;
if (fs.exists(logfile)) {
LOG.warn("Old log file " + logfile +
" already exists. Copying existing file to new file");
oldlogfile = new Path(logfile.toString() + ".old");
fs.rename(logfile, oldlogfile);
old = new SequenceFile.Reader(fs, oldlogfile, conf);
}
w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
HLogEdit.class, getCompressionType(conf));
// Use copy of regionName; regionName object is reused inside in
// HStoreKey.getRegionName so its content changes as we iterate.
logWriters.put(regionName, w);
if (LOG.isDebugEnabled()) {
LOG.debug("Creating new log file writer for path " + logfile +
" and region " + regionName);
}

if (old != null) {
// Copy from existing log file
HLogKey oldkey = new HLogKey();
HLogEdit oldval = new HLogEdit();
for (; old.next(oldkey, oldval); count++) {
if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
LOG.debug("Copied " + count + " edits");
}
w.append(oldkey, oldval);
}
old.close();
fs.delete(oldlogfile, true);
}
old.close();
fs.delete(oldlogfile, true);
}
w.append(key, val);
}
w.append(key, val);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Applied " + count + " total edits from " +
logfiles[i].getPath().toString());
if (LOG.isDebugEnabled()) {
LOG.debug("Applied " + count + " total edits from " +
logfiles[i].getPath().toString());
}
} catch (IOException e) {
e = RemoteExceptionHandler.checkIOException(e);
if (!(e instanceof EOFException)) {
LOG.warn("Exception processing " + logfiles[i].getPath() +
" -- continuing. Possible DATA LOSS!", e);
}
} finally {
try {
in.close();
} catch (IOException e) {
LOG.warn("Close in finally threw exception -- continuing", e);
}
// Delete the input file now so we do not replay edits. We could
// have gotten here because of an exception. If so, probably
// nothing we can do about it. Replaying it, it could work but we
// could be stuck replaying for ever. Just continue though we
// could have lost some edits.
fs.delete(logfiles[i].getPath(), true);
}
} catch (IOException e) {
e = RemoteExceptionHandler.checkIOException(e);
if (!(e instanceof EOFException)) {
LOG.warn("Exception processing " + logfiles[i].getPath() +
" -- continuing. Possible DATA LOSS!", e);
if (possiblyEmpty) {
continue;
}
} finally {
try {
in.close();
} catch (IOException e) {
LOG.warn("Close in finally threw exception -- continuing", e);
}
// Delete the input file now so we do not replay edits. We could
// have gotten here because of an exception. If so, probably
// nothing we can do about it. Replaying it, it could work but we
// could be stuck replaying for ever. Just continue though we
// could have lost some edits.
fs.delete(logfiles[i].getPath(), true);
throw e;
}
}
} finally {
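The up-front zero-length skip is gone: with appends, a log that has been sync'd can still report length 0 (see HADOOP-4751), so each file is opened anyway and the suspicion is only acted on if reading it fails. A condensed sketch of that guard, using the names from the hunk above, with the replay body elided and the close handling simplified:

    for (int i = 0; i < logfiles.length; i++) {
      // A sync'd-but-unreported log can still show length 0, so just remember it.
      boolean possiblyEmpty = logfiles[i].getLen() <= 0;
      SequenceFile.Reader in =
        new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
      try {
        // ... replay edits into the per-region writers ...
      } catch (IOException e) {
        if (possiblyEmpty) {
          continue;   // a failed read of a suspected-empty log is not fatal
        }
        throw e;      // anything else aborts the split
      } finally {
        in.close();   // wrapped in its own try/catch in the real code
      }
    }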
@@ -706,6 +725,28 @@ public class HLog implements HConstants {
LOG.info("log file splitting completed for " + srcDir.toString());
}

/**
* Construct the HLog directory name
*
* @param info HServerInfo for server
* @return the HLog directory name
*/
public static String getHLogDirectoryName(HServerInfo info) {
StringBuilder dirName = new StringBuilder("log_");
try {
dirName.append(URLEncoder.encode(
info.getServerAddress().getBindAddress(), UTF8_ENCODING));
} catch (UnsupportedEncodingException e) {
LOG.error("Error encoding '" + info.getServerAddress().getBindAddress()
+ "'", e);
}
dirName.append("_");
dirName.append(info.getStartCode());
dirName.append("_");
dirName.append(info.getServerAddress().getPort());
return dirName.toString();
}

private static void usage() {
System.err.println("Usage: java org.apache.hbase.HLog" +
" {--dump <logfile>... | --split <logdir>...}");
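This helper centralizes the log-directory naming that setupHLog() previously built inline (see the HRegionServer hunk further down). The produced name is log_<url-encoded bind address>_<start code>_<port>; for example, with made-up values:

    // Illustrative values only; serverInfo is the HServerInfo the region
    // server already holds, as used in setupHLog().
    String dir = HLog.getHLogDirectoryName(serverInfo);
    // e.g. "log_10.0.0.5_1224970260000_60020"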
@@ -326,7 +326,8 @@ public class HRegion implements HConstants {
private final Map<Integer, TreeMap<HStoreKey, byte []>> targetColumns =
new ConcurrentHashMap<Integer, TreeMap<HStoreKey, byte []>>();
// Default access because read by tests.
protected final Map<Integer, HStore> stores = new ConcurrentHashMap<Integer, HStore>();
protected final Map<Integer, HStore> stores =
new ConcurrentHashMap<Integer, HStore>();
final AtomicLong memcacheSize = new AtomicLong(0);

final Path basedir;

@@ -894,7 +895,8 @@ public class HRegion implements HConstants {
return midKey;
}
}
LOG.info("starting compaction on region " + this);
LOG.info("starting " + (majorCompaction? "major" : "") +
" compaction on region " + this);
long startTime = System.currentTimeMillis();
doRegionCompactionPrep();
long maxSize = -1;

@@ -1236,40 +1238,47 @@ public class HRegion implements HConstants {
* @return map of values
* @throws IOException
*/
public RowResult getClosestRowBefore(final byte [] row)
throws IOException{
RowResult getClosestRowBefore(final byte [] row) throws IOException{
return getClosestRowBefore(row, HConstants.COLUMN_FAMILY);
}

/**
* Return all the data for the row that matches <i>row</i> exactly,
* or the one that immediately preceeds it, at or immediately before
* <i>ts</i>.
*
* @param row row key
* @param columnFamily
* @return map of values
* @throws IOException
*/
public RowResult getClosestRowBefore(final byte [] row,
final byte [] columnFamily) throws IOException{
// look across all the HStores for this region and determine what the
// closest key is across all column families, since the data may be sparse
HStoreKey key = null;
checkRow(row);
splitsAndClosesLock.readLock().lock();
try {
// examine each column family for the preceeding or matching key
for (HStore store : stores.values()) {
// get the closest key
byte [] closestKey = store.getRowKeyAtOrBefore(row);
// if it happens to be an exact match, we can stop looping
if (HStoreKey.equalsTwoRowKeys(regionInfo,row, closestKey)) {
key = new HStoreKey(closestKey, this.regionInfo);
break;
}
// otherwise, we need to check if it's the max and move to the next
if (closestKey != null
&& (key == null || HStoreKey.compareTwoRowKeys(
HStore store = getStore(columnFamily);
// get the closest key
byte [] closestKey = store.getRowKeyAtOrBefore(row);
// If it happens to be an exact match, we can stop looping.
// Otherwise, we need to check if it's the max and move to the next
if (HStoreKey.equalsTwoRowKeys(regionInfo, row, closestKey)) {
key = new HStoreKey(closestKey, this.regionInfo);
} else if (closestKey != null &&
(key == null || HStoreKey.compareTwoRowKeys(
regionInfo,closestKey, key.getRow()) > 0) ) {
key = new HStoreKey(closestKey, this.regionInfo);
}
}
if (key == null) {
key = new HStoreKey(closestKey, this.regionInfo);
} else {
return null;
}

// now that we've found our key, get the values
// Now that we've found our key, get the values
HbaseMapWritable<byte [], Cell> cells =
new HbaseMapWritable<byte [], Cell>();
for (HStore s: stores.values()) {
s.getFull(key, null, cells);
}
store.getFull(key, null, cells);
return new RowResult(key.getRow(), cells);
} finally {
splitsAndClosesLock.readLock().unlock();
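The lookup is now narrowed to a single column family: instead of asking every HStore in the region for its closest row, only the store for the requested family is consulted, and null comes back when that family has nothing at or before the row. A minimal, illustrative call; the region and row variables are assumptions, not part of this patch:

    // Illustrative only; 'region' is an open HRegion, 'row' a byte [] row key.
    RowResult rr = region.getClosestRowBefore(row, HConstants.COLUMN_FAMILY);
    if (rr != null) {
      byte [] matchedRow = rr.getRow();   // the row actually matched (may precede 'row')
      // rr maps column names to Cells for that row, from the requested family only.
    }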
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;

@@ -68,6 +69,7 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionHistorian;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.ValueOverMaxLengthException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.UnknownRowLockException;
import org.apache.hadoop.hbase.Leases.LeaseStillHeldException;

@@ -171,25 +173,34 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
*/
class ShutdownThread extends Thread {
private final HRegionServer instance;
private final Thread mainThread;

/**
* @param instance
* @param mainThread
*/
public ShutdownThread(HRegionServer instance) {
public ShutdownThread(HRegionServer instance, Thread mainThread) {
this.instance = instance;
this.mainThread = mainThread;
}

@Override
public void run() {
LOG.info("Starting shutdown thread.");

// tell the region server to stop and wait for it to complete
// tell the region server to stop
instance.stop();
instance.join();

// Wait for main thread to exit.
instance.join(mainThread);

LOG.info("Shutdown thread complete");
}
}

// We need to call HDFS shutdown when we are done shutting down
private Thread hdfsShutdownThread;

// Compactions
final CompactSplitThread compactSplitThread;
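The hook now takes a reference to the server's main thread so it can block until run() has fully exited before the JVM continues shutting down. Registration happens later in startup (see the @@ -518 hunk below), roughly like this; the wiring shown is taken from that hunk, presumably because Thread.currentThread() at that point is the thread driving the main loop:

    // Sketch of the registration shown further down in this compare.
    Runtime.getRuntime().addShutdownHook(
        new ShutdownThread(this, Thread.currentThread()));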
@@ -269,10 +280,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
for(int i = 0; i < nbBlocks; i++) {
reservedSpace.add(new byte[DEFAULT_SIZE_RESERVATION_BLOCK]);
}

// Register shutdown hook for HRegionServer, runs an orderly shutdown
// when a kill signal is recieved
Runtime.getRuntime().addShutdownHook(new ShutdownThread(this));
}

/**

@@ -293,11 +300,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
long now = System.currentTimeMillis();
if (lastMsg != 0 && (now - lastMsg) >= serverLeaseTimeout) {
// It has been way too long since we last reported to the master.
// Commit suicide.
LOG.fatal("unable to report to master for " + (now - lastMsg) +
" milliseconds - aborting server");
abort();
break;
LOG.warn("unable to report to master for " + (now - lastMsg) +
" milliseconds - retrying");
}
if ((now - lastMsg) >= msgInterval) {
HMsg outboundArray[] = null;

@@ -402,12 +406,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
LOG.warn("Processing message (Retry: " + tries + ")", e);
tries++;
} else {
LOG.fatal("Exceeded max retries: " + this.numRetries, e);
if (!checkFileSystem()) {
continue;
}
// Something seriously wrong. Shutdown.
stop();
LOG.error("Exceeded max retries: " + this.numRetries, e);
checkFileSystem();
}
}
}

@@ -445,8 +445,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
if (this.fsOk) {
// Only try to clean up if the file system is available
try {
this.log.close();
LOG.info("On abort, closed hlog");
if (this.log != null) {
this.log.close();
LOG.info("On abort, closed hlog");
}
} catch (IOException e) {
LOG.error("Unable to close log in abort",
RemoteExceptionHandler.checkIOException(e));

@@ -488,6 +490,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
this.hbaseMaster = null;
}
join();

LOG.info("Running hdfs shutdown thread");
hdfsShutdownThread.start();
try {
hdfsShutdownThread.join();
LOG.info("Hdfs shutdown thread completed.");
} catch (InterruptedException e) {
LOG.warn("hdfsShutdownThread.join() was interrupted", e);
}
LOG.info(Thread.currentThread().getName() + " exiting");
}

@@ -518,6 +529,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
// to defaults).
this.conf.set("fs.default.name", this.conf.get("hbase.rootdir"));
this.fs = FileSystem.get(this.conf);

// Register shutdown hook for HRegionServer, runs an orderly shutdown
// when a kill signal is recieved
Runtime.getRuntime().addShutdownHook(new ShutdownThread(this,
Thread.currentThread()));
this.hdfsShutdownThread = suppressHdfsShutdownHook();

this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
this.log = setupHLog();
startServiceThreads();

@@ -533,6 +551,43 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
}
}

/**
* So, HDFS caches FileSystems so when you call FileSystem.get it's fast. In
* order to make sure things are cleaned up, it also creates a shutdown hook
* so that all filesystems can be closed when the process is terminated. This
* conveniently runs concurrently with our own shutdown handler, and
* therefore causes all the filesystems to be closed before the server can do
* all its necessary cleanup.
*
* The crazy dirty reflection in this method sneaks into the FileSystem cache
* and grabs the shutdown hook, removes it from the list of active shutdown
* hooks, and hangs onto it until later. Then, after we're properly done with
* our graceful shutdown, we can execute the hdfs hook manually to make sure
* loose ends are tied up.
*
* This seems quite fragile and susceptible to breaking if Hadoop changes
* anything about the way this cleanup is managed. Keep an eye on things.
*/
private Thread suppressHdfsShutdownHook() {
try {
Field field = FileSystem.class.getDeclaredField ("clientFinalizer");
field.setAccessible(true);
Thread hdfsClientFinalizer = (Thread)field.get(null);
if (hdfsClientFinalizer == null) {
throw new RuntimeException("client finalizer is null, can't suppress!");
}
Runtime.getRuntime().removeShutdownHook(hdfsClientFinalizer);
return hdfsClientFinalizer;

} catch (NoSuchFieldException nsfe) {
LOG.fatal("Couldn't find field 'clientFinalizer' in FileSystem!", nsfe);
throw new RuntimeException("Failed to suppress HDFS shutdown hook");
} catch (IllegalAccessException iae) {
LOG.fatal("Couldn't access field 'clientFinalizer' in FileSystem!", iae);
throw new RuntimeException("Failed to suppress HDFS shutdown hook");
}
}

/**
* Report the status of the server. A server is online once all the startup
* is completed (setting up filesystem, starting service threads, etc.). This
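The javadoc above explains the trick; condensed, the lifecycle these hunks introduce is: during initialization pull Hadoop's clientFinalizer hook out of the JVM's shutdown-hook list and hold onto it, then at the very end of run() execute it by hand. A rough sketch of that sequence (exception handling omitted; the field name is Hadoop-version dependent, as the javadoc warns):

    // During startup: steal the HDFS hook so it cannot fire before our own cleanup.
    Field f = FileSystem.class.getDeclaredField("clientFinalizer");
    f.setAccessible(true);
    Thread hdfsHook = (Thread) f.get(null);
    Runtime.getRuntime().removeShutdownHook(hdfsHook);

    // ... orderly HRegionServer shutdown runs here ...

    // At the tail of run(): now let HDFS close its cached FileSystems.
    hdfsHook.start();
    hdfsHook.join();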
@@ -546,10 +601,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
private HLog setupHLog() throws RegionServerRunningException,
IOException {

Path logdir = new Path(rootDir, "log" + "_" +
serverInfo.getServerAddress().getBindAddress() + "_" +
this.serverInfo.getStartCode() + "_" +
this.serverInfo.getServerAddress().getPort());
Path logdir = new Path(rootDir, HLog.getHLogDirectoryName(serverInfo));
if (LOG.isDebugEnabled()) {
LOG.debug("Log dir " + logdir);
}

@@ -684,7 +736,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
join(this.logRoller);
}

private void join(final Thread t) {
void join(final Thread t) {
while (t.isAlive()) {
try {
t.join();

@@ -698,17 +750,26 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
* Let the master know we're here
* Run initialization using parameters passed us by the master.
*/
private MapWritable reportForDuty(final Sleeper sleeper)
throws IOException {
private MapWritable reportForDuty(final Sleeper sleeper) {
if (LOG.isDebugEnabled()) {
LOG.debug("Telling master at " +
conf.get(MASTER_ADDRESS) + " that we are up");
}
// Do initial RPC setup. The final argument indicates that the RPC should retry indefinitely.
this.hbaseMaster = (HMasterRegionInterface)HbaseRPC.waitForProxy(
HMasterRegionInterface.class, HMasterRegionInterface.versionID,
new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
this.conf, -1);
HMasterRegionInterface master = null;
while (!stopRequested.get() && master == null) {
try {
// Do initial RPC setup. The final argument indicates that the RPC
// should retry indefinitely.
master = (HMasterRegionInterface)HbaseRPC.waitForProxy(
HMasterRegionInterface.class, HMasterRegionInterface.versionID,
new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
this.conf, -1);
} catch (IOException e) {
LOG.warn("Unable to connect to master. Retrying. Error was:", e);
sleeper.sleep();
}
}
this.hbaseMaster = master;
MapWritable result = null;
long lastMsg = 0;
while(!stopRequested.get()) {

@@ -806,7 +867,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
if(e == null || stopRequested.get()) {
continue;
}
LOG.info(e.msg);
LOG.info("Worker: " + e.msg);
switch(e.msg.getType()) {

case MSG_REGIONSERVER_QUIESCE:

@@ -1046,7 +1107,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
}

public RowResult getClosestRowBefore(final byte [] regionName,
final byte [] row)
final byte [] row, final byte [] columnFamily)
throws IOException {
checkOpen();
requestCount.incrementAndGet();

@@ -1054,7 +1115,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
// locate the region we're operating on
HRegion region = getRegion(regionName);
// ask the region for all the data
RowResult rr = region.getClosestRowBefore(row);
RowResult rr = region.getClosestRowBefore(row, columnFamily);
return rr;
} catch (IOException e) {
checkFileSystem();

@@ -1134,7 +1195,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
if (fam != null) {
int maxLength = fam.getMaxValueLength();
if (operation.getValue().length > maxLength) {
throw new IOException("Value in column "
throw new ValueOverMaxLengthException("Value in column "
+ Bytes.toString(operation.getColumn()) + " is too long. "
+ operation.getValue().length + " instead of " + maxLength);
}
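Throwing the more specific ValueOverMaxLengthException (newly imported above) instead of a plain IOException lets the client recognize a permanent failure and give up at once rather than burning through its retries. A hypothetical client-side fragment, assuming the 0.18-era HTable/BatchUpdate API and that the RPC layer surfaces the server-side exception class to the caller:

    // Hypothetical usage; 'table' is an open HTable and 'update' a BatchUpdate
    // carrying a value longer than the column family's max value length.
    try {
      table.commit(update);
    } catch (ValueOverMaxLengthException e) {
      // Surfaces immediately instead of being retried to exhaustion.
      LOG.error("Value too large for column family", e);
    }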
@@ -600,7 +600,7 @@ public class HStore implements HConstants {
this.info, family.getName(), -1L, null);
MapFile.Writer out = flushedFile.getWriter(this.fs, this.compression,
this.family.isBloomfilter(), cache.size());
out.setIndexInterval(family.getMapFileIndexInterval());
setIndexInterval(out);

// Here we tried picking up an existing HStoreFile from disk and
// interlacing the memcache flush compacting as we go. The notion was

@@ -649,6 +649,27 @@ public class HStore implements HConstants {
return storefiles.size() >= compactionThreshold;
}

/*
* Set the index interval for the mapfile. There are two sources for
* configuration information: the HCD, and the global HBase config.
* If a source returns the default value, it is ignored. Otherwise,
* the smallest non-default value is preferred.
*/
private void setIndexInterval(MapFile.Writer writer) {
int familyInterval = this.family.getMapFileIndexInterval();
int interval = this.conf.getInt("hbase.io.index.interval",
HColumnDescriptor.DEFAULT_MAPFILE_INDEX_INTERVAL);
if (familyInterval != HColumnDescriptor.DEFAULT_MAPFILE_INDEX_INTERVAL) {
if (interval != HColumnDescriptor.DEFAULT_MAPFILE_INDEX_INTERVAL) {
if (familyInterval < interval)
interval = familyInterval;
} else {
interval = familyInterval;
}
}
writer.setIndexInterval(interval);
}

/*
* Change readers adding into place the Reader produced by this new flush.
* @param logCacheFlushId
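In words: the writer previously took the per-family value unconditionally, so a site-wide hbase.io.index.interval setting was silently ignored; now both sources are consulted, a non-default value from either one wins, and the smaller value is used when both are customized. Illustrative cases (made-up numbers, DEFAULT standing for HColumnDescriptor.DEFAULT_MAPFILE_INDEX_INTERVAL):

    // family interval = DEFAULT, hbase.io.index.interval = 32      -> writer uses 32
    // family interval = 64,      hbase.io.index.interval = DEFAULT -> writer uses 64
    // family interval = 64,      hbase.io.index.interval = 32      -> writer uses 32 (smaller non-default)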
@@ -888,7 +909,7 @@ public class HStore implements HConstants {
}
MapFile.Writer writer = compactedOutputFile.getWriter(this.fs,
this.compression, this.family.isBloomfilter(), nrows);
writer.setIndexInterval(family.getMapFileIndexInterval());
setIndexInterval(writer);
try {
compact(writer, rdrs, majorCompaction);
} finally {
@@ -22,8 +22,10 @@ package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;

@@ -146,7 +148,7 @@ class HStoreScanner implements InternalScanner {
// are only keeping rows and columns that match those set on the
// scanner and which have delete values. If memory usage becomes a
// problem, could redo as bloom filter.
List<HStoreKey> deletes = new ArrayList<HStoreKey>();
Set<HStoreKey> deletes = new HashSet<HStoreKey>();
for (int i = 0; i < scanners.length && !filtered; i++) {
while ((scanners[i] != null
&& !filtered

@@ -166,16 +168,14 @@ class HStoreScanner implements InternalScanner {
// but this had the effect of overwriting newer
// values with older ones. So now we only insert
// a result if the map does not contain the key.
HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_BYTE_ARRAY,
HStoreKey hsk = new HStoreKey(key.getRow(),
HConstants.EMPTY_BYTE_ARRAY,
key.getTimestamp(), this.store.getHRegionInfo());
for (Map.Entry<byte [], Cell> e : resultSets[i].entrySet()) {
hsk.setColumn(e.getKey());
if (HLogEdit.isDeleted(e.getValue().getValue())) {
if (!deletes.contains(hsk)) {
// Key changes as we cycle the for loop so add a copy to
// the set of deletes.
deletes.add(new HStoreKey(hsk));
}
// Only first key encountered is added; deletes is a Set.
deletes.add(new HStoreKey(hsk));
} else if (!deletes.contains(hsk) &&
!filtered &&
moreToFollow &&
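Switching deletes from an ArrayList to a HashSet makes the membership checks constant-time instead of linear in the number of delete cells seen so far, and the explicit contains() guard before each insert can go away because a Set simply ignores a duplicate. A tiny illustration, assuming HStoreKey implements equals and hashCode (equals was already required by the old List.contains; hashCode is additionally needed by HashSet):

    Set<HStoreKey> deletes = new HashSet<HStoreKey>();
    deletes.add(new HStoreKey(hsk));   // first occurrence is stored
    deletes.add(new HStoreKey(hsk));   // equal key again: no-op, size stays 1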
@@ -77,8 +77,11 @@ class LogRoller extends Thread implements LogRollListener {
try {
LOG.info("Rolling hlog. Number of entries: " + server.getLog().getNumEntries());
server.getLog().rollWriter();
} catch (FailedLogCloseException e) {
LOG.fatal("Forcing server shutdown", e);
server.abort();
} catch (IOException ex) {
LOG.error("Log rolling failed",
LOG.error("Log rolling failed with ioe: ",
RemoteExceptionHandler.checkIOException(ex));
server.checkFileSystem();
} catch (Exception ex) {
@@ -72,10 +72,8 @@ public class FSUtils {
} catch (IOException e) {
exception = RemoteExceptionHandler.checkIOException(e);
}

try {
fs.close();

} catch (Exception e) {
LOG.error("file system close failed: ", e);
}
@@ -200,7 +200,7 @@ public class PerformanceEvaluation implements HConstants {
if (extantTables.length > 0) {
// Check to see if our table already exists. Print warning if it does.
for (int i = 0; i < extantTables.length; i++) {
if (extantTables[0].equals(tableDescriptor)) {
if (extantTables[i].equals(tableDescriptor)) {
LOG.warn("Table " + tableDescriptor + " already exists");
tableExists = true;
break;
|
|||
* @throws IOException
|
||||
*/
|
||||
public void testHTable() throws IOException {
|
||||
LOG.info("TEST: " + getName());
|
||||
byte[] value = "value".getBytes(UTF8_ENCODING);
|
||||
|
||||
try {
|
||||
|
@ -179,6 +180,7 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
|
|||
* For HADOOP-2579
|
||||
*/
|
||||
public void testTableNotFoundExceptionWithoutAnyTables() {
|
||||
LOG.info("TEST: " + getName());
|
||||
try {
|
||||
new HTable(conf, "notATable");
|
||||
fail("Should have thrown a TableNotFoundException");
|
||||
|
@ -195,6 +197,7 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
|
|||
* For HADOOP-2579
|
||||
*/
|
||||
public void testTableNotFoundExceptionWithATable() {
|
||||
LOG.info("TEST: " + getName());
|
||||
try {
|
||||
HBaseAdmin admin = new HBaseAdmin(conf);
|
||||
HTableDescriptor testTableADesc =
|
||||
|
@ -216,6 +219,7 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
|
|||
}
|
||||
|
||||
public void testGetRow() {
|
||||
LOG.info("TEST: " + getName());
|
||||
HTable table = null;
|
||||
try {
|
||||
HColumnDescriptor column2 =
|
||||
|
|
|
@@ -25,17 +25,15 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.hbase.HBaseTestCase;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.WritableComparable;
/**
* Test HStoreFile
*/

@@ -234,7 +232,7 @@ public class TestHStoreFile extends HBaseTestCase {
first = false;
LOG.info("First in bottom: " + previous);
}
assertTrue(key.compareTo(midkey) < 0);
assertTrue(key.compareTo((HStoreKey)midkey) < 0);
}
if (previous != null) {
LOG.info("Last in bottom: " + previous.toString());

@@ -244,7 +242,7 @@ public class TestHStoreFile extends HBaseTestCase {
HStoreFile.Range.top, midkey, null);
first = true;
while (top.next(key, value)) {
assertTrue(key.compareTo(midkey) >= 0);
assertTrue(key.compareTo((HStoreKey)midkey) >= 0);
if (first) {
first = false;
assertTrue(Bytes.equals(((HStoreKey)midkey).getRow(),

@@ -255,7 +253,7 @@ public class TestHStoreFile extends HBaseTestCase {
LOG.info("Last in top: " + key.toString());
top.getClosest(midkey, value);
// Assert value is same as key.
assertTrue(Bytes.equals(value.get(), ((HStoreKey) midkey).getRow()));
assertTrue(Bytes.equals(value.get(), ((HStoreKey)midkey).getRow()));

// Next test using a midkey that does not exist in the file.
// First, do a key that is < than first key. Ensure splits behave

@@ -270,7 +268,7 @@ public class TestHStoreFile extends HBaseTestCase {
HStoreFile.Range.top, badkey, null);
first = true;
while (top.next(key, value)) {
assertTrue(key.compareTo(badkey) >= 0);
assertTrue(key.compareTo((HStoreKey)badkey) >= 0);
if (first) {
first = false;
LOG.info("First top when key < bottom: " + key.toString());