Compare commits
30 Commits
Author | SHA1 | Date
---|---|---
 | 76bd7a4357 | 
 | 15aaec0a67 | 
 | e3e678fe30 | 
 | 00949213b8 | 
 | 3a2c6f5e90 | 
 | d7540b9af4 | 
 | 6e6c79d93e | 
 | cfb9cb2946 | 
 | 38b5c1f5c1 | 
 | d86e9a58b2 | 
 | e68aeb8d70 | 
 | 03bb977770 | 
 | 54e73589cb | 
 | cfa79319e5 | 
 | 1ef1955aa8 | 
 | 1f66d54b5d | 
 | 8dcc95d790 | 
 | 06d1a00f83 | 
 | 8b7d4da0f9 | 
 | 2d65f47517 | 
 | 6720a3f1ed | 
 | 47e17e0007 | 
 | cc6c62bc20 | 
 | 4e41ef9c69 | 
 | 9c218f0b04 | 
 | 7c3e3cab20 | 
 | 0a0f8ea3c0 | 
 | aaf96533a2 | 
 | 55512dd3e9 | 
 | 1c0cc7dba8 | 
CHANGES.txt (42 changed lines)
@@ -1,5 +1,47 @@
 HBase Change Log
 
+Release 0.18.2 - Unreleased
+
+  BUG FIXES
+   HBASE-602   HBase Crash when network card has a IPv6 address
+   HBASE-927   We don't recover if HRS hosting -ROOT-/.META. goes down -
+               (back port from trunk)
+   HBASE-1052  Stopping a HRegionServer with unflushed cache causes data loss
+               from org.apache.hadoop.hbase.DroppedSnapshotException
+   HBASE-981   hbase.io.index.interval doesn't seem to have an effect;
+               interval is 128 rather than the configured 32
+   HBASE-1070  Up default index interval in TRUNK and branch
+   HBASE-1079  Dumb NPE in ServerCallable hides the RetriesExhausted exception
+
+  IMPROVEMENTS
+   HBASE-1046  Narrow getClosestRowBefore by passing column family (backport)
+   HBASE-1069  Show whether HRegion major compacts or not in INFO level
+
+
+Release 0.18.1 - Released October 27, 2008
+
+  BUG FIXES
+   HBASE-891   HRS.validateValuesLength throws IOE, gets caught in the retries
+   HBASE-906   [shell] Truncates output
+   HBASE-912   PE is broken when other tables exist
+   HBASE-918   Region balancing during startup makes cluster unstable
+   HBASE-921   region close and open processed out of order; makes for
+               disagreement between master and regionserver on region state
+   HBASE-925   HRS NPE on way out if no master to connect to
+   HBASE-928   NPE throwing RetriesExhaustedException
+   HBASE-576   Investigate IPC performance; partial.
+   HBASE-924   Update hadoop in lib on 0.18 hbase branch to 0.18.1
+   HBASE-930   RegionServer stuck: HLog: Could not append. Requesting close of
+               log java.io.IOException: Could not get block locations. Aborting...
+   HBASE-933   missing hbase.regions.slop in hbase-default.xml for 0.18 branch
+               (Rong-en Fan via Stack)
+   HBASE-926   If no master, regionservers should hang out rather than fail on
+               connection and shut themselves down
+   HBASE-946   Row with 55k deletes timesout scanner lease
+
+  IMPROVEMENTS
+   HBASE-920   Make region balancing sloppier
+
 Release 0.18.0 - September 21st, 2008
 
 INCOMPATIBLE CHANGES
@@ -84,11 +84,11 @@ module Formatter
     end
 
     def dump(str)
-      # Remove double-quotes added by 'dump'.
       if str.instance_of? Fixnum
         return
       end
-      return str.dump.slice(1, str.length)
+      # Remove double-quotes added by 'dump'.
+      return str.dump[1..-2]
     end
 
     def output(width, str)
build.xml
@@ -18,7 +18,7 @@
 -->
 
 <project name="hbase" default="jar">
-  <property name="version" value="0.18.0"/>
+  <property name="version" value="0.18.2-dev"/>
   <property name="Name" value="HBase"/>
   <property name="final.name" value="hbase-${version}"/>
   <property name="year" value="2008"/>
hbase-default.xml
@@ -263,6 +263,13 @@
       HStoreFiles in a region. Default: 1 day.
     </description>
   </property>
+  <property>
+    <name>hbase.regions.slop</name>
+    <value>0.1</value>
+    <description>Rebalance if regionserver has average + (average * slop) regions.
+    Default is 10% slop.
+    </description>
+  </property>
   <property>
     <name>hbase.regionserver.nbreservationblocks</name>
     <value>4</value>
@@ -272,12 +279,13 @@
   </property>
   <property>
     <name>hbase.io.index.interval</name>
-    <value>32</value>
+    <value>128</value>
     <description>The interval at which we record offsets in hbase
     store files/mapfiles. Default for stock mapfiles is 128. Index
     files are read into memory. If there are many of them, could prove
     a burden. If so play with the hadoop io.map.index.skip property and
    skip every nth index member when reading back the index into memory.
+    Downside to high index interval is lowered access times.
     </description>
   </property>
   <property>
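For context, the index-interval setting above trades memory for lookup speed: a key offset is recorded every N entries, so a larger interval keeps fewer index entries in memory per store file at the cost of longer scans to reach a key. The sketch below is a standalone illustration of that arithmetic only (the entry counts are hypothetical; nothing here is HBase code):

```java
public class IndexIntervalExample {
  // Rough illustration of how hbase.io.index.interval affects the number of
  // in-memory index entries kept for a store file of a given size.
  public static void main(String[] args) {
    long entriesInFile = 1_000_000L;           // hypothetical store file size
    int[] intervals = {32, 128};               // configured value vs. new default
    for (int interval : intervals) {
      long indexEntries = entriesInFile / interval;
      System.out.println("interval " + interval + " -> ~" + indexEntries
          + " in-memory index entries per store file");
    }
  }
}
```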
Binary file not shown.
Binary file not shown.
HServerAddress.java
@@ -30,6 +30,7 @@ import java.net.InetSocketAddress;
  * HServerAddress is a "label" for a HBase server that combines the host
  * name and port number.
  */
+@SuppressWarnings("unchecked")
 public class HServerAddress implements WritableComparable {
   private InetSocketAddress address;
   String stringValue;
HServerLoad.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.io.WritableComparable;
 /**
  * This class encapsulates metrics for determining the load on a HRegionServer
  */
+@SuppressWarnings("unchecked")
 public class HServerLoad implements WritableComparable {
   private int numberOfRequests; // number of requests since last report
   private int numberOfRegions; // number of regions being served
HStoreKey.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.WritableComparator;
 /**
  * A Key for a stored row.
  */
-public class HStoreKey implements WritableComparable {
+public class HStoreKey implements WritableComparable<HStoreKey> {
   /**
    * Colon character in UTF-8
    */
@@ -332,7 +332,14 @@ public class HStoreKey implements WritableComparable {
 
   @Override
   public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
+    HStoreKey other = (HStoreKey)obj;
+    // Do a quick check.
+    if (this.row.length != other.row.length ||
+        this.column.length != other.column.length ||
+        this.timestamp != other.timestamp) {
+      return false;
+    }
+    return compareTo(other) == 0;
   }
 
   @Override
@@ -345,7 +352,7 @@ public class HStoreKey implements WritableComparable {
 
   // Comparable
 
-  public int compareTo(Object o) {
+  public int compareTo(final HStoreKey o) {
     return compareTo(this.regionInfo, this, (HStoreKey)o);
   }
 
@@ -509,8 +516,7 @@ public class HStoreKey implements WritableComparable {
   */
   public static int compareTwoRowKeys(HRegionInfo regionInfo,
       byte[] rowA, byte[] rowB) {
-    if(regionInfo != null && (regionInfo.isMetaRegion() ||
-        regionInfo.isRootRegion())) {
+    if (regionInfo != null && regionInfo.isMetaRegion()) {
       byte[][] keysA = stripStartKeyMeta(rowA);
       byte[][] KeysB = stripStartKeyMeta(rowB);
       int rowCompare = Bytes.compareTo(keysA[0], KeysB[0]);
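The equals() rewrite above adds a cheap length and timestamp check before falling back to the full comparator, so obviously unequal keys are rejected without a byte-by-byte compare. A standalone sketch of the same idea follows; the class and field names are illustrative and this is not the HBase class itself:

```java
import java.util.Arrays;

final class KeySketch {
  private final byte[] row;
  private final byte[] column;
  private final long timestamp;

  KeySketch(byte[] row, byte[] column, long timestamp) {
    this.row = row;
    this.column = column;
    this.timestamp = timestamp;
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof KeySketch)) {
      return false;
    }
    KeySketch other = (KeySketch) obj;
    // Quick reject: mismatched lengths or timestamps cannot be equal,
    // so skip the byte-by-byte comparison in that case.
    if (this.row.length != other.row.length
        || this.column.length != other.column.length
        || this.timestamp != other.timestamp) {
      return false;
    }
    return Arrays.equals(this.row, other.row)
        && Arrays.equals(this.column, other.column);
  }

  @Override
  public int hashCode() {
    return Arrays.hashCode(row) ^ Arrays.hashCode(column) ^ Long.hashCode(timestamp);
  }
}
```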
HTableDescriptor.java
@@ -37,23 +37,7 @@ import org.apache.hadoop.io.WritableComparable;
  * HTableDescriptor contains the name of an HTable, and its
  * column families.
  */
-public class HTableDescriptor implements WritableComparable {
-  /** Table descriptor for <core>-ROOT-</code> catalog table */
-  public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
-      HConstants.ROOT_TABLE_NAME,
-      new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY,
-        1, HColumnDescriptor.CompressionType.NONE, false, false,
-        Integer.MAX_VALUE, HConstants.FOREVER, false) });
-
-  /** Table descriptor for <code>.META.</code> catalog table */
-  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
-      HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
-        new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1,
-          HColumnDescriptor.CompressionType.NONE, false, false,
-          Integer.MAX_VALUE, HConstants.FOREVER, false),
-        new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
-          HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE,
-          false, false, Integer.MAX_VALUE, HConstants.FOREVER, false) });
+public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
 
 // Changes prior to version 3 were not recorded here.
 // Version 3 adds metadata as a map where keys and values are byte[].
@@ -63,18 +47,35 @@ public class HTableDescriptor implements WritableComparable {
   private String nameAsString = "";
 
   // Table metadata
-  protected Map<ImmutableBytesWritable,ImmutableBytesWritable> values =
-    new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
+  protected Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
+    new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
 
-  //TODO: Why can't the following be private? They are only used within this class.
-
   public static final String FAMILIES = "FAMILIES";
+  public static final ImmutableBytesWritable FAMILIES_KEY =
+    new ImmutableBytesWritable(Bytes.toBytes(FAMILIES));
   public static final String MAX_FILESIZE = "MAX_FILESIZE";
+  public static final ImmutableBytesWritable MAX_FILESIZE_KEY =
+    new ImmutableBytesWritable(Bytes.toBytes(MAX_FILESIZE));
   public static final String READONLY = "READONLY";
+  public static final ImmutableBytesWritable READONLY_KEY =
+    new ImmutableBytesWritable(Bytes.toBytes(READONLY));
   public static final String MEMCACHE_FLUSHSIZE = "MEMCACHE_FLUSHSIZE";
+  public static final ImmutableBytesWritable MEMCACHE_FLUSHSIZE_KEY =
+    new ImmutableBytesWritable(Bytes.toBytes(MEMCACHE_FLUSHSIZE));
   public static final String IS_ROOT = "IS_ROOT";
+  public static final ImmutableBytesWritable IS_ROOT_KEY =
+    new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT));
   public static final String IS_META = "IS_META";
+  public static final ImmutableBytesWritable IS_META_KEY =
+    new ImmutableBytesWritable(Bytes.toBytes(IS_META));
+
+  // The below are ugly but better than creating them each time till we
+  // replace booleans being saved as Strings with plain booleans. Need a
+  // migration script to do this. TODO.
+  private static final ImmutableBytesWritable FALSE =
+    new ImmutableBytesWritable(Bytes.toBytes(Boolean.FALSE.toString()));
+  private static final ImmutableBytesWritable TRUE =
+    new ImmutableBytesWritable(Bytes.toBytes(Boolean.TRUE.toString()));
 
   public static final boolean DEFAULT_IN_MEMORY = false;
 
@@ -82,10 +83,9 @@ public class HTableDescriptor implements WritableComparable {
 
   public static final int DEFAULT_MEMCACHE_FLUSH_SIZE = 1024*1024*64;
 
-  private transient Boolean meta = null;
-  // End TODO:
+  private volatile Boolean meta = null;
+  private volatile Boolean root = null;
 
   // Key is hash of the family name.
   private final Map<Integer, HColumnDescriptor> families =
     new HashMap<Integer, HColumnDescriptor>();
@@ -96,6 +96,7 @@ public class HTableDescriptor implements WritableComparable {
   */
   protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) {
     this.name = name.clone();
+    this.nameAsString = Bytes.toString(this.name);
     setMetaFlags(name);
     for(HColumnDescriptor descriptor : families) {
       this.families.put(Bytes.mapKey(descriptor.getName()), descriptor);
@@ -109,6 +110,7 @@ public class HTableDescriptor implements WritableComparable {
   protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families,
       Map<ImmutableBytesWritable,ImmutableBytesWritable> values) {
     this.name = name.clone();
+    this.nameAsString = Bytes.toString(this.name);
     setMetaFlags(name);
     for(HColumnDescriptor descriptor : families) {
       this.families.put(Bytes.mapKey(descriptor.getName()), descriptor);
@@ -150,9 +152,9 @@ public class HTableDescriptor implements WritableComparable {
   */
   public HTableDescriptor(final byte [] name) {
     super();
+    setMetaFlags(this.name);
     this.name = this.isMetaRegion() ? name: isLegalTableName(name);
     this.nameAsString = Bytes.toString(this.name);
-    setMetaFlags(this.name);
   }
 
   /**
@@ -162,8 +164,7 @@ public class HTableDescriptor implements WritableComparable {
    * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
    * @param desc The descriptor.
    */
-  public HTableDescriptor(final HTableDescriptor desc)
-  {
+  public HTableDescriptor(final HTableDescriptor desc) {
     super();
     this.name = desc.name.clone();
     this.nameAsString = Bytes.toString(this.name);
@@ -190,16 +191,16 @@ public class HTableDescriptor implements WritableComparable {
 
   /** @return true if this is the root region */
   public boolean isRootRegion() {
-    String value = getValue(IS_ROOT);
-    if (value != null)
-      return Boolean.valueOf(value);
-    return false;
+    if (this.root == null) {
+      this.root = isSomething(IS_ROOT_KEY, false)? Boolean.TRUE: Boolean.FALSE;
+    }
+    return this.root.booleanValue();
   }
 
   /** @param isRoot true if this is the root region */
   protected void setRootRegion(boolean isRoot) {
-    values.put(new ImmutableBytesWritable(Bytes.toBytes(IS_ROOT)),
-      new ImmutableBytesWritable(Bytes.toBytes(Boolean.toString(isRoot))));
+    // TODO: Make the value a boolean rather than String of boolean.
+    values.put(IS_ROOT_KEY, isRoot? TRUE: FALSE);
   }
 
   /** @return true if this is a meta region (part of the root or meta tables) */
@@ -211,16 +212,25 @@ public class HTableDescriptor implements WritableComparable {
   }
 
   private synchronized Boolean calculateIsMetaRegion() {
-    String value = getValue(IS_META);
-    return (value != null)? new Boolean(value): Boolean.FALSE;
+    byte [] value = getValue(IS_META_KEY);
+    return (value != null)? new Boolean(Bytes.toString(value)): Boolean.FALSE;
+  }
+
+  private boolean isSomething(final ImmutableBytesWritable key,
+      final boolean valueIfNull) {
+    byte [] value = getValue(key);
+    if (value != null) {
+      // TODO: Make value be a boolean rather than String of boolean.
+      return Boolean.valueOf(Bytes.toString(value)).booleanValue();
+    }
+    return valueIfNull;
   }
 
   /**
    * @param isMeta true if this is a meta region (part of the root or meta
    * tables) */
   protected void setMetaRegion(boolean isMeta) {
-    values.put(new ImmutableBytesWritable(Bytes.toBytes(IS_META)),
-      new ImmutableBytesWritable(Bytes.toBytes(Boolean.toString(isMeta))));
+    values.put(IS_META_KEY, isMeta? TRUE: FALSE);
   }
 
   /** @return true if table is the meta table */
@@ -263,7 +273,11 @@ public class HTableDescriptor implements WritableComparable {
   * @return The value.
   */
   public byte[] getValue(byte[] key) {
-    ImmutableBytesWritable ibw = values.get(new ImmutableBytesWritable(key));
+    return getValue(new ImmutableBytesWritable(key));
+  }
+
+  private byte[] getValue(final ImmutableBytesWritable key) {
+    ImmutableBytesWritable ibw = values.get(key);
     if (ibw == null)
       return null;
     return ibw.get();
@@ -292,8 +306,25 @@ public class HTableDescriptor implements WritableComparable {
   * @param value The value.
   */
   public void setValue(byte[] key, byte[] value) {
-    values.put(new ImmutableBytesWritable(key),
-      new ImmutableBytesWritable(value));
+    setValue(new ImmutableBytesWritable(key), value);
+  }
+
+  /*
+   * @param key The key.
+   * @param value The value.
+   */
+  private void setValue(final ImmutableBytesWritable key,
+      final byte[] value) {
+    values.put(key, new ImmutableBytesWritable(value));
+  }
+
+  /*
+   * @param key The key.
+   * @param value The value.
+   */
+  private void setValue(final ImmutableBytesWritable key,
+      final ImmutableBytesWritable value) {
+    values.put(key, value);
   }
 
   /**
@@ -311,7 +342,7 @@ public class HTableDescriptor implements WritableComparable {
   public boolean isInMemory() {
     String value = getValue(HConstants.IN_MEMORY);
     if (value != null)
-      return Boolean.valueOf(value);
+      return Boolean.valueOf(value).booleanValue();
     return DEFAULT_IN_MEMORY;
   }
 
@@ -327,18 +358,15 @@ public class HTableDescriptor implements WritableComparable {
   * @return true if all columns in the table should be read only
   */
   public boolean isReadOnly() {
-    String value = getValue(READONLY);
-    if (value != null)
-      return Boolean.valueOf(value);
-    return DEFAULT_READONLY;
+    return isSomething(READONLY_KEY, DEFAULT_READONLY);
   }
 
   /**
   * @param readOnly True if all of the columns in the table should be read
   * only.
   */
-  public void setReadOnly(boolean readOnly) {
-    setValue(READONLY, Boolean.toString(readOnly));
+  public void setReadOnly(final boolean readOnly) {
+    setValue(READONLY_KEY, readOnly? TRUE: FALSE);
   }
 
   /** @return name of table */
@@ -353,9 +381,9 @@ public class HTableDescriptor implements WritableComparable {
 
   /** @return max hregion size for table */
   public long getMaxFileSize() {
-    String value = getValue(MAX_FILESIZE);
+    byte [] value = getValue(MAX_FILESIZE_KEY);
     if (value != null)
-      return Long.valueOf(value);
+      return Long.valueOf(Bytes.toString(value)).longValue();
     return HConstants.DEFAULT_MAX_FILE_SIZE;
   }
 
@@ -364,16 +392,16 @@ public class HTableDescriptor implements WritableComparable {
   * before a split is triggered.
   */
   public void setMaxFileSize(long maxFileSize) {
-    setValue(MAX_FILESIZE, Long.toString(maxFileSize));
+    setValue(MAX_FILESIZE_KEY, Bytes.toBytes(Long.toString(maxFileSize)));
   }
 
   /**
   * @return memory cache flush size for each hregion
   */
   public int getMemcacheFlushSize() {
-    String value = getValue(MEMCACHE_FLUSHSIZE);
+    byte [] value = getValue(MEMCACHE_FLUSHSIZE_KEY);
     if (value != null)
-      return Integer.valueOf(value);
+      return Integer.valueOf(Bytes.toString(value)).intValue();
     return DEFAULT_MEMCACHE_FLUSH_SIZE;
   }
 
@@ -381,7 +409,8 @@ public class HTableDescriptor implements WritableComparable {
   * @param memcacheFlushSize memory cache flush size for each hregion
   */
   public void setMemcacheFlushSize(int memcacheFlushSize) {
-    setValue(MEMCACHE_FLUSHSIZE, Integer.toString(memcacheFlushSize));
+    setValue(MEMCACHE_FLUSHSIZE_KEY,
+      Bytes.toBytes(Integer.toString(memcacheFlushSize)));
   }
 
   /**
@@ -447,7 +476,7 @@ public class HTableDescriptor implements WritableComparable {
 
   @Override
   public boolean equals(Object obj) {
-    return compareTo(obj) == 0;
+    return compareTo((HTableDescriptor)obj) == 0;
   }
 
   @Override
@@ -513,8 +542,7 @@ public class HTableDescriptor implements WritableComparable {
 
   // Comparable
 
-  public int compareTo(Object o) {
-    HTableDescriptor other = (HTableDescriptor) o;
+  public int compareTo(final HTableDescriptor other) {
     int result = Bytes.compareTo(this.name, other.name);
     if (result == 0) {
       result = families.size() - other.families.size();
@@ -576,4 +604,21 @@ public class HTableDescriptor implements WritableComparable {
   public static Path getTableDir(Path rootdir, final byte [] tableName) {
     return new Path(rootdir, Bytes.toString(tableName));
   }
+
+  /** Table descriptor for <core>-ROOT-</code> catalog table */
+  public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
+      HConstants.ROOT_TABLE_NAME,
+      new HColumnDescriptor[] { new HColumnDescriptor(HConstants.COLUMN_FAMILY,
+        1, HColumnDescriptor.CompressionType.NONE, false, false,
+        Integer.MAX_VALUE, HConstants.FOREVER, false) });
+
+  /** Table descriptor for <code>.META.</code> catalog table */
+  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
+      HConstants.META_TABLE_NAME, new HColumnDescriptor[] {
+        new HColumnDescriptor(HConstants.COLUMN_FAMILY, 1,
+          HColumnDescriptor.CompressionType.NONE, false, false,
+          Integer.MAX_VALUE, HConstants.FOREVER, false),
+        new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN,
+          HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE,
+          false, false, Integer.MAX_VALUE, HConstants.FOREVER, false) });
 }
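Two patterns recur in the HTableDescriptor changes above: the metadata keys are pre-built once as ImmutableBytesWritable constants instead of being re-encoded on every call, and the root/meta flags are cached in volatile fields so the string-to-boolean parse happens at most once per descriptor. The following is a simplified standalone sketch of the caching idea only; the class and key names are hypothetical and it is not the patched class:

```java
import java.util.HashMap;
import java.util.Map;

class FlagCacheSketch {
  private final Map<String, String> values = new HashMap<>();
  // Lazily computed answer; volatile so a value computed by one thread is
  // visible to others without taking a lock on the fast path.
  private volatile Boolean root = null;

  void setRootFlag(boolean isRoot) {
    values.put("IS_ROOT", Boolean.toString(isRoot));
    root = null; // invalidate the cache when the underlying value changes
  }

  boolean isRootRegion() {
    if (root == null) {
      String value = values.get("IS_ROOT");
      root = value != null && Boolean.parseBoolean(value);
    }
    return root.booleanValue();
  }
}
```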
RegionHistorian.java
@@ -86,7 +86,7 @@ public class RegionHistorian implements HConstants {
   * Get the RegionHistorian Singleton instance.
   * @return The region historian
   */
-  public static RegionHistorian getInstance() {
+  public synchronized static RegionHistorian getInstance() {
     if (historian == null) {
       historian = new RegionHistorian();
     }
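Marking getInstance() synchronized above closes the window in which two threads could each observe historian == null and construct two instances. A minimal standalone sketch of that lazily initialized, synchronized singleton shape (illustrative names only):

```java
class HistorianSketch {
  private static HistorianSketch instance = null;

  private HistorianSketch() {
  }

  // synchronized so concurrent first callers cannot both see null
  // and construct two separate instances.
  static synchronized HistorianSketch getInstance() {
    if (instance == null) {
      instance = new HistorianSketch();
    }
    return instance;
  }
}
```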
ValueOverMaxLengthException.java (new file)
@@ -0,0 +1,42 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+/**
+ * Thrown when a value is longer than the specified LENGTH
+ */
+@SuppressWarnings("serial")
+public class ValueOverMaxLengthException extends DoNotRetryIOException {
+
+  /**
+   * default constructor
+   */
+  public ValueOverMaxLengthException() {
+    super();
+  }
+
+  /**
+   * @param message
+   */
+  public ValueOverMaxLengthException(String message) {
+    super(message);
+  }
+
+}
HConnectionManager.java
@@ -486,9 +486,10 @@ public class HConnectionManager implements HConstants {
           HRegionInterface server =
             getHRegionConnection(metaLocation.getServerAddress());
 
-          // query the root region for the location of the meta region
+          // Query the root region for the location of the meta region
           RowResult regionInfoRow = server.getClosestRowBefore(
-            metaLocation.getRegionInfo().getRegionName(), metaKey);
+            metaLocation.getRegionInfo().getRegionName(), metaKey,
+            HConstants.COLUMN_FAMILY);
 
           if (regionInfoRow == null) {
             throw new TableNotFoundException(Bytes.toString(tableName));
ServerCallable.java
@@ -78,4 +78,4 @@ public abstract class ServerCallable<T> implements Callable<T> {
   public byte [] getRow() {
     return row;
   }
 }
HRegionInterface.java
@@ -37,8 +37,9 @@ public interface HRegionInterface extends VersionedProtocol {
   /**
    * Protocol version.
    * Upped to 4 when we removed overloaded methods from the protocol.
+   * Upped to 5 when we changed getClosestRowBefore signature.
    */
-  public static final long versionID = 4L;
+  public static final long versionID = 5L;
 
   /**
    * Get metainfo about an HRegion
@@ -72,11 +73,12 @@ public interface HRegionInterface extends VersionedProtocol {
    *
    * @param regionName region name
    * @param row row key
+   * @param columnFamily Column family to look for row in.
    * @return map of values
    * @throws IOException
    */
   public RowResult getClosestRowBefore(final byte [] regionName,
-    final byte [] row)
+    final byte [] row, final byte [] columnFamily)
   throws IOException;
 
   /**
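Adding a parameter to getClosestRowBefore changes the wire contract, which is why versionID is bumped from 4 to 5 in the same hunk: a client compiled against the old interface should fail the version check rather than call a method whose signature no longer matches. The sketch below is a schematic stand-in, not the real Hadoop IPC classes; only the idea of keeping the version constant next to the methods it guards comes from the diff:

```java
import java.io.IOException;

// Hypothetical versioned RPC interface used purely for illustration.
interface RowLookupProtocol {
  // Bump whenever a method signature below changes.
  long VERSION_ID = 5L;

  byte[] getClosestRowBefore(byte[] regionName, byte[] row, byte[] columnFamily)
      throws IOException;
}
```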
@@ -22,9 +22,7 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.fs.FileSystem; //TODO: remove
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.regionserver.HStoreFile; //TODO: remove
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 
HMaster.java
@@ -121,7 +121,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
   final int metaRescanInterval;
 
   // A Sleeper that sleeps for threadWakeFrequency
-  protected final Sleeper sleeper;
+  private final Sleeper sleeper;
 
   // Default access so accesible from unit tests. MASTER is name of the webapp
   // and the attribute name used stuffing this instance into web context.
@@ -587,10 +587,10 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
 
     for (int tries = 0; tries < numRetries; tries++) {
       try {
-        // We can not access meta regions if they have not already been
-        // assigned and scanned. If we timeout waiting, just shutdown.
-        if (regionManager.waitForMetaRegionsOrClose()) {
-          break;
+        // We can not create a table unless meta regions have already been
+        // assigned and scanned.
+        if (!regionManager.areAllMetaRegionsOnline()) {
+          throw new NotAllMetaRegionsOnlineException();
         }
         createTable(newRegion);
         LOG.info("created table " + desc.getNameAsString());
MetaScanner.java
@@ -62,7 +62,7 @@ class MetaScanner extends BaseScanner {
     boolean scanSuccessful = false;
     while (!master.closed.get() && !regionManager.isInitialRootScanComplete() &&
       regionManager.getRootRegionLocation() == null) {
-      master.sleeper.sleep();
+      sleep();
     }
     if (master.closed.get()) {
       return scanSuccessful;
@@ -19,7 +19,6 @@
  */
 package org.apache.hadoop.hbase.master;
 
-import java.util.Map; //TODO: remove
 import java.io.IOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
NotAllMetaRegionsOnlineException.java (new file)
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+
+/**
+ * Thrown when an operation requires the root and all meta regions to be online
+ */
+@SuppressWarnings("serial")
+public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException {
+  /**
+   * default constructor
+   */
+  public NotAllMetaRegionsOnlineException() {
+    super();
+  }
+
+  /**
+   * @param message
+   */
+  public NotAllMetaRegionsOnlineException(String message) {
+    super(message);
+  }
+
+}
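Both new exception classes in this compare extend DoNotRetryIOException, whose purpose is to tell a caller's retry loop to give up immediately rather than burn its retry budget on a failure that cannot heal. A standalone sketch of how such a marker type is typically honored follows; all names here are hypothetical and it is not HBase client code:

```java
import java.io.IOException;

class DoNotRetrySketch extends IOException {
  DoNotRetrySketch(String message) { super(message); }
}

class RetryLoopSketch {
  interface RiskyAction {
    void run() throws IOException;
  }

  static void runWithRetries(RiskyAction action, int maxRetries) throws IOException {
    for (int tries = 0; tries < maxRetries; tries++) {
      try {
        action.run();
        return;
      } catch (DoNotRetrySketch e) {
        throw e; // marker type: retrying cannot help, rethrow immediately
      } catch (IOException e) {
        // transient failure: fall through and try again
      }
    }
    throw new IOException("retries exhausted");
  }
}
```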
ProcessRegionClose.java
@@ -33,19 +33,21 @@ import org.apache.hadoop.hbase.HRegionInfo;
 * necessary.
 */
 class ProcessRegionClose extends ProcessRegionStatusChange {
   protected final boolean offlineRegion;
+  protected final boolean reassignRegion;
 
   /**
   * @param master
   * @param regionInfo Region to operate on
   * @param offlineRegion if true, set the region to offline in meta
-  * delete the region files from disk.
+  * @param reassignRegion if true, region is to be reassigned
   */
   public ProcessRegionClose(HMaster master, HRegionInfo regionInfo,
-    boolean offlineRegion) {
+    boolean offlineRegion, boolean reassignRegion) {
 
     super(master, regionInfo);
     this.offlineRegion = offlineRegion;
+    this.reassignRegion = reassignRegion;
   }
 
   @Override
@@ -56,32 +58,35 @@ class ProcessRegionClose extends ProcessRegionStatusChange {
 
   @Override
   protected boolean process() throws IOException {
-    Boolean result =
-      new RetryableMetaOperation<Boolean>(this.metaRegion, this.master) {
-        public Boolean call() throws IOException {
-          LOG.info("region closed: " + regionInfo.getRegionNameAsString());
-
-          // Mark the Region as unavailable in the appropriate meta table
-
-          if (!metaRegionAvailable()) {
-            // We can't proceed unless the meta region we are going to update
-            // is online. metaRegionAvailable() has put this operation on the
-            // delayedToDoQueue, so return true so the operation is not put
-            // back on the toDoQueue
-            return true;
-          }
-
-          if (offlineRegion) {
-            // offline the region in meta and then note that we've offlined the
-            // region.
-            HRegion.offlineRegionInMETA(server, metaRegionName,
-              regionInfo);
-            master.regionManager.regionOfflined(regionInfo.getRegionName());
-          }
-          return true;
-        }
-      }.doWithRetries();
-
+    Boolean result = null;
+    if (offlineRegion) {
+      result =
+        new RetryableMetaOperation<Boolean>(this.metaRegion, this.master) {
+          public Boolean call() throws IOException {
+            LOG.info("region closed: " + regionInfo.getRegionNameAsString());
+
+            // We can't proceed unless the meta region we are going to update
+            // is online. metaRegionAvailable() will put this operation on the
+            // delayedToDoQueue, so return true so the operation is not put
+            // back on the toDoQueue
+            if (metaRegionAvailable()) {
+              // offline the region in meta and then note that we've offlined
+              // the region.
+              HRegion.offlineRegionInMETA(server, metaRegionName,
+                regionInfo);
+              master.regionManager.regionOfflined(regionInfo.getRegionName());
+            }
+            return true;
+          }
+        }.doWithRetries();
+      result = result == null ? true : result;
+    } else if (reassignRegion) {
+      // we are reassigning the region eventually, so set it unassigned
+      master.regionManager.setUnassigned(regionInfo);
+    }
     return result == null ? true : result;
   }
 }
ProcessRegionOpen.java
@@ -96,13 +96,8 @@ class ProcessRegionOpen extends ProcessRegionStatusChange {
             regionInfo.getRegionName(), regionInfo.getStartKey());
           if (!master.regionManager.isInitialMetaScanComplete()) {
             // Put it on the queue to be scanned for the first time.
-            try {
-              LOG.debug("Adding " + m.toString() + " to regions to scan");
-              master.regionManager.addMetaRegionToScan(m);
-            } catch (InterruptedException e) {
-              throw new RuntimeException(
-                "Putting into metaRegionsToScan was interrupted.", e);
-            }
+            LOG.debug("Adding " + m.toString() + " to regions to scan");
+            master.regionManager.addMetaRegionToScan(m);
           } else {
             // Add it to the online meta regions
             LOG.debug("Adding to onlineMetaRegions: " + m.toString());
ProcessServerShutdown.java
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.io.UnsupportedEncodingException; //TODO: remove
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -46,6 +45,8 @@ import org.apache.hadoop.hbase.io.RowResult;
 class ProcessServerShutdown extends RegionServerOperation {
   private HServerAddress deadServer;
   private String deadServerName;
+  private final boolean rootRegionServer;
+  private boolean rootRegionReassigned = false;
   private Path oldLogDir;
   private boolean logSplit;
   private boolean rootRescanned;
@@ -65,20 +66,18 @@ class ProcessServerShutdown extends RegionServerOperation {
   /**
   * @param master
   * @param serverInfo
+  * @param rootRegionServer
   */
-  public ProcessServerShutdown(HMaster master, HServerInfo serverInfo) {
+  public ProcessServerShutdown(HMaster master, HServerInfo serverInfo,
+      boolean rootRegionServer) {
     super(master);
     this.deadServer = serverInfo.getServerAddress();
     this.deadServerName = this.deadServer.toString();
+    this.rootRegionServer = rootRegionServer;
     this.logSplit = false;
     this.rootRescanned = false;
-    StringBuilder dirName = new StringBuilder("log_");
-    dirName.append(deadServer.getBindAddress());
-    dirName.append("_");
-    dirName.append(serverInfo.getStartCode());
-    dirName.append("_");
-    dirName.append(deadServer.getPort());
-    this.oldLogDir = new Path(master.rootdir, dirName.toString());
+    this.oldLogDir =
+      new Path(master.rootdir, HLog.getHLogDirectoryName(serverInfo));
   }
 
   @Override
@@ -250,6 +249,17 @@ class ProcessServerShutdown extends RegionServerOperation {
       logSplit = true;
     }
 
+    if (this.rootRegionServer && !this.rootRegionReassigned) {
+      // The server that died was serving the root region. Now that the log
+      // has been split, get it reassigned.
+      master.regionManager.reassignRootRegion();
+      // avoid multiple root region reassignment
+      this.rootRegionReassigned = true;
+      // When we call rootAvailable below, it will put us on the delayed
+      // to do queue to allow some time to pass during which the root
+      // region will hopefully get reassigned.
+    }
+
     if (!rootAvailable()) {
       // Return true so that worker does not put this request back on the
       // toDoQueue.
RegionManager.java
@@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.util.Writables;
 class RegionManager implements HConstants {
   protected static final Log LOG = LogFactory.getLog(RegionManager.class);
 
-  private volatile AtomicReference<HServerAddress> rootRegionLocation =
+  private AtomicReference<HServerAddress> rootRegionLocation =
     new AtomicReference<HServerAddress>(null);
 
   final Lock splitLogLock = new ReentrantLock();
@@ -118,15 +118,17 @@ class RegionManager implements HConstants {
   private final int maxAssignInOneGo;
 
   private final HMaster master;
 
   private final RegionHistorian historian;
+  private final float slop;
 
   RegionManager(HMaster master) {
     this.master = master;
     this.historian = RegionHistorian.getInstance();
     this.maxAssignInOneGo = this.master.getConfiguration().
       getInt("hbase.regions.percheckin", 10);
+    this.slop = this.master.getConfiguration().getFloat("hbase.regions.slop",
+      (float)0.1);
 
     // The root region
     rootScannerThread = new RootScanner(master, this);
 
@@ -158,6 +160,17 @@ class RegionManager implements HConstants {
     }
   }
 
+  void unsetRootRegion() {
+    rootRegionLocation.set(null);
+  }
+
+  void reassignRootRegion() {
+    unsetRootRegion();
+    if (!master.shutdownRequested) {
+      unassignedRegions.put(HRegionInfo.ROOT_REGIONINFO, ZERO_L);
+    }
+  }
+
   /*
   * Assigns regions to region servers attempting to balance the load across
   * all region servers
@@ -178,16 +191,24 @@ class RegionManager implements HConstants {
     // worked on elsewhere.
     Set<HRegionInfo> regionsToAssign = regionsAwaitingAssignment();
     if (regionsToAssign.size() == 0) {
-      // There are no regions waiting to be assigned. This is an opportunity
-      // for us to check if this server is overloaded.
-      double avgLoad = master.serverManager.getAverageLoad();
-      if (avgLoad > 2.0 && thisServersLoad.getNumberOfRegions() > avgLoad) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Server " + serverName + " is overloaded. Server load: " +
-            thisServersLoad.getNumberOfRegions() + " avg: " + avgLoad);
+      // There are no regions waiting to be assigned.
+      if (allRegionsAssigned()) {
+        // We only do load balancing once all regions are assigned.
+        // This prevents churn while the cluster is starting up.
+        double avgLoad = master.serverManager.getAverageLoad();
+        double avgLoadWithSlop = avgLoad +
+          ((this.slop != 0)? avgLoad * this.slop: avgLoad);
+        if (avgLoad > 2.0 &&
+            thisServersLoad.getNumberOfRegions() > avgLoadWithSlop) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Server " + serverName +
+              " is overloaded. Server load: " +
+              thisServersLoad.getNumberOfRegions() + " avg: " + avgLoad +
+              ", slop: " + this.slop);
+          }
+          unassignSomeRegions(thisServersLoad, avgLoad, mostLoadedRegions,
+            returnMsgs);
         }
-        unassignSomeRegions(thisServersLoad, avgLoad, mostLoadedRegions,
-          returnMsgs);
       }
     } else {
       // if there's only one server, just give it all the regions
@@ -483,10 +504,16 @@ class RegionManager implements HConstants {
   * Block until meta regions are online or we're shutting down.
   * @return true if we found meta regions, false if we're closing.
   */
-  public boolean waitForMetaRegionsOrClose() {
-    return metaScannerThread.waitForMetaRegionsOrClose();
+  public boolean areAllMetaRegionsOnline() {
+    boolean result = false;
+    if (rootRegionLocation.get() != null &&
+        numberOfMetaRegions.get() == onlineMetaRegions.size()) {
+      result = true;
+    }
+    return result;
   }
 
   /**
   * Search our map of online meta regions to find the first meta region that
   * should contain a pointer to <i>newRegion</i>.
@@ -513,16 +540,25 @@ class RegionManager implements HConstants {
   * Get a set of all the meta regions that contain info about a given table.
   * @param tableName Table you need to know all the meta regions for
   * @return set of MetaRegion objects that contain the table
+  * @throws NotAllMetaRegionsOnlineException
   */
-  public Set<MetaRegion> getMetaRegionsForTable(byte [] tableName) {
+  public Set<MetaRegion> getMetaRegionsForTable(byte [] tableName)
+  throws NotAllMetaRegionsOnlineException {
     byte [] firstMetaRegion = null;
     Set<MetaRegion> metaRegions = new HashSet<MetaRegion>();
+
     if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
+      if (rootRegionLocation.get() == null) {
+        throw new NotAllMetaRegionsOnlineException(
+          Bytes.toString(HConstants.ROOT_TABLE_NAME));
+      }
       metaRegions.add(new MetaRegion(rootRegionLocation.get(),
         HRegionInfo.ROOT_REGIONINFO.getRegionName()));
+
     } else {
+      if (!areAllMetaRegionsOnline()) {
+        throw new NotAllMetaRegionsOnlineException();
+      }
       synchronized (onlineMetaRegions) {
         if (onlineMetaRegions.size() == 1) {
           firstMetaRegion = onlineMetaRegions.firstKey();
@@ -582,9 +618,9 @@ class RegionManager implements HConstants {
   * @return list of MetaRegion objects
   */
   public List<MetaRegion> getListOfOnlineMetaRegions() {
-    List<MetaRegion> regions = new ArrayList<MetaRegion>();
+    List<MetaRegion> regions = null;
     synchronized(onlineMetaRegions) {
-      regions.addAll(onlineMetaRegions.values());
+      regions = new ArrayList<MetaRegion>(onlineMetaRegions.values());
     }
     return regions;
   }
@@ -795,9 +831,8 @@ class RegionManager implements HConstants {
   /**
   * Add a meta region to the scan queue
   * @param m MetaRegion that needs to get scanned
-  * @throws InterruptedException
   */
-  public void addMetaRegionToScan(MetaRegion m) throws InterruptedException {
+  public void addMetaRegionToScan(MetaRegion m) {
     metaScannerThread.addMetaRegionToScan(m);
   }
 
@@ -842,6 +877,15 @@ class RegionManager implements HConstants {
     return metaScannerThread.isInitialScanComplete();
   }
 
+  /**
+  * @return true if the initial meta scan is complete and there are no
+  * unassigned or pending regions
+  */
+  public boolean allRegionsAssigned() {
+    return isInitialMetaScanComplete() && unassignedRegions.size() == 0 &&
+      pendingRegions.size() == 0;
+  }
+
   /**
   * Get the root region location.
   * @return HServerAddress describing root region server.
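The RegionManager change above both defers balancing until allRegionsAssigned() and widens the overload test by the configured slop, so a server is only unloaded when it exceeds average + average * slop (10 percent by default). The sketch below reproduces only that threshold arithmetic with hypothetical numbers; it is not the RegionManager code:

```java
public class BalanceThresholdSketch {
  public static void main(String[] args) {
    double avgLoad = 50.0;   // average regions per server (hypothetical)
    float slop = 0.1f;       // hbase.regions.slop default from the diff
    int serverRegions = 54;  // regions on the server being checked

    double avgLoadWithSlop = avgLoad + ((slop != 0) ? avgLoad * slop : avgLoad);
    boolean overloaded = avgLoad > 2.0 && serverRegions > avgLoadWithSlop;
    // With 10% slop the threshold is 55, so 54 regions does not trigger the
    // rebalance that the pre-patch test (threshold 50) would have triggered.
    System.out.println("threshold=" + avgLoadWithSlop + " overloaded=" + overloaded);
  }
}
```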
RegionServerOperation.java
@@ -69,12 +69,6 @@ abstract class RegionServerOperation implements Delayed, HConstants {
 
   protected boolean metaTableAvailable() {
     boolean available = true;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("numberOfMetaRegions: " +
-        master.regionManager.numMetaRegions() +
-        ", onlineMetaRegions.size(): " +
-        master.regionManager.numOnlineMetaRegions());
-    }
     if (master.regionManager.numMetaRegions() !=
       master.regionManager.numOnlineMetaRegions()) {
       // We can't proceed because not all of the meta regions are online.
@@ -83,6 +77,10 @@ abstract class RegionServerOperation implements Delayed, HConstants {
       // in the run queue, put this request on the delay queue to give
       // other threads the opportunity to get the meta regions on-line.
       if (LOG.isDebugEnabled()) {
+        LOG.debug("numberOfMetaRegions: " +
+          master.regionManager.numMetaRegions() +
+          ", onlineMetaRegions.size(): " +
+          master.regionManager.numOnlineMetaRegions());
         LOG.debug("Requeuing because not all meta regions are online");
       }
       available = false;
@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+ import org.apache.hadoop.hbase.util.Sleeper;

 /**
 * Uses Callable pattern so that operations against meta regions do not need

@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
 */
 abstract class RetryableMetaOperation<T> implements Callable<T> {
 protected final Log LOG = LogFactory.getLog(this.getClass());
+ protected final Sleeper sleeper;
 protected final MetaRegion m;
 protected final HMaster master;

@ -47,6 +49,7 @@ abstract class RetryableMetaOperation<T> implements Callable<T> {
 protected RetryableMetaOperation(MetaRegion m, HMaster master) {
 this.m = m;
 this.master = master;
+ this.sleeper = new Sleeper(master.threadWakeFrequency, master.closed);
 }

 protected T doWithRetries()

@ -89,7 +92,7 @@ abstract class RetryableMetaOperation<T> implements Callable<T> {
 } catch (Exception e) {
 throw new RuntimeException(e);
 }
- master.sleeper.sleep();
+ sleeper.sleep();
 }
 return null;
 }
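Each RetryableMetaOperation now carries its own Sleeper rather than borrowing master.sleeper. A rough sketch of that pattern, with a stand-in sleeper built from plain JDK types, follows; SimpleSleeper and RetryingOp are invented names, and only the per-operation-sleeper idea comes from the diff.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicBoolean;

// Stand-in for org.apache.hadoop.hbase.util.Sleeper: sleeps for a fixed
// period unless a shared stop flag has been set. Names are assumptions.
class SimpleSleeper {
  private final int periodMillis;
  private final AtomicBoolean stop;
  SimpleSleeper(int periodMillis, AtomicBoolean stop) {
    this.periodMillis = periodMillis;
    this.stop = stop;
  }
  void sleep() {
    if (stop.get()) {
      return;
    }
    try {
      Thread.sleep(periodMillis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}

// Retry loop in the style of doWithRetries(): each operation owns its
// sleeper, so its pauses do not interfere with other callers.
abstract class RetryingOp<T> implements Callable<T> {
  private final SimpleSleeper sleeper;
  private final int retries;
  RetryingOp(SimpleSleeper sleeper, int retries) {
    this.sleeper = sleeper;
    this.retries = retries;
  }
  T doWithRetries() throws Exception {
    for (int tries = 0; tries < retries; tries++) {
      try {
        return call();
      } catch (IOException e) {
        // Last attempt: give up and rethrow.
        if (tries == retries - 1) {
          throw e;
        }
      }
      sleeper.sleep();
    }
    return null;
  }
}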
@ -124,10 +124,17 @@ class ServerManager implements HConstants {
 // The startup message was from a known server with the same name.
 // Timeout the old one right away.
 HServerAddress root = master.getRootRegionLocation();
+ boolean rootServer = false;
 if (root != null && root.equals(storedInfo.getServerAddress())) {
- master.regionManager.unassignRootRegion();
+ master.regionManager.unsetRootRegion();
+ rootServer = true;
+ }
+ try {
+ master.toDoQueue.put(
+ new ProcessServerShutdown(master, storedInfo, rootServer));
+ } catch (InterruptedException e) {
+ LOG.error("Insertion into toDoQueue was interrupted", e);
 }
- master.delayedToDoQueue.put(new ProcessServerShutdown(master, storedInfo));
 }

 // record new server

@ -285,16 +292,18 @@ class ServerManager implements HConstants {
 serversToServerInfo.put(serverName, serverInfo);

 HServerLoad load = serversToLoad.get(serverName);
- if (load != null && !load.equals(serverInfo.getLoad())) {
- // We have previous information about the load on this server
- // and the load on this server has changed
- synchronized (loadToServers) {
- Set<String> servers = loadToServers.get(load);
+ if (load != null) {
+ if (!load.equals(serverInfo.getLoad())) {
+ // We have previous information about the load on this server
+ // and the load on this server has changed
+ synchronized (loadToServers) {
+ Set<String> servers = loadToServers.get(load);

 // Note that servers should never be null because loadToServers
 // and serversToLoad are manipulated in pairs
 servers.remove(serverName);
 loadToServers.put(load, servers);
+ }
 }
 }

@ -499,19 +508,17 @@ class ServerManager implements HConstants {
 // the ProcessRegionClose going on asynchronously.
 master.regionManager.noLongerUnassigned(region);

- if (!reassignRegion) {
- // either the region is being offlined or deleted. we want to do those
- // operations asynchronously, so we'll creating a todo item for that.
- try {
- master.toDoQueue.put(new ProcessRegionClose(master, region,
- offlineRegion));
- } catch (InterruptedException e) {
- throw new RuntimeException(
- "Putting into toDoQueue was interrupted.", e);
- }
- } else {
- // we are reassigning the region eventually, so set it unassigned
- master.regionManager.setUnassigned(region);
+ // NOTE: we cannot put the region into unassignedRegions as that
+ // changes the ordering of the messages we've received. In
+ // this case, a close could be processed before an open
+ // resulting in the master not agreeing on the region's
+ // state.
+ try {
+ master.toDoQueue.put(new ProcessRegionClose(master, region,
+ offlineRegion, reassignRegion));
+ } catch (InterruptedException e) {
+ throw new RuntimeException(
+ "Putting into toDoQueue was interrupted.", e);
 }
 }
 }

@ -670,10 +677,12 @@ class ServerManager implements HConstants {
 LOG.info(server + " lease expired");
 // Remove the server from the known servers list and update load info
 HServerInfo info = serversToServerInfo.remove(server);
+ boolean rootServer = false;
 if (info != null) {
 HServerAddress root = master.getRootRegionLocation();
 if (root != null && root.equals(info.getServerAddress())) {
- master.regionManager.unassignRootRegion();
+ master.regionManager.unsetRootRegion();
+ rootServer = true;
 }
 String serverName = info.getServerAddress().toString();
 HServerLoad load = serversToLoad.remove(serverName);

@ -687,17 +696,16 @@ class ServerManager implements HConstants {
 }
 }
 deadServers.add(server);
+ try {
+ master.toDoQueue.put(
+ new ProcessServerShutdown(master, info, rootServer));
+ } catch (InterruptedException e) {
+ LOG.error("insert into toDoQueue was interrupted", e);
+ }
 }
 synchronized (serversToServerInfo) {
 serversToServerInfo.notifyAll();
 }

- // NOTE: If the server was serving the root region, we cannot reassign it
- // here because the new server will start serving the root region before
- // the ProcessServerShutdown operation has a chance to split the log file.
- if (info != null) {
- master.delayedToDoQueue.put(new ProcessServerShutdown(master, info));
- }
 }
 }
@ -48,7 +48,7 @@ public class BeforeThisStoreKey extends HStoreKey {
 }

 @Override
- public int compareTo(Object o) {
+ public int compareTo(final HStoreKey o) {
 int result = this.beforeThisKey.compareTo(o);
 return result == 0? -1: result;
 }
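Narrowing compareTo from Object to HStoreKey matches a Comparable<HStoreKey>-style declaration. The 0-becomes-minus-1 trick that makes the wrapper sort strictly before the wrapped key can be sketched generically; BeforeKey below is an illustrative stand-in, not HBase API.

// Generic illustration of the "sort strictly before" trick used by
// BeforeThisStoreKey: equality with the wrapped key is reported as "less".
final class BeforeKey<K extends Comparable<K>> implements Comparable<K> {
  private final K wrapped;
  BeforeKey(K wrapped) {
    this.wrapped = wrapped;
  }
  @Override
  public int compareTo(K other) {
    int result = wrapped.compareTo(other);
    // Never report equality, so this key lands just before 'wrapped'
    // in any sorted structure that searches with compareTo.
    return result == 0 ? -1 : result;
  }
}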
@ -0,0 +1,38 @@
+ /**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.hadoop.hbase.regionserver;
+
+ import java.io.IOException;
+
+ /**
+ * Thrown when we fail close of the write-ahead-log file.
+ * Package private. Only used inside this package.
+ */
+ class FailedLogCloseException extends IOException {
+ private static final long serialVersionUID = 1759152841462990925L;
+
+ public FailedLogCloseException() {
+ super();
+ }
+
+ public FailedLogCloseException(String arg0) {
+ super(arg0);
+ }
+ }
@ -178,7 +178,7 @@ class Flusher extends Thread implements FlushRequester {
 // is required. Currently the only way to do this is a restart of
 // the server. Abort because hdfs is probably bad (HBASE-644 is a case
 // where hdfs was bad but passed the hdfs check).
- LOG.fatal("Replay of hlog required. Forcing server restart", ex);
+ LOG.fatal("Replay of hlog required. Forcing server shutdown", ex);
 server.abort();
 return false;
 } catch (IOException ex) {
@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+ import java.io.UnsupportedEncodingException;
+ import java.net.URLEncoder;
 import java.util.Collections;
 import java.util.Map;
 import java.util.SortedMap;

@ -39,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+ import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;

@ -168,7 +171,7 @@ public class HLog implements HConstants {
 }

 /*
- * Accessor for tests.
+ * Accessor for tests. Not part of public API. Hence no javadoc.
 * @return Current state of the monotonically increasing file id.
 */
 public long getFilenum() {

@ -226,9 +229,10 @@ public class HLog implements HConstants {
 * cacheFlushLock and then completeCacheFlush could be called which would wait
 * for the lock on this and consequently never release the cacheFlushLock
 *
+ * @throws FailedLogCloseException
 * @throws IOException
 */
- public void rollWriter() throws IOException {
+ public void rollWriter() throws FailedLogCloseException, IOException {
 this.cacheFlushLock.lock();
 try {
 if (closed) {

@ -237,7 +241,17 @@ public class HLog implements HConstants {
 synchronized (updateLock) {
 if (this.writer != null) {
 // Close the current writer, get a new one.
- this.writer.close();
+ try {
+ this.writer.close();
+ } catch (IOException e) {
+ // Failed close of log file. Means we're losing edits. For now,
+ // shut ourselves down to minimize loss. Alternative is to try and
+ // keep going. See HBASE-930.
+ FailedLogCloseException flce =
+ new FailedLogCloseException("#" + this.filenum);
+ flce.initCause(e);
+ throw flce;
+ }
 Path p = computeFilename(old_filenum);
 if (LOG.isDebugEnabled()) {
 LOG.debug("Closing current log writer " + FSUtils.getPath(p));

@ -608,85 +622,90 @@ public class HLog implements HConstants {
 LOG.debug("Splitting " + i + " of " + logfiles.length + ": " +
 logfiles[i].getPath());
 }
- // Check for empty file.
- if (logfiles[i].getLen() <= 0) {
- LOG.info("Skipping " + logfiles[i].toString() +
- " because zero length");
- continue;
- }
+ // Check for possibly empty file. With appends, currently Hadoop reports
+ // a zero length even if the file has been sync'd. Revisit if
+ // HADOOP-4751 is committed.
+ boolean possiblyEmpty = logfiles[i].getLen() <= 0;
 HLogKey key = new HLogKey();
 HLogEdit val = new HLogEdit();
- SequenceFile.Reader in =
- new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
 try {
+ SequenceFile.Reader in =
+ new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
+ try {
 int count = 0;
 for (; in.next(key, val); count++) {
 byte [] tableName = key.getTablename();
 byte [] regionName = key.getRegionName();
 SequenceFile.Writer w = logWriters.get(regionName);
 if (w == null) {
 Path logfile = new Path(
 HRegion.getRegionDir(
 HTableDescriptor.getTableDir(rootDir, tableName),
 HRegionInfo.encodeRegionName(regionName)),
 HREGION_OLDLOGFILE_NAME);
 Path oldlogfile = null;
 SequenceFile.Reader old = null;
 if (fs.exists(logfile)) {
 LOG.warn("Old log file " + logfile +
 " already exists. Copying existing file to new file");
 oldlogfile = new Path(logfile.toString() + ".old");
 fs.rename(logfile, oldlogfile);
 old = new SequenceFile.Reader(fs, oldlogfile, conf);
 }
 w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
 HLogEdit.class, getCompressionType(conf));
 // Use copy of regionName; regionName object is reused inside in
 // HStoreKey.getRegionName so its content changes as we iterate.
 logWriters.put(regionName, w);
 if (LOG.isDebugEnabled()) {
 LOG.debug("Creating new log file writer for path " + logfile +
 " and region " + regionName);
 }

 if (old != null) {
 // Copy from existing log file
 HLogKey oldkey = new HLogKey();
 HLogEdit oldval = new HLogEdit();
 for (; old.next(oldkey, oldval); count++) {
 if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
 LOG.debug("Copied " + count + " edits");
 }
 w.append(oldkey, oldval);
 }
 old.close();
 fs.delete(oldlogfile, true);
 }
 }
 w.append(key, val);
 }
 if (LOG.isDebugEnabled()) {
 LOG.debug("Applied " + count + " total edits from " +
 logfiles[i].getPath().toString());
 }
 } catch (IOException e) {
 e = RemoteExceptionHandler.checkIOException(e);
 if (!(e instanceof EOFException)) {
 LOG.warn("Exception processing " + logfiles[i].getPath() +
 " -- continuing. Possible DATA LOSS!", e);
 }
 } finally {
 try {
 in.close();
 } catch (IOException e) {
 LOG.warn("Close in finally threw exception -- continuing", e);
 }
 // Delete the input file now so we do not replay edits. We could
 // have gotten here because of an exception. If so, probably
 // nothing we can do about it. Replaying it, it could work but we
 // could be stuck replaying for ever. Just continue though we
 // could have lost some edits.
 fs.delete(logfiles[i].getPath(), true);
 }
+ } catch (IOException e) {
+ if (possiblyEmpty) {
+ continue;
+ }
+ throw e;
+ }
 }
 } finally {

@ -705,6 +724,28 @@ public class HLog implements HConstants {
 }
 LOG.info("log file splitting completed for " + srcDir.toString());
 }
+
+ /**
+ * Construct the HLog directory name
+ *
+ * @param info HServerInfo for server
+ * @return the HLog directory name
+ */
+ public static String getHLogDirectoryName(HServerInfo info) {
+ StringBuilder dirName = new StringBuilder("log_");
+ try {
+ dirName.append(URLEncoder.encode(
+ info.getServerAddress().getBindAddress(), UTF8_ENCODING));
+ } catch (UnsupportedEncodingException e) {
+ LOG.error("Error encoding '" + info.getServerAddress().getBindAddress()
+ + "'", e);
+ }
+ dirName.append("_");
+ dirName.append(info.getStartCode());
+ dirName.append("_");
+ dirName.append(info.getServerAddress().getPort());
+ return dirName.toString();
+ }

 private static void usage() {
 System.err.println("Usage: java org.apache.hbase.HLog" +
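getHLogDirectoryName assembles the log directory name from the server's bind address, start code, and port. A tiny sketch of the resulting name for a hypothetical server (the address, start code, and port below are made-up values) is:

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

// Sketch of the naming scheme only; the values are invented for illustration.
public class HLogDirNameDemo {
  public static void main(String[] args) throws UnsupportedEncodingException {
    String bindAddress = "10.0.0.5";      // assumed server bind address
    long startCode = 1225407600000L;      // assumed server start code
    int port = 60020;                     // assumed region server port
    String dirName = "log_" + URLEncoder.encode(bindAddress, "UTF-8")
        + "_" + startCode + "_" + port;
    // Prints: log_10.0.0.5_1225407600000_60020
    System.out.println(dirName);
  }
}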
@ -326,7 +326,8 @@ public class HRegion implements HConstants {
 private final Map<Integer, TreeMap<HStoreKey, byte []>> targetColumns =
 new ConcurrentHashMap<Integer, TreeMap<HStoreKey, byte []>>();
 // Default access because read by tests.
- protected final Map<Integer, HStore> stores = new ConcurrentHashMap<Integer, HStore>();
+ protected final Map<Integer, HStore> stores =
+ new ConcurrentHashMap<Integer, HStore>();
 final AtomicLong memcacheSize = new AtomicLong(0);

 final Path basedir;

@ -894,7 +895,8 @@ public class HRegion implements HConstants {
 return midKey;
 }
 }
- LOG.info("starting compaction on region " + this);
+ LOG.info("starting " + (majorCompaction? "major" : "") +
+ " compaction on region " + this);
 long startTime = System.currentTimeMillis();
 doRegionCompactionPrep();
 long maxSize = -1;

@ -1236,40 +1238,47 @@ public class HRegion implements HConstants {
 * @return map of values
 * @throws IOException
 */
- public RowResult getClosestRowBefore(final byte [] row)
- throws IOException{
+ RowResult getClosestRowBefore(final byte [] row) throws IOException{
+ return getClosestRowBefore(row, HConstants.COLUMN_FAMILY);
+ }
+
+ /**
+ * Return all the data for the row that matches <i>row</i> exactly,
+ * or the one that immediately preceeds it, at or immediately before
+ * <i>ts</i>.
+ *
+ * @param row row key
+ * @param columnFamily
+ * @return map of values
+ * @throws IOException
+ */
+ public RowResult getClosestRowBefore(final byte [] row,
+ final byte [] columnFamily) throws IOException{
 // look across all the HStores for this region and determine what the
 // closest key is across all column families, since the data may be sparse
 HStoreKey key = null;
 checkRow(row);
 splitsAndClosesLock.readLock().lock();
 try {
- // examine each column family for the preceeding or matching key
- for (HStore store : stores.values()) {
- // get the closest key
- byte [] closestKey = store.getRowKeyAtOrBefore(row);
- // if it happens to be an exact match, we can stop looping
- if (HStoreKey.equalsTwoRowKeys(regionInfo,row, closestKey)) {
- key = new HStoreKey(closestKey, this.regionInfo);
- break;
- }
- // otherwise, we need to check if it's the max and move to the next
- if (closestKey != null
- && (key == null || HStoreKey.compareTwoRowKeys(
- regionInfo,closestKey, key.getRow()) > 0) ) {
- key = new HStoreKey(closestKey, this.regionInfo);
- }
- }
- if (key == null) {
+ HStore store = getStore(columnFamily);
+ // get the closest key
+ byte [] closestKey = store.getRowKeyAtOrBefore(row);
+ // If it happens to be an exact match, we can stop looping.
+ // Otherwise, we need to check if it's the max and move to the next
+ if (HStoreKey.equalsTwoRowKeys(regionInfo, row, closestKey)) {
+ key = new HStoreKey(closestKey, this.regionInfo);
+ } else if (closestKey != null &&
+ (key == null || HStoreKey.compareTwoRowKeys(
+ regionInfo,closestKey, key.getRow()) > 0) ) {
+ key = new HStoreKey(closestKey, this.regionInfo);
+ } else {
 return null;
 }

- // now that we've found our key, get the values
+ // Now that we've found our key, get the values
 HbaseMapWritable<byte [], Cell> cells =
 new HbaseMapWritable<byte [], Cell>();
- for (HStore s: stores.values()) {
- s.getFull(key, null, cells);
- }
+ store.getFull(key, null, cells);
 return new RowResult(key.getRow(), cells);
 } finally {
 splitsAndClosesLock.readLock().unlock();
@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.IOException;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.lang.reflect.Constructor;
+ import java.lang.reflect.Field;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;

@ -68,6 +69,7 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+ import org.apache.hadoop.hbase.ValueOverMaxLengthException;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.UnknownRowLockException;
 import org.apache.hadoop.hbase.Leases.LeaseStillHeldException;

@ -171,25 +173,34 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 */
 class ShutdownThread extends Thread {
 private final HRegionServer instance;
+ private final Thread mainThread;

 /**
 * @param instance
+ * @param mainThread
 */
- public ShutdownThread(HRegionServer instance) {
+ public ShutdownThread(HRegionServer instance, Thread mainThread) {
 this.instance = instance;
+ this.mainThread = mainThread;
 }

 @Override
 public void run() {
 LOG.info("Starting shutdown thread.");

- // tell the region server to stop and wait for it to complete
+ // tell the region server to stop
 instance.stop();
- instance.join();
+ // Wait for main thread to exit.
+ instance.join(mainThread);

 LOG.info("Shutdown thread complete");
 }
 }

+ // We need to call HDFS shutdown when we are done shutting down
+ private Thread hdfsShutdownThread;

 // Compactions
 final CompactSplitThread compactSplitThread;

@ -269,10 +280,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 for(int i = 0; i < nbBlocks; i++) {
 reservedSpace.add(new byte[DEFAULT_SIZE_RESERVATION_BLOCK]);
 }
-
- // Register shutdown hook for HRegionServer, runs an orderly shutdown
- // when a kill signal is recieved
- Runtime.getRuntime().addShutdownHook(new ShutdownThread(this));
 }

 /**

@ -293,11 +300,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 long now = System.currentTimeMillis();
 if (lastMsg != 0 && (now - lastMsg) >= serverLeaseTimeout) {
 // It has been way too long since we last reported to the master.
- // Commit suicide.
- LOG.fatal("unable to report to master for " + (now - lastMsg) +
- " milliseconds - aborting server");
- abort();
- break;
+ LOG.warn("unable to report to master for " + (now - lastMsg) +
+ " milliseconds - retrying");
 }
 if ((now - lastMsg) >= msgInterval) {
 HMsg outboundArray[] = null;

@ -402,12 +406,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 LOG.warn("Processing message (Retry: " + tries + ")", e);
 tries++;
 } else {
- LOG.fatal("Exceeded max retries: " + this.numRetries, e);
- if (!checkFileSystem()) {
- continue;
- }
- // Something seriously wrong. Shutdown.
- stop();
+ LOG.error("Exceeded max retries: " + this.numRetries, e);
+ checkFileSystem();
 }
 }
 }

@ -445,8 +445,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 if (this.fsOk) {
 // Only try to clean up if the file system is available
 try {
- this.log.close();
- LOG.info("On abort, closed hlog");
+ if (this.log != null) {
+ this.log.close();
+ LOG.info("On abort, closed hlog");
+ }
 } catch (IOException e) {
 LOG.error("Unable to close log in abort",
 RemoteExceptionHandler.checkIOException(e));

@ -488,6 +490,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 this.hbaseMaster = null;
 }
 join();
+
+ LOG.info("Running hdfs shutdown thread");
+ hdfsShutdownThread.start();
+ try {
+ hdfsShutdownThread.join();
+ LOG.info("Hdfs shutdown thread completed.");
+ } catch (InterruptedException e) {
+ LOG.warn("hdfsShutdownThread.join() was interrupted", e);
+ }
 LOG.info(Thread.currentThread().getName() + " exiting");
 }

@ -518,6 +529,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 // to defaults).
 this.conf.set("fs.default.name", this.conf.get("hbase.rootdir"));
 this.fs = FileSystem.get(this.conf);
+
+ // Register shutdown hook for HRegionServer, runs an orderly shutdown
+ // when a kill signal is recieved
+ Runtime.getRuntime().addShutdownHook(new ShutdownThread(this,
+ Thread.currentThread()));
+ this.hdfsShutdownThread = suppressHdfsShutdownHook();
+
 this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR));
 this.log = setupHLog();
 startServiceThreads();

@ -533,6 +551,43 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 }
 }

+ /**
+ * So, HDFS caches FileSystems so when you call FileSystem.get it's fast. In
+ * order to make sure things are cleaned up, it also creates a shutdown hook
+ * so that all filesystems can be closed when the process is terminated. This
+ * conveniently runs concurrently with our own shutdown handler, and
+ * therefore causes all the filesystems to be closed before the server can do
+ * all its necessary cleanup.
+ *
+ * The crazy dirty reflection in this method sneaks into the FileSystem cache
+ * and grabs the shutdown hook, removes it from the list of active shutdown
+ * hooks, and hangs onto it until later. Then, after we're properly done with
+ * our graceful shutdown, we can execute the hdfs hook manually to make sure
+ * loose ends are tied up.
+ *
+ * This seems quite fragile and susceptible to breaking if Hadoop changes
+ * anything about the way this cleanup is managed. Keep an eye on things.
+ */
+ private Thread suppressHdfsShutdownHook() {
+ try {
+ Field field = FileSystem.class.getDeclaredField ("clientFinalizer");
+ field.setAccessible(true);
+ Thread hdfsClientFinalizer = (Thread)field.get(null);
+ if (hdfsClientFinalizer == null) {
+ throw new RuntimeException("client finalizer is null, can't suppress!");
+ }
+ Runtime.getRuntime().removeShutdownHook(hdfsClientFinalizer);
+ return hdfsClientFinalizer;
+
+ } catch (NoSuchFieldException nsfe) {
+ LOG.fatal("Couldn't find field 'clientFinalizer' in FileSystem!", nsfe);
+ throw new RuntimeException("Failed to suppress HDFS shutdown hook");
+ } catch (IllegalAccessException iae) {
+ LOG.fatal("Couldn't access field 'clientFinalizer' in FileSystem!", iae);
+ throw new RuntimeException("Failed to suppress HDFS shutdown hook");
+ }
+ }
+
 /**
 * Report the status of the server. A server is online once all the startup
 * is completed (setting up filesystem, starting service threads, etc.). This

@ -546,10 +601,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 private HLog setupHLog() throws RegionServerRunningException,
 IOException {

- Path logdir = new Path(rootDir, "log" + "_" +
- serverInfo.getServerAddress().getBindAddress() + "_" +
- this.serverInfo.getStartCode() + "_" +
- this.serverInfo.getServerAddress().getPort());
+ Path logdir = new Path(rootDir, HLog.getHLogDirectoryName(serverInfo));
 if (LOG.isDebugEnabled()) {
 LOG.debug("Log dir " + logdir);
 }

@ -684,7 +736,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 join(this.logRoller);
 }

- private void join(final Thread t) {
+ void join(final Thread t) {
 while (t.isAlive()) {
 try {
 t.join();

@ -698,17 +750,26 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 * Let the master know we're here
 * Run initialization using parameters passed us by the master.
 */
- private MapWritable reportForDuty(final Sleeper sleeper)
- throws IOException {
+ private MapWritable reportForDuty(final Sleeper sleeper) {
 if (LOG.isDebugEnabled()) {
 LOG.debug("Telling master at " +
 conf.get(MASTER_ADDRESS) + " that we are up");
 }
- // Do initial RPC setup. The final argument indicates that the RPC should retry indefinitely.
- this.hbaseMaster = (HMasterRegionInterface)HbaseRPC.waitForProxy(
- HMasterRegionInterface.class, HMasterRegionInterface.versionID,
- new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
- this.conf, -1);
+ HMasterRegionInterface master = null;
+ while (!stopRequested.get() && master == null) {
+ try {
+ // Do initial RPC setup. The final argument indicates that the RPC
+ // should retry indefinitely.
+ master = (HMasterRegionInterface)HbaseRPC.waitForProxy(
+ HMasterRegionInterface.class, HMasterRegionInterface.versionID,
+ new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
+ this.conf, -1);
+ } catch (IOException e) {
+ LOG.warn("Unable to connect to master. Retrying. Error was:", e);
+ sleeper.sleep();
+ }
+ }
+ this.hbaseMaster = master;
 MapWritable result = null;
 long lastMsg = 0;
 while(!stopRequested.get()) {

@ -806,7 +867,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 if(e == null || stopRequested.get()) {
 continue;
 }
- LOG.info(e.msg);
+ LOG.info("Worker: " + e.msg);
 switch(e.msg.getType()) {

 case MSG_REGIONSERVER_QUIESCE:

@ -1046,7 +1107,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 }

 public RowResult getClosestRowBefore(final byte [] regionName,
- final byte [] row)
+ final byte [] row, final byte [] columnFamily)
 throws IOException {
 checkOpen();
 requestCount.incrementAndGet();

@ -1054,7 +1115,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 // locate the region we're operating on
 HRegion region = getRegion(regionName);
 // ask the region for all the data
- RowResult rr = region.getClosestRowBefore(row);
+ RowResult rr = region.getClosestRowBefore(row, columnFamily);
 return rr;
 } catch (IOException e) {
 checkFileSystem();

@ -1134,7 +1195,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 if (fam != null) {
 int maxLength = fam.getMaxValueLength();
 if (operation.getValue().length > maxLength) {
- throw new IOException("Value in column "
+ throw new ValueOverMaxLengthException("Value in column "
 + Bytes.toString(operation.getColumn()) + " is too long. "
 + operation.getValue().length + " instead of " + maxLength);
 }
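The shutdown hook above now receives the main thread and joins it instead of calling instance.join(). Stripped of HBase specifics, the hand-off looks roughly like this sketch; the class name, field names, and the placeholder loop are illustrative assumptions.

// Minimal sketch: a shutdown hook that asks the worker to stop and then
// waits for the thread that registered the hook to finish its cleanup.
public class ShutdownHandoffDemo {
  private static volatile boolean stopRequested = false;

  public static void main(String[] args) throws InterruptedException {
    final Thread mainThread = Thread.currentThread();
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        stopRequested = true;   // tell the server loop to stop
        try {
          mainThread.join();    // wait for the main thread to exit cleanly
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    });
    while (!stopRequested) {
      Thread.sleep(100);        // stand-in for the region server main loop
    }
    // Orderly cleanup would run here before the JVM exits.
  }
}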
@ -600,7 +600,7 @@ public class HStore implements HConstants {
 this.info, family.getName(), -1L, null);
 MapFile.Writer out = flushedFile.getWriter(this.fs, this.compression,
 this.family.isBloomfilter(), cache.size());
- out.setIndexInterval(family.getMapFileIndexInterval());
+ setIndexInterval(out);

 // Here we tried picking up an existing HStoreFile from disk and
 // interlacing the memcache flush compacting as we go. The notion was

@ -648,6 +648,27 @@ public class HStore implements HConstants {
 }
 return storefiles.size() >= compactionThreshold;
 }

+ /*
+ * Set the index interval for the mapfile. There are two sources for
+ * configuration information: the HCD, and the global HBase config.
+ * If a source returns the default value, it is ignored. Otherwise,
+ * the smallest non-default value is preferred.
+ */
+ private void setIndexInterval(MapFile.Writer writer) {
+ int familyInterval = this.family.getMapFileIndexInterval();
+ int interval = this.conf.getInt("hbase.io.index.interval",
+ HColumnDescriptor.DEFAULT_MAPFILE_INDEX_INTERVAL);
+ if (familyInterval != HColumnDescriptor.DEFAULT_MAPFILE_INDEX_INTERVAL) {
+ if (interval != HColumnDescriptor.DEFAULT_MAPFILE_INDEX_INTERVAL) {
+ if (familyInterval < interval)
+ interval = familyInterval;
+ } else {
+ interval = familyInterval;
+ }
+ }
+ writer.setIndexInterval(interval);
+ }
+
 /*
 * Change readers adding into place the Reader produced by this new flush.

@ -888,7 +909,7 @@ public class HStore implements HConstants {
 }
 MapFile.Writer writer = compactedOutputFile.getWriter(this.fs,
 this.compression, this.family.isBloomfilter(), nrows);
- writer.setIndexInterval(family.getMapFileIndexInterval());
+ setIndexInterval(writer);
 try {
 compact(writer, rdrs, majorCompaction);
 } finally {
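setIndexInterval prefers the smallest non-default value between the column-family setting and hbase.io.index.interval. The selection rule in isolation, with a stand-in DEFAULT constant (the real one is HColumnDescriptor.DEFAULT_MAPFILE_INDEX_INTERVAL; its value here is an assumption):

// Stand-alone sketch of the "smallest non-default wins" rule.
final class IndexIntervalRule {
  static final int DEFAULT = 128; // assumed default; the real value lives in HColumnDescriptor

  static int choose(int familyInterval, int configuredInterval) {
    int interval = configuredInterval;
    if (familyInterval != DEFAULT) {
      if (interval != DEFAULT) {
        if (familyInterval < interval) {
          interval = familyInterval;
        }
      } else {
        interval = familyInterval;
      }
    }
    return interval;
  }

  public static void main(String[] args) {
    System.out.println(choose(32, 128));  // family overrides default -> 32
    System.out.println(choose(128, 64));  // config overrides default -> 64
    System.out.println(choose(32, 64));   // both set, smaller wins -> 32
  }
}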
@ -22,8 +22,10 @@ package org.apache.hadoop.hbase.regionserver;

 import java.io.IOException;
 import java.util.ArrayList;
+ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+ import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;

@ -146,7 +148,7 @@ class HStoreScanner implements InternalScanner {
 // are only keeping rows and columns that match those set on the
 // scanner and which have delete values. If memory usage becomes a
 // problem, could redo as bloom filter.
- List<HStoreKey> deletes = new ArrayList<HStoreKey>();
+ Set<HStoreKey> deletes = new HashSet<HStoreKey>();
 for (int i = 0; i < scanners.length && !filtered; i++) {
 while ((scanners[i] != null
 && !filtered

@ -166,16 +168,14 @@ class HStoreScanner implements InternalScanner {
 // but this had the effect of overwriting newer
 // values with older ones. So now we only insert
 // a result if the map does not contain the key.
- HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_BYTE_ARRAY,
+ HStoreKey hsk = new HStoreKey(key.getRow(),
+ HConstants.EMPTY_BYTE_ARRAY,
 key.getTimestamp(), this.store.getHRegionInfo());
 for (Map.Entry<byte [], Cell> e : resultSets[i].entrySet()) {
 hsk.setColumn(e.getKey());
 if (HLogEdit.isDeleted(e.getValue().getValue())) {
- if (!deletes.contains(hsk)) {
- // Key changes as we cycle the for loop so add a copy to
- // the set of deletes.
- deletes.add(new HStoreKey(hsk));
- }
+ // Only first key encountered is added; deletes is a Set.
+ deletes.add(new HStoreKey(hsk));
 } else if (!deletes.contains(hsk) &&
 !filtered &&
 moreToFollow &&
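Moving deletes from an ArrayList to a HashSet turns the repeated contains() checks into hash lookups and lets add() absorb duplicates, assuming HStoreKey defines equals and hashCode consistently. A small stand-alone comparison, using plain Strings purely for illustration:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustrative only: contrasts List.contains (linear scan) with
// Set.contains (hash lookup) for tracking deleted keys.
public class DeleteTrackingDemo {
  public static void main(String[] args) {
    List<String> deleteList = new ArrayList<String>();
    Set<String> deleteSet = new HashSet<String>();

    String key = "row1/colA/12345";
    // List version needs an explicit duplicate check before adding.
    if (!deleteList.contains(key)) {
      deleteList.add(key);
    }
    // Set version: add() is already idempotent, duplicates are ignored.
    deleteSet.add(key);

    System.out.println(deleteList.contains(key)); // true, O(n) scan
    System.out.println(deleteSet.contains(key));  // true, O(1) expected
  }
}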
@ -77,8 +77,11 @@ class LogRoller extends Thread implements LogRollListener {
 try {
 LOG.info("Rolling hlog. Number of entries: " + server.getLog().getNumEntries());
 server.getLog().rollWriter();
+ } catch (FailedLogCloseException e) {
+ LOG.fatal("Forcing server shutdown", e);
+ server.abort();
 } catch (IOException ex) {
- LOG.error("Log rolling failed",
+ LOG.error("Log rolling failed with ioe: ",
 RemoteExceptionHandler.checkIOException(ex));
 server.checkFileSystem();
 } catch (Exception ex) {
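Since FailedLogCloseException extends IOException, its catch clause has to precede the IOException clause, as it does above; the compiler enforces that ordering. A minimal sketch of the same catch ordering with stand-in names (the exception and method here are not HBase API):

import java.io.IOException;

// Illustration of catch-clause ordering with a subclass exception.
class CatchOrderDemo {
  static class FatalCloseException extends IOException {
    FatalCloseException(String msg) { super(msg); }
  }

  static void roll(boolean fatal) throws IOException {
    if (fatal) {
      throw new FatalCloseException("could not close log");
    }
    throw new IOException("transient roll failure");
  }

  public static void main(String[] args) {
    try {
      roll(true);
    } catch (FatalCloseException e) {
      // Subclass must be caught first, otherwise this clause is unreachable.
      System.out.println("aborting: " + e.getMessage());
    } catch (IOException e) {
      System.out.println("retrying after: " + e.getMessage());
    }
  }
}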
@ -72,10 +72,8 @@ public class FSUtils {
 } catch (IOException e) {
 exception = RemoteExceptionHandler.checkIOException(e);
 }

 try {
 fs.close();
-
-
 } catch (Exception e) {
 LOG.error("file system close failed: ", e);
 }
@ -200,7 +200,7 @@ public class PerformanceEvaluation implements HConstants {
 if (extantTables.length > 0) {
 // Check to see if our table already exists. Print warning if it does.
 for (int i = 0; i < extantTables.length; i++) {
- if (extantTables[0].equals(tableDescriptor)) {
+ if (extantTables[i].equals(tableDescriptor)) {
 LOG.warn("Table " + tableDescriptor + " already exists");
 tableExists = true;
 break;

@ -701,4 +701,4 @@ public class PerformanceEvaluation implements HConstants {
 System.exit(new PerformanceEvaluation(c).
 doCommandLine(args));
 }
 }
@ -59,6 +59,7 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
 * @throws IOException
 */
 public void testHTable() throws IOException {
+ LOG.info("TEST: " + getName());
 byte[] value = "value".getBytes(UTF8_ENCODING);

 try {

@ -179,6 +180,7 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
 * For HADOOP-2579
 */
 public void testTableNotFoundExceptionWithoutAnyTables() {
+ LOG.info("TEST: " + getName());
 try {
 new HTable(conf, "notATable");
 fail("Should have thrown a TableNotFoundException");

@ -195,6 +197,7 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
 * For HADOOP-2579
 */
 public void testTableNotFoundExceptionWithATable() {
+ LOG.info("TEST: " + getName());
 try {
 HBaseAdmin admin = new HBaseAdmin(conf);
 HTableDescriptor testTableADesc =

@ -216,6 +219,7 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
 }

 public void testGetRow() {
+ LOG.info("TEST: " + getName());
 HTable table = null;
 try {
 HColumnDescriptor column2 =
@ -25,17 +25,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.dfs.MiniDFSCluster;
 import org.apache.hadoop.fs.Path;
- import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
- import org.apache.hadoop.hbase.util.Bytes;
- import org.apache.hadoop.hbase.util.JenkinsHash;
- import org.apache.hadoop.io.MapFile;
- import org.apache.hadoop.io.SequenceFile;
- import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.hbase.HBaseTestCase;

 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HStoreKey;
+ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+ import org.apache.hadoop.hbase.util.Bytes;
+ import org.apache.hadoop.io.MapFile;
+ import org.apache.hadoop.io.SequenceFile;
+ import org.apache.hadoop.io.WritableComparable;
 /**
 * Test HStoreFile
 */

@ -234,7 +232,7 @@ public class TestHStoreFile extends HBaseTestCase {
 first = false;
 LOG.info("First in bottom: " + previous);
 }
- assertTrue(key.compareTo(midkey) < 0);
+ assertTrue(key.compareTo((HStoreKey)midkey) < 0);
 }
 if (previous != null) {
 LOG.info("Last in bottom: " + previous.toString());

@ -244,7 +242,7 @@ public class TestHStoreFile extends HBaseTestCase {
 HStoreFile.Range.top, midkey, null);
 first = true;
 while (top.next(key, value)) {
- assertTrue(key.compareTo(midkey) >= 0);
+ assertTrue(key.compareTo((HStoreKey)midkey) >= 0);
 if (first) {
 first = false;
 assertTrue(Bytes.equals(((HStoreKey)midkey).getRow(),

@ -255,7 +253,7 @@ public class TestHStoreFile extends HBaseTestCase {
 LOG.info("Last in top: " + key.toString());
 top.getClosest(midkey, value);
 // Assert value is same as key.
- assertTrue(Bytes.equals(value.get(), ((HStoreKey) midkey).getRow()));
+ assertTrue(Bytes.equals(value.get(), ((HStoreKey)midkey).getRow()));

 // Next test using a midkey that does not exist in the file.
 // First, do a key that is < than first key. Ensure splits behave

@ -270,7 +268,7 @@ public class TestHStoreFile extends HBaseTestCase {
 HStoreFile.Range.top, badkey, null);
 first = true;
 while (top.next(key, value)) {
- assertTrue(key.compareTo(badkey) >= 0);
+ assertTrue(key.compareTo((HStoreKey)badkey) >= 0);
 if (first) {
 first = false;
 LOG.info("First top when key < bottom: " + key.toString());