HBASE-3941 'hbase version' command line should print version info
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1133209 13f79535-47bb-0310-9956-ffa450edef68
commit 7a55ec9a25 (parent f1a6ee8b16)
@@ -115,6 +115,8 @@ Release 0.91.0 - Unreleased
    HBASE-3894  Thread contention over row locks set monitor (Dave Latham)
    HBASE-3959  hadoop-snappy version in the pom.xml is incorrect
                (Alejandro Abdelnur)
+   HBASE-3941  "hbase version" command line should print version info
+               (Jolly Chen)
 
 IMPROVEMENTS
    HBASE-3290  Max Compaction Size (Nicolas Spiegelberg via Stack)
@@ -77,6 +77,8 @@ if [ $# = 0 ]; then
   echo "  migrate          upgrade an hbase.rootdir"
   echo "  hbck             run the hbase 'fsck' tool"
   echo "  classpath        dump hbase CLASSPATH"
+  echo "  version          print the version"
+
   echo " or"
   echo "  CLASSNAME        run the class named CLASSNAME"
   echo "Most commands print help when invoked w/o parameters."
@@ -259,6 +261,8 @@ elif [ "$COMMAND" = "zkcli" ] ; then
 elif [ "$COMMAND" = "classpath" ] ; then
   echo $CLASSPATH
   exit 0
+elif [ "$COMMAND" = "version" ] ; then
+  CLASS='org.apache.hadoop.hbase.util.VersionInfo'
 else
   CLASS=$COMMAND
 fi
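For illustration only: the new `version` command simply resolves `CLASS` to `org.apache.hadoop.hbase.util.VersionInfo` and lets the launcher run it, so an equivalent programmatic call looks like the sketch below. This is a hedged example, not part of the patch; the wrapper class name is invented, and `VersionInfo.main` is assumed to exist because the script above launches that class directly.

```java
// Hypothetical sketch of what `bin/hbase version` effectively executes once the
// launcher has set CLASS=org.apache.hadoop.hbase.util.VersionInfo.
import org.apache.hadoop.hbase.util.VersionInfo;

public class PrintHBaseVersion {
  public static void main(String[] args) {
    // Delegates to VersionInfo, which prints version/revision/build details.
    VersionInfo.main(args);
  }
}
```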
@@ -171,6 +171,13 @@ public final class HConstants {
   /** Used to construct the name of the compaction directory during compaction */
   public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
+
+  /** The file name used to store HTD in HDFS */
+  public static final String TABLEINFO_NAME = ".tableinfo";
+
+  /** The metaupdated column qualifier */
+  public static final byte [] META_MIGRATION_QUALIFIER = Bytes.toBytes("metamigrated");
+
   /** Default maximum file size */
   public static final long DEFAULT_MAX_FILE_SIZE = 256 * 1024 * 1024;
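A rough sketch of how these two new constants are used by the rest of this change: the `Put` mirrors what `MetaEditor.addMetaUpdateStatus` does further down in this patch, while the `.tableinfo` path construction is an assumption added for illustration (the exact table-directory layout is not shown in this diff).

```java
// Sketch only; helper names are hypothetical.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class HConstantsUsageSketch {
  // A table's descriptor file would live at <tableDir>/.tableinfo (assumed layout).
  static Path tableInfoPath(Path tableDir) {
    return new Path(tableDir, HConstants.TABLEINFO_NAME);
  }

  // Marking the -ROOT- row as "meta migrated", as MetaEditor does in this patch.
  static Put markMetaMigrated(byte[] rootRegionName) {
    Put put = new Put(rootRegionName);
    put.add(HConstants.CATALOG_FAMILY, HConstants.META_MIGRATION_QUALIFIER,
        Bytes.toBytes(true));
    return put;
  }
}
```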
@@ -27,6 +27,7 @@ import java.util.Arrays;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
+import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JenkinsHash;
 import org.apache.hadoop.hbase.util.MD5Hash;
@@ -130,11 +131,11 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
 
   /** HRegionInfo for root region */
   public static final HRegionInfo ROOT_REGIONINFO =
-    new HRegionInfo(0L, HTableDescriptor.ROOT_TABLEDESC);
+    new HRegionInfo(0L, Bytes.toBytes("-ROOT-"));
 
   /** HRegionInfo for first meta region */
   public static final HRegionInfo FIRST_META_REGIONINFO =
-    new HRegionInfo(1L, HTableDescriptor.META_TABLEDESC);
+    new HRegionInfo(1L, Bytes.toBytes(".META."));
 
   private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY;
   // This flag is in the parent of a split while the parent is still referenced
@@ -146,34 +147,37 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
   private String regionNameStr = "";
   private boolean split = false;
   private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY;
-  protected HTableDescriptor tableDesc = null;
   private int hashCode = -1;
   //TODO: Move NO_HASH to HStoreFile which is really the only place it is used.
   public static final String NO_HASH = null;
   private volatile String encodedName = NO_HASH;
   private byte [] encodedNameAsBytes = null;
 
+  // Current TableName
+  private byte[] tableName = null;
+  private String tableNameAsString = null;
+
   private void setHashCode() {
     int result = Arrays.hashCode(this.regionName);
     result ^= this.regionId;
     result ^= Arrays.hashCode(this.startKey);
     result ^= Arrays.hashCode(this.endKey);
     result ^= Boolean.valueOf(this.offLine).hashCode();
-    result ^= this.tableDesc.hashCode();
+    result ^= Arrays.hashCode(this.tableName);
     this.hashCode = result;
   }
 
   /**
    * Private constructor used constructing HRegionInfo for the catalog root and
    * first meta regions
    */
-  private HRegionInfo(long regionId, HTableDescriptor tableDesc) {
+  private HRegionInfo(long regionId, byte[] tableName) {
     super();
     this.regionId = regionId;
-    this.tableDesc = tableDesc;
+    this.tableName = tableName.clone();
 
     // Note: Root & First Meta regions names are still in old format
-    this.regionName = createRegionName(tableDesc.getName(), null,
+    this.regionName = createRegionName(tableName, null,
       regionId, false);
     this.regionNameStr = Bytes.toStringBinary(this.regionName);
     setHashCode();
@@ -182,43 +186,66 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
   /** Default constructor - creates empty object */
   public HRegionInfo() {
     super();
-    this.tableDesc = new HTableDescriptor();
+  }
+
+  /**
+   * Used only for migration
+   * @param other HRegionInfoForMigration
+   */
+  public HRegionInfo(HRegionInfo090x other) {
+    super();
+    this.endKey = other.getEndKey();
+    this.offLine = other.isOffline();
+    this.regionId = other.getRegionId();
+    this.regionName = other.getRegionName();
+    this.regionNameStr = Bytes.toStringBinary(this.regionName);
+    this.split = other.isSplit();
+    this.startKey = other.getStartKey();
+    this.hashCode = other.hashCode();
+    this.encodedName = other.getEncodedName();
+    this.tableName = other.getTableDesc().getName();
+  }
+
+  public HRegionInfo(final byte[] tableName) {
+    this(tableName, null, null);
   }
 
   /**
    * Construct HRegionInfo with explicit parameters
    *
-   * @param tableDesc the table descriptor
+   * @param tableName the table name
    * @param startKey first key in region
    * @param endKey end of key range
    * @throws IllegalArgumentException
    */
-  public HRegionInfo(final HTableDescriptor tableDesc, final byte [] startKey,
-      final byte [] endKey)
+  public HRegionInfo(final byte[] tableName, final byte[] startKey,
+      final byte[] endKey)
   throws IllegalArgumentException {
-    this(tableDesc, startKey, endKey, false);
+    this(tableName, startKey, endKey, false);
   }
 
 
   /**
    * Construct HRegionInfo with explicit parameters
    *
-   * @param tableDesc the table descriptor
+   * @param tableName the table descriptor
    * @param startKey first key in region
    * @param endKey end of key range
    * @param split true if this region has split and we have daughter regions
    * regions that may or may not hold references to this region.
    * @throws IllegalArgumentException
    */
-  public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
-      final byte [] endKey, final boolean split)
+  public HRegionInfo(final byte[] tableName, final byte[] startKey,
+      final byte[] endKey, final boolean split)
   throws IllegalArgumentException {
-    this(tableDesc, startKey, endKey, split, System.currentTimeMillis());
+    this(tableName, startKey, endKey, split, System.currentTimeMillis());
   }
 
 
   /**
    * Construct HRegionInfo with explicit parameters
    *
-   * @param tableDesc the table descriptor
+   * @param tableName the table descriptor
    * @param startKey first key in region
    * @param endKey end of key range
    * @param split true if this region has split and we have daughter regions
@@ -226,22 +253,26 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
    * @param regionid Region id to use.
    * @throws IllegalArgumentException
    */
-  public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey,
-      final byte [] endKey, final boolean split, final long regionid)
+  public HRegionInfo(final byte[] tableName, final byte[] startKey,
+      final byte[] endKey, final boolean split, final long regionid)
   throws IllegalArgumentException {
 
     super();
-    if (tableDesc == null) {
-      throw new IllegalArgumentException("tableDesc cannot be null");
+    if (tableName == null) {
+      throw new IllegalArgumentException("tableName cannot be null");
     }
+    this.tableName = tableName.clone();
     this.offLine = false;
     this.regionId = regionid;
-    this.regionName = createRegionName(tableDesc.getName(), startKey, regionId, true);
+
+    this.regionName = createRegionName(this.tableName, startKey, regionId, true);
+
     this.regionNameStr = Bytes.toStringBinary(this.regionName);
     this.split = split;
     this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone();
     this.startKey = startKey == null?
       HConstants.EMPTY_START_ROW: startKey.clone();
-    this.tableDesc = tableDesc;
+    this.tableName = tableName.clone();
     setHashCode();
   }
 
@@ -259,11 +290,12 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     this.regionNameStr = Bytes.toStringBinary(this.regionName);
     this.split = other.isSplit();
     this.startKey = other.getStartKey();
-    this.tableDesc = other.getTableDesc();
     this.hashCode = other.hashCode();
     this.encodedName = other.getEncodedName();
+    this.tableName = other.tableName;
   }
 
 
   /**
    * Make a region name of passed parameters.
    * @param tableName
@@ -457,6 +489,22 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     return endKey;
   }
 
+  /**
+   * Get current table name of the region
+   * @return byte array of table name
+   */
+  public byte[] getTableName() {
+    return tableName;
+  }
+
+  /**
+   * Get current table name as string
+   * @return string representation of current table
+   */
+  public String getTableNameAsString() {
+    return Bytes.toString(tableName);
+  }
+
   /**
    * Returns true if the given inclusive range of rows is fully contained
    * by this region. For example, if the region is foo,a,g and this is
@@ -488,32 +536,34 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
   }
 
   /** @return the tableDesc */
+  @Deprecated
   public HTableDescriptor getTableDesc(){
-    return tableDesc;
+    return null;
   }
 
   /**
    * @param newDesc new table descriptor to use
    */
+  @Deprecated
   public void setTableDesc(HTableDescriptor newDesc) {
-    this.tableDesc = newDesc;
+    // do nothing.
   }
 
   /** @return true if this is the root region */
   public boolean isRootRegion() {
-    return this.tableDesc.isRootRegion();
+    return Bytes.equals(tableName, HRegionInfo.ROOT_REGIONINFO.getTableName());
   }
 
   /** @return true if this region is from a table that is a meta table,
    * either <code>.META.</code> or <code>-ROOT-</code>
    */
   public boolean isMetaTable() {
-    return this.tableDesc.isMetaTable();
+    return Bytes.equals(tableName, HRegionInfo.FIRST_META_REGIONINFO.getTableName());
   }
 
   /** @return true if this region is a meta region */
   public boolean isMetaRegion() {
-    return this.tableDesc.isMetaRegion();
+    return isMetaTable();
   }
 
   /**
@@ -564,14 +614,14 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
   @Override
   public String toString() {
     return "REGION => {" + HConstants.NAME + " => '" +
-      this.regionNameStr +
-      "', STARTKEY => '" +
+      this.regionNameStr
+      + " TableName => " + this.tableName
+      + "', STARTKEY => '" +
       Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" +
       Bytes.toStringBinary(this.endKey) +
       "', ENCODED => " + getEncodedName() + "," +
       (isOffline()? " OFFLINE => true,": "") +
-      (isSplit()? " SPLIT => true,": "") +
-      " TABLE => {" + this.tableDesc.toString() + "}";
+      (isSplit()? " SPLIT => true,": "") + "}";
   }
 
   /**
|
||||||
Bytes.writeByteArray(out, regionName);
|
Bytes.writeByteArray(out, regionName);
|
||||||
out.writeBoolean(split);
|
out.writeBoolean(split);
|
||||||
Bytes.writeByteArray(out, startKey);
|
Bytes.writeByteArray(out, startKey);
|
||||||
tableDesc.write(out);
|
Bytes.writeByteArray(out, tableName);
|
||||||
out.writeInt(hashCode);
|
out.writeInt(hashCode);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -632,7 +682,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     this.regionNameStr = Bytes.toStringBinary(this.regionName);
     this.split = in.readBoolean();
     this.startKey = Bytes.readByteArray(in);
-    this.tableDesc.readFields(in);
+    this.tableName = Bytes.readByteArray(in);
     this.hashCode = in.readInt();
   }
 
@@ -646,7 +696,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     }
 
     // Are regions of same table?
-    int result = Bytes.compareTo(this.tableDesc.getName(), o.tableDesc.getName());
+    int result = Bytes.compareTo(this.tableName, o.tableName);
     if (result != 0) {
       return result;
     }
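A minimal sketch of what the HRegionInfo change above means for callers: regions now carry only the table name, not the full schema, and `getTableDesc()` is deprecated. The table name, keys, and printed values below are placeholders, not taken from the patch.

```java
// Before this patch a region carried the whole HTableDescriptor:
//   HRegionInfo hri = new HRegionInfo(htableDescriptor, startKey, endKey);
// After it, the region only knows its table name; the schema lives elsewhere
// (e.g. the .tableinfo file named by HConstants.TABLEINFO_NAME).
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class HRegionInfoUsageSketch {
  public static void main(String[] args) {
    byte[] start = Bytes.toBytes("aaa");
    byte[] end   = Bytes.toBytes("zzz");
    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("mytable"), start, end);
    System.out.println(hri.getTableNameAsString()); // "mytable"
    System.out.println(hri.getTableDesc());         // deprecated; now returns null
  }
}
```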
@@ -597,7 +597,7 @@ public class KeyValue implements Writable, HeapSize {
       return "empty";
     }
     return keyToString(this.bytes, this.offset + ROW_OFFSET, getKeyLength()) +
-      "/vlen=" + getValueLength();
+      "/vlen=" + getValueLength() + " value = " + Bytes.toString(getValue());
   }
 
   /**
@@ -19,20 +19,24 @@
  */
 package org.apache.hadoop.hbase.catalog;
 
+import java.util.ArrayList;
+import java.util.List;
+
 import java.io.IOException;
 import java.net.ConnectException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
 
 /**
  * Writes region and assignment information to <code>.META.</code>.
@@ -219,6 +223,82 @@ public class MetaEditor {
     LOG.info("Updated region " + regionInfo.getRegionNameAsString() + " in META");
   }
 
+  public static void updateRootWithMetaMigrationStatus(CatalogTracker catalogTracker) throws IOException {
+    updateRootWithMetaMigrationStatus(catalogTracker, true);
+  }
+
+  public static void updateRootWithMetaMigrationStatus(CatalogTracker catalogTracker,
+      boolean metaUpdated)
+      throws IOException {
+    Put put = new Put(HRegionInfo.ROOT_REGIONINFO.getRegionName());
+    addMetaUpdateStatus(put, metaUpdated);
+    catalogTracker.waitForRootServerConnectionDefault().put(
+        CatalogTracker.ROOT_REGION, put);
+    LOG.info("Updated -ROOT- row with metaMigrated status = " + metaUpdated);
+  }
+
+  public static List<HTableDescriptor> updateMetaWithNewRegionInfo(
+      final MasterServices masterServices)
+      throws IOException {
+    final List<HTableDescriptor> htds = new ArrayList<HTableDescriptor>();
+    Visitor v = new Visitor() {
+      @Override
+      public boolean visit(Result r) throws IOException {
+        if (r == null || r.isEmpty()) return true;
+        HRegionInfo090x hrfm = getHRegionInfoForMigration(r);
+        htds.add(hrfm.getTableDesc());
+        masterServices.getMasterFileSystem().createTableDescriptor(hrfm.getTableDesc());
+        HRegionInfo regionInfo = new HRegionInfo(hrfm);
+        LOG.debug(" MetaEditor.updatemeta RegionInfo = " + regionInfo.toString()
+            + " old HRI = " + hrfm.toString());
+        Put put = new Put(regionInfo.getRegionName());
+        put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+            Writables.getBytes(regionInfo));
+        masterServices.getCatalogTracker().waitForMetaServerConnectionDefault().put(
+            CatalogTracker.META_REGION, put);
+        LOG.info("Updated region " + regionInfo + " to META");
+        return true;
+      }
+    };
+    MetaReader.fullScan(masterServices.getCatalogTracker(), v);
+    updateRootWithMetaMigrationStatus(masterServices.getCatalogTracker());
+    return htds;
+  }
+
+  public static HRegionInfo090x getHRegionInfoForMigration(
+      Result data) throws IOException {
+    byte [] bytes =
+      data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+    if (bytes == null) return null;
+    HRegionInfo090x info = Writables.getHRegionInfoForMigration(bytes);
+    LOG.info("Current INFO from scan results = " + info);
+    return info;
+  }
+
+  public static HRegionInfo getHRegionInfo(
+      Result data) throws IOException {
+    byte [] bytes =
+      data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+    if (bytes == null) return null;
+    HRegionInfo info = Writables.getHRegionInfo(bytes);
+    LOG.info("Current INFO from scan results = " + info);
+    return info;
+  }
+
+  private static Put addMetaUpdateStatus(final Put p) {
+    p.add(HConstants.CATALOG_FAMILY, HConstants.META_MIGRATION_QUALIFIER,
+        Bytes.toBytes("true"));
+    return p;
+  }
+
+
+  private static Put addMetaUpdateStatus(final Put p, final boolean metaUpdated) {
+    p.add(HConstants.CATALOG_FAMILY, HConstants.META_MIGRATION_QUALIFIER,
+        Bytes.toBytes(metaUpdated));
+    return p;
+  }
+
+
   private static Put addRegionInfo(final Put p, final HRegionInfo hri)
   throws IOException {
     p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
@@ -28,16 +28,14 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.ServerName;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Writables;
@@ -50,6 +48,8 @@ import org.apache.hadoop.ipc.RemoteException;
  * catalogs.
  */
 public class MetaReader {
+  private static final Log LOG = LogFactory.getLog(MetaReader.class);
+
   public static final byte [] META_REGION_PREFIX;
   static {
     // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
@@ -182,7 +182,7 @@ public class MetaReader {
       if (region == null) return true;
       HRegionInfo hri = region.getFirst();
       if (disabledTables.contains(
-          hri.getTableDesc().getNameAsString())) return true;
+          hri.getTableNameAsString())) return true;
       // Are we to include split parents in the list?
       if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
       regions.put(hri, region.getSecond());
@@ -583,6 +583,48 @@ public class MetaReader {
     }
   }
 
+  public static void fullScanMetaAndPrint(
+      CatalogTracker catalogTracker)
+      throws IOException {
+    final List<HRegionInfo090x> regions =
+      new ArrayList<HRegionInfo090x>();
+    Visitor v = new Visitor() {
+      @Override
+      public boolean visit(Result r) throws IOException {
+        if (r == null || r.isEmpty()) return true;
+        LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
+        HRegionInfo hrim = MetaEditor.getHRegionInfo(r);
+        LOG.info("fullScanMetaAndPrint.HRI Print= " + hrim);
+        return true;
+      }
+    };
+    fullScan(catalogTracker, v);
+  }
+
+
+  public static List<HRegionInfo090x> fullScanMetaAndPrintHRIM(
+      CatalogTracker catalogTracker)
+      throws IOException {
+    final List<HRegionInfo090x> regions =
+      new ArrayList<HRegionInfo090x>();
+    Visitor v = new Visitor() {
+      @Override
+      public boolean visit(Result r) throws IOException {
+        if (r == null || r.isEmpty()) return true;
+        LOG.info("fullScanMetaAndPrint1.Current Meta Result: " + r);
+        HRegionInfo090x hrim = MetaEditor.getHRegionInfoForMigration(r);
+        LOG.info("fullScanMetaAndPrint.HRIM Print= " + hrim);
+        regions.add(hrim);
+        return true;
+      }
+    };
+    fullScan(catalogTracker, v);
+    return regions;
+  }
+
+
+
   /**
    * Implementations 'visit' a catalog table row.
    */
@@ -103,10 +103,14 @@ public class HBaseAdmin implements Abortable, Closeable {
    */
   private synchronized CatalogTracker getCatalogTracker()
   throws ZooKeeperConnectionException, IOException {
+    LOG.info("HBaseAdmin.getCatalogTracker()");
     CatalogTracker ct = null;
     try {
       ct = new CatalogTracker(this.conf);
+      LOG.info("HBaseAdmin.getCatalogTracker()--11");
+
       ct.start();
+      LOG.info("HBaseAdmin.getCatalogTracker()-- CTracker started");
     } catch (InterruptedException e) {
       // Let it out as an IOE for now until we redo all so tolerate IEs
       Thread.currentThread().interrupt();
@@ -529,6 +533,7 @@ public class HBaseAdmin implements Abortable, Closeable {
    */
   public void disableTable(final byte [] tableName)
   throws IOException {
+    LOG.info("HBaseAdmin.disableTable");
     disableTableAsync(tableName);
     // Wait until table is disabled
     boolean disabled = false;
@@ -1266,4 +1271,16 @@ public class HBaseAdmin implements Abortable, Closeable {
       this.connection.close();
     }
   }
+
+  /**
+   * Get tableDescriptors
+   * @param tableNames List of table names
+   * @return HTD[] the tableDescriptor
+   * @throws IOException if a remote or network exception occurs
+   */
+  public HTableDescriptor[] getTableDescriptors(List<String> tableNames)
+  throws IOException {
+    return this.connection.getHTableDescriptors(tableNames);
+  }
+
 }
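A sketch of client-side use of the new `HBaseAdmin.getTableDescriptors(List)` API added above. The configuration setup, table names, and the `GetDescriptorsSketch` class are illustrative assumptions, not part of the patch.

```java
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class GetDescriptorsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    List<String> names = Arrays.asList("t1", "t2");   // placeholder table names
    // Fetches descriptors from the master rather than scanning .META. directly.
    HTableDescriptor[] htds = admin.getTableDescriptors(names);
    for (HTableDescriptor htd : htds) {
      System.out.println(htd.getNameAsString());
    }
  }
}
```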
@@ -373,4 +373,13 @@ public interface HConnection extends Abortable, Closeable {
    * @deprecated This method will be changed from public to package protected.
    */
   public int getCurrentNrHRS() throws IOException;
+
+  /**
+   * @param tableNames List of table names
+   * @return HTD[] table metadata
+   * @throws IOException if a remote or network exception occurs
+   */
+  public HTableDescriptor[] getHTableDescriptors(List<String> tableNames)
+  throws IOException;
+
 }
@@ -634,33 +634,6 @@ public class HConnectionManager {
       return reload? relocateRegion(name, row): locateRegion(name, row);
     }
 
-    public HTableDescriptor[] listTables() throws IOException {
-      final TreeSet<HTableDescriptor> uniqueTables =
-        new TreeSet<HTableDescriptor>();
-      MetaScannerVisitor visitor = new MetaScannerVisitor() {
-        public boolean processRow(Result result) throws IOException {
-          try {
-            byte[] value = result.getValue(HConstants.CATALOG_FAMILY,
-                HConstants.REGIONINFO_QUALIFIER);
-            HRegionInfo info = null;
-            if (value != null) {
-              info = Writables.getHRegionInfo(value);
-            }
-            // Only examine the rows where the startKey is zero length
-            if (info != null && info.getStartKey().length == 0) {
-              uniqueTables.add(info.getTableDesc());
-            }
-            return true;
-          } catch (RuntimeException e) {
-            LOG.error("Result=" + result);
-            throw e;
-          }
-        }
-      };
-      MetaScanner.metaScan(conf, visitor);
-      return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
-    }
-
     public boolean isTableEnabled(byte[] tableName) throws IOException {
       return testTableOnlineState(tableName, true);
     }
@@ -679,7 +652,7 @@ public class HConnectionManager {
               HConstants.REGIONINFO_QUALIFIER);
           HRegionInfo info = Writables.getHRegionInfoOrNull(value);
           if (info != null) {
-            if (Bytes.equals(tableName, info.getTableDesc().getName())) {
+            if (Bytes.equals(tableName, info.getTableName())) {
               value = row.getValue(HConstants.CATALOG_FAMILY,
                   HConstants.SERVER_QUALIFIER);
               if (value == null) {
@@ -716,47 +689,6 @@ public class HConnectionManager {
       }
     }
 
-    private static class HTableDescriptorFinder
-      implements MetaScanner.MetaScannerVisitor {
-        byte[] tableName;
-        HTableDescriptor result;
-        protected HTableDescriptorFinder(byte[] tableName) {
-          this.tableName = tableName;
-        }
-        public boolean processRow(Result rowResult) throws IOException {
-          HRegionInfo info = Writables.getHRegionInfoOrNull(
-              rowResult.getValue(HConstants.CATALOG_FAMILY,
-                  HConstants.REGIONINFO_QUALIFIER));
-          if (info == null) return true;
-          HTableDescriptor desc = info.getTableDesc();
-          if (Bytes.compareTo(desc.getName(), tableName) == 0) {
-            result = desc;
-            return false;
-          }
-          return true;
-        }
-        HTableDescriptor getResult() {
-          return result;
-        }
-    }
-
-    public HTableDescriptor getHTableDescriptor(final byte[] tableName)
-    throws IOException {
-      if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
-        return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC);
-      }
-      if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
-        return HTableDescriptor.META_TABLEDESC;
-      }
-      HTableDescriptorFinder finder = new HTableDescriptorFinder(tableName);
-      MetaScanner.metaScan(conf, finder, tableName);
-      HTableDescriptor result = finder.getResult();
-      if (result == null) {
-        throw new TableNotFoundException(Bytes.toString(tableName));
-      }
-      return result;
-    }
-
     @Override
     public HRegionLocation locateRegion(final byte [] regionName)
     throws IOException {
@@ -836,7 +768,7 @@ public class HConnectionManager {
             regionInfo = Writables.getHRegionInfo(value);
 
             // possible we got a region of a different table...
-            if (!Bytes.equals(regionInfo.getTableDesc().getName(),
+            if (!Bytes.equals(regionInfo.getTableName(),
                 tableName)) {
               return false; // stop scanning
             }
@@ -956,7 +888,7 @@ public class HConnectionManager {
         HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable(
             value, new HRegionInfo());
         // possible we got a region of a different table...
-        if (!Bytes.equals(regionInfo.getTableDesc().getName(), tableName)) {
+        if (!Bytes.equals(regionInfo.getTableName(), tableName)) {
           throw new TableNotFoundException(
               "Table '" + Bytes.toString(tableName) + "' was not found.");
         }
@@ -1785,5 +1717,50 @@ public class HConnectionManager {
         LOG.debug("The connection to " + this.zooKeeper
             + " was closed by the finalize method.");
       }
     }
+
+    public HTableDescriptor[] listTables() throws IOException {
+      if (this.master == null) {
+        this.master = getMaster();
+      }
+      HTableDescriptor[] htd = master.getHTableDescriptors();
+      return htd;
+    }
+
+    public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) throws IOException {
+      if (tableNames == null || tableNames.size() == 0) return null;
+      if (this.master == null) {
+        this.master = getMaster();
+      }
+      return master.getHTableDescriptors(tableNames);
+    }
+
+    public HTableDescriptor getHTableDescriptor(final byte[] tableName)
+    throws IOException {
+      if (tableName == null || tableName.length == 0) return null;
+      if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
+        return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC);
+      }
+      if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
+        return HTableDescriptor.META_TABLEDESC;
+      }
+      if (this.master == null) {
+        this.master = getMaster();
+      }
+      HTableDescriptor hTableDescriptor = null;
+      HTableDescriptor[] htds = master.getHTableDescriptors();
+      if (htds != null && htds.length > 0) {
+        for (HTableDescriptor htd: htds) {
+          if (Bytes.equals(tableName, htd.getName())) {
+            hTableDescriptor = htd;
+          }
+        }
+      }
+      //HTableDescriptor htd = master.getHTableDescriptor(tableName);
+      if (hTableDescriptor == null) {
+        throw new TableNotFoundException(Bytes.toString(tableName));
+      }
+      return hTableDescriptor;
+    }
+
   }
 }
@@ -393,7 +393,7 @@ public class HTable implements HTableInterface, Closeable {
           return true;
         }
         HRegionInfo info = Writables.getHRegionInfo(bytes);
-        if (Bytes.equals(info.getTableDesc().getName(), getTableName())) {
+        if (Bytes.equals(info.getTableName(), getTableName())) {
           if (!(info.isOffline() || info.isSplit())) {
             startKeyList.add(info.getStartKey());
             endKeyList.add(info.getEndKey());

@@ -423,7 +423,7 @@ public class HTable implements HTableInterface, Closeable {
             rowResult.getValue(HConstants.CATALOG_FAMILY,
                 HConstants.REGIONINFO_QUALIFIER));
 
-        if (!(Bytes.equals(info.getTableDesc().getName(), getTableName()))) {
+        if (!(Bytes.equals(info.getTableName(), getTableName()))) {
          return false;
        }
 
@@ -288,7 +288,7 @@ public class MetaScanner {
       HRegionInfo info = Writables.getHRegionInfo(
           rowResult.getValue(HConstants.CATALOG_FAMILY,
               HConstants.REGIONINFO_QUALIFIER));
-      if (!(Bytes.equals(info.getTableDesc().getName(), tablename))) {
+      if (!(Bytes.equals(info.getTableName(), tablename))) {
         return false;
       }
       byte [] value = rowResult.getValue(HConstants.CATALOG_FAMILY,
@@ -30,7 +30,6 @@ class UnmodifyableHRegionInfo extends HRegionInfo {
    */
   UnmodifyableHRegionInfo(HRegionInfo info) {
     super(info);
-    this.tableDesc = new UnmodifyableHTableDescriptor(info.getTableDesc());
   }
 
   /**
@@ -163,6 +163,7 @@ public class HbaseObjectWritable implements Writable, WritableWithSize, Configur
     addToMap(HServerAddress.class, code++);
     addToMap(HServerInfo.class, code++);
     addToMap(HTableDescriptor.class, code++);
+    addToMap(HTableDescriptor[].class, code++);
     addToMap(MapWritable.class, code++);
 
     //
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.ipc;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -200,4 +201,25 @@ public interface HMasterInterface extends VersionedProtocol {
    * @return Previous balancer value
    */
   public boolean balanceSwitch(final boolean b);
+
+  /**
+   * Get array of all HTDs.
+   * @return array of HTableDescriptor
+   */
+  public HTableDescriptor[] getHTableDescriptors();
+
+  /**
+   * Get current HTD for a given tablename
+   * @param tableName
+   * @return HTableDescriptor for the table
+   */
+  //public HTableDescriptor getHTableDescriptor(final byte[] tableName);
+
+  /**
+   * Get array of HTDs for requested tables.
+   * @param tableNames
+   * @return array of HTableDescriptor
+   */
+  public HTableDescriptor[] getHTableDescriptors(List<String> tableNames);
+
 }
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;

@@ -63,6 +64,7 @@ import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
 import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;

@@ -77,6 +79,7 @@ import org.apache.zookeeper.AsyncCallback;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.data.Stat;
+import org.apache.hadoop.hbase.client.Get;
 
 /**
  * Manages and performs region assignment.
@@ -139,6 +142,10 @@ public class AssignmentManager extends ZooKeeperListener {
 
   private final ExecutorService executorService;
 
+  private Map<String, HTableDescriptor> tableDescMap =
+      new HashMap<String, HTableDescriptor>();
+
+
   /**
    * Constructs a new assignment manager.
    *

@@ -166,6 +173,7 @@ public class AssignmentManager extends ZooKeeperListener {
     this.zkTable = new ZKTable(this.master.getZooKeeper());
     this.maximumAssignmentAttempts =
       this.master.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10);
+    initHTableDescriptorMap();
   }
 
   /**

@@ -244,7 +252,9 @@ public class AssignmentManager extends ZooKeeperListener {
     // its a clean cluster startup, else its a failover.
     boolean regionsToProcess = false;
     for (Map.Entry<HRegionInfo, ServerName> e: this.regions.entrySet()) {
-      if (!e.getKey().isMetaRegion() && e.getValue() != null) {
+      if (!e.getKey().isMetaRegion()
+          && !e.getKey().isRootRegion()
+          && e.getValue() != null) {
         LOG.debug("Found " + e + " out on cluster");
         regionsToProcess = true;
         break;

@@ -267,6 +277,7 @@ public class AssignmentManager extends ZooKeeperListener {
       }
     } else {
       // Fresh cluster startup.
+      LOG.info("Clean cluster startup. Assigning userregions");
       cleanoutUnassigned();
       assignAllUserRegions();
     }

@@ -919,7 +930,7 @@ public class AssignmentManager extends ZooKeeperListener {
 
   public void assign(HRegionInfo region, boolean setOfflineInZK,
       boolean forceNewPlan) {
-    String tableName = region.getTableDesc().getNameAsString();
+    String tableName = region.getTableNameAsString();
     boolean disabled = this.zkTable.isDisabledTable(tableName);
     if (disabled || this.zkTable.isDisablingTable(tableName)) {
       LOG.info("Table " + tableName + (disabled? " disabled;": " disabling;") +

@@ -978,6 +989,10 @@ public class AssignmentManager extends ZooKeeperListener {
     }
     // Move on to open regions.
     try {
+      // Update the tableDesc map.
+      for (HRegionInfo region : regions) {
+        updateDescMap(region.getTableNameAsString());
+      }
       // Send OPEN RPC. This can fail if the server on other end is is not up.
       // If we fail, fail the startup by aborting the server. There is one
       // exception we will tolerate: ServerNotRunningException. This is thrown

@@ -1808,10 +1823,10 @@ public class AssignmentManager extends ZooKeeperListener {
   public List<HRegionInfo> getRegionsOfTable(byte[] tableName) {
     List<HRegionInfo> tableRegions = new ArrayList<HRegionInfo>();
     HRegionInfo boundary =
-      new HRegionInfo(new HTableDescriptor(tableName), null, null);
+      new HRegionInfo(tableName, null, null);
     synchronized (this.regions) {
       for (HRegionInfo regionInfo: this.regions.tailMap(boundary).keySet()) {
-        if(Bytes.equals(regionInfo.getTableDesc().getName(), tableName)) {
+        if(Bytes.equals(regionInfo.getTableName(), tableName)) {
           tableRegions.add(regionInfo);
         } else {
           break;

@@ -2054,7 +2069,7 @@ public class AssignmentManager extends ZooKeeperListener {
     // that case. This is not racing with the region server itself since RS
     // report is done after the split transaction completed.
     if (this.zkTable.isDisablingOrDisabledTable(
-        parent.getTableDesc().getNameAsString())) {
+        parent.getTableNameAsString())) {
       unassign(a);
       unassign(b);
     }
@@ -2149,6 +2164,140 @@ public class AssignmentManager extends ZooKeeperListener {
     LOG.info("Bulk assigning done");
   }
 
+
+  private void initHTableDescriptorMap() {
+    try {
+      synchronized (this.tableDescMap) {
+        this.tableDescMap =
+            FSUtils.getTableDescriptors(this.master.getConfiguration());
+      }
+    } catch (IOException e) {
+      LOG.info("IOException while initializing HTableDescriptor Map");
+    }
+  }
+
+  private HTableDescriptor readTableDescriptor(String tableName)
+      throws IOException {
+    return FSUtils.getHTableDescriptor(
+        this.master.getConfiguration(), tableName);
+  }
+
+  private boolean isRootOrMetaRegion(String tableName) {
+    return (
+        tableName.equals(
+            HRegionInfo.ROOT_REGIONINFO.getTableNameAsString())
+        ||
+        tableName.equals(
+            HRegionInfo.FIRST_META_REGIONINFO.getTableNameAsString()));
+  }
+
+  private void updateDescMap(String tableName) throws IOException {
+
+    if (this.tableDescMap == null) {
+      LOG.error("Table Descriptor cache is null. " +
+          "Skipping desc map update for table = " + tableName);
+      return;
+    }
+
+    if (tableName == null || isRootOrMetaRegion(tableName))
+      return;
+    if (!this.tableDescMap.containsKey(tableName)) {
+      HTableDescriptor htd = readTableDescriptor(tableName);
+      if (htd != null) {
+        LOG.info("Updating TableDesc Map for tablename = " + tableName
+            + "htd == " + htd);
+        synchronized (this.tableDescMap) {
+          this.tableDescMap.put(tableName, htd);
+        }
+      } else {
+        LOG.info("HTable Descriptor is NULL for table = " + tableName);
+      }
+    }
+  }
+
+  public void updateTableDesc(String tableName, HTableDescriptor htd) {
+    if (this.tableDescMap == null) {
+      LOG.error("Table Descriptor cache is null. " +
+          "Skipping desc map update for table = " + tableName);
+      return;
+    }
+    if (tableName == null || isRootOrMetaRegion(tableName))
+      return;
+    if (!this.tableDescMap.containsKey(tableName)) {
+      LOG.error("Table descriptor missing in DescMap. for tablename = " + tableName);
+    }
+    synchronized (this.tableDescMap) {
+      this.tableDescMap.put(tableName, htd);
+    }
+    LOG.info("TableDesc updated successfully for table = " + tableName);
+  }
+
+  public void deleteTableDesc(String tableName) {
+    if (this.tableDescMap == null) {
+      LOG.error("Table Descriptor cache is null. " +
+          "Skipping desc map update for table = " + tableName);
+      return;
+    }
+    if (tableName == null || isRootOrMetaRegion(tableName))
+      return;
+    if (!this.tableDescMap.containsKey(tableName)) {
+      LOG.error("Table descriptor missing in DescMap. for tablename = " + tableName);
+    }
+    synchronized (this.tableDescMap) {
+      this.tableDescMap.remove(tableName);
+    }
+    LOG.info("TableDesc removed successfully for table = " + tableName);
+  }
+
+  public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) {
+    List htdList = null;
+    HTableDescriptor[] htd = null;
+    if (tableNames != null && tableNames.size() > 0) {
+      if (this.tableDescMap != null) {
+        htd = new HTableDescriptor[tableNames.size()];
+        htdList = new ArrayList();
+        synchronized (this.tableDescMap) {
+          int index = 0;
+          for (String tableName : tableNames) {
+            HTableDescriptor htdesc = this.tableDescMap.get(tableName);
+            htd[index++] = this.tableDescMap.get(tableName);
+            if (htdesc != null) {
+              htdList.add(htdesc);
+            }
+          }
+        }
+      }
+    }
+    if (htdList != null && htdList.size() > 0 ) {
+      return (HTableDescriptor[]) htdList.toArray(new HTableDescriptor[htdList.size()]);
+    }
+    return null;
+  }
+
+  public HTableDescriptor[] getHTableDescriptors() {
+    if (this.tableDescMap != null) {
+      synchronized (this.tableDescMap) {
+        Collection<HTableDescriptor> htdc = this.tableDescMap.values();
+        if (htdc != null) {
+          return htdc.toArray(new HTableDescriptor[htdc.size()]);
+        }
+      }
+    }
+    return null;
+  }
+
+  public HTableDescriptor getTableDescriptor(String tableName) {
+    HTableDescriptor htd = null;
+    if (tableName != null) {
+      synchronized (this.tableDescMap) {
+        htd = this.tableDescMap.get(tableName);
+      }
+    }
+    return htd;
+  }
+
+
   /**
    * State of a Region while undergoing transitions.
    */
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
@@ -41,8 +42,10 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
 
 /**
  * A janitor for the catalog tables.  Scans the <code>.META.</code> catalog
  * table on a period looking for unused regions to garbage collect.
@@ -253,8 +256,10 @@ class CatalogJanitor extends Chore {
     if (split == null) return result;
     FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
     Path rootdir = this.services.getMasterFileSystem().getRootDir();
-    Path tabledir = new Path(rootdir, split.getTableDesc().getNameAsString());
-    for (HColumnDescriptor family: split.getTableDesc().getFamilies()) {
+    Path tabledir = new Path(rootdir, split.getTableNameAsString());
+    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
+
+    for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
       Path p = Store.getStoreHomedir(tabledir, split.getEncodedName(),
         family.getName());
       // Look for reference files.  Call listStatus with anonymous instance of PathFilter.
@@ -276,4 +281,10 @@ class CatalogJanitor extends Chore {
     }
     return result;
   }
+
+  private HTableDescriptor getTableDescriptor(byte[] tableName) {
+    return this.services.getAssignmentManager().getTableDescriptor(
+        Bytes.toString(tableName));
+  }
+
 }
@@ -75,12 +75,7 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.InfoServer;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Sleeper;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.hadoop.hbase.util.*;
 import org.apache.hadoop.hbase.zookeeper.ClusterId;
 import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
@@ -448,6 +443,13 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     // Make sure root and meta assigned before proceeding.
     assignRootAndMeta(status);
 
+    MetaReader.fullScanMetaAndPrint(this.catalogTracker);
+
+    // Update meta with new HRI if required. i.e migrate all HRI with HTD to
+    // HRI with out HTD in meta and update the status in ROOT. This must happen
+    // before we assign all user regions or else the assignment will fail.
+    updateMetaWithNewHRI();
+
     // Fixup assignment manager status
     status.setStatus("Starting assignment manager");
     this.assignmentManager.joinCluster();
@@ -464,6 +466,44 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     initialized = true;
   }
 
+  public boolean isMetaHRIUpdated()
+      throws IOException {
+    boolean metaUpdated = false;
+    Get get = new Get(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
+    get.addColumn(HConstants.CATALOG_FAMILY,
+        HConstants.META_MIGRATION_QUALIFIER);
+    Result r =
+        catalogTracker.waitForRootServerConnectionDefault().get(
+            HRegionInfo.ROOT_REGIONINFO.getRegionName(), get);
+    if (r != null && r.getBytes() != null)
+    {
+      byte[] metaMigrated = r.getValue(HConstants.CATALOG_FAMILY,
+          HConstants.META_MIGRATION_QUALIFIER);
+      String migrated = Bytes.toString(metaMigrated);
+      metaUpdated = new Boolean(migrated).booleanValue();
+    } else {
+      LOG.info("metaUpdated = NULL.");
+    }
+    LOG.info("Meta updated status = " + metaUpdated);
+    return metaUpdated;
+  }
+
+  boolean updateMetaWithNewHRI() throws IOException {
+    if (!isMetaHRIUpdated()) {
+      LOG.info("Meta has HRI with HTDs. Updating meta now.");
+      try {
+        MetaEditor.updateMetaWithNewRegionInfo(this);
+        LOG.info("Meta updated with new HRI.");
+        return true;
+      } catch (IOException e) {
+        throw new RuntimeException("Update Meta with nw HRI failed. Master startup aborted.");
+      }
+    }
+    LOG.info("Meta already up-to date with new HRI.");
+    return true;
+  }
+
   /**
    * Check <code>-ROOT-</code> and <code>.META.</code> are assigned.  If not,
    * assign them.
@@ -850,29 +890,18 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     createTable(desc, splitKeys, false);
   }
 
-  public void createTable(HTableDescriptor desc, byte [][] splitKeys,
+  public void createTable(HTableDescriptor hTableDescriptor,
+      byte [][] splitKeys,
       boolean sync)
   throws IOException {
     if (!isMasterRunning()) {
       throw new MasterNotRunningException();
     }
     if (cpHost != null) {
-      cpHost.preCreateTable(desc, splitKeys);
-    }
-    HRegionInfo [] newRegions = null;
-    if(splitKeys == null || splitKeys.length == 0) {
-      newRegions = new HRegionInfo [] { new HRegionInfo(desc, null, null) };
-    } else {
-      int numRegions = splitKeys.length + 1;
-      newRegions = new HRegionInfo[numRegions];
-      byte [] startKey = null;
-      byte [] endKey = null;
-      for(int i=0;i<numRegions;i++) {
-        endKey = (i == splitKeys.length) ? null : splitKeys[i];
-        newRegions[i] = new HRegionInfo(desc, startKey, endKey);
-        startKey = endKey;
-      }
+      cpHost.preCreateTable(hTableDescriptor, splitKeys);
     }
+    HRegionInfo [] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
+    storeTableDescriptor(hTableDescriptor);
     int timeout = conf.getInt("hbase.client.catalog.timeout", 10000);
     // Need META availability to create a table
     try {
@@ -883,13 +912,40 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
       LOG.warn("Interrupted waiting for meta availability", e);
       throw new IOException(e);
     }
-    createTable(newRegions, sync);
+    createTable(hTableDescriptor ,newRegions, sync);
   }
 
-  private synchronized void createTable(final HRegionInfo [] newRegions,
+  private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
+      byte[][] splitKeys) {
+    HRegionInfo[] hRegionInfos = null;
+    if (splitKeys == null || splitKeys.length == 0) {
+      hRegionInfos = new HRegionInfo[]{
+          new HRegionInfo(hTableDescriptor.getName(), null, null)};
+    } else {
+      int numRegions = splitKeys.length + 1;
+      hRegionInfos = new HRegionInfo[numRegions];
+      byte[] startKey = null;
+      byte[] endKey = null;
+      for (int i = 0; i < numRegions; i++) {
+        endKey = (i == splitKeys.length) ? null : splitKeys[i];
+        hRegionInfos[i] =
+            new HRegionInfo(hTableDescriptor.getName(), startKey, endKey);
+        startKey = endKey;
+      }
+    }
+    return hRegionInfos;
+  }
+
+  private void storeTableDescriptor(HTableDescriptor hTableDescriptor) {
+    FSUtils.createTableDescriptor(hTableDescriptor, conf);
+    //fileSystemManager.createTableDescriptor(hTableDescriptor);
+  }
+
+  private synchronized void createTable(final HTableDescriptor hTableDescriptor,
+      final HRegionInfo [] newRegions,
       final boolean sync)
   throws IOException {
-    String tableName = newRegions[0].getTableDesc().getNameAsString();
+    String tableName = newRegions[0].getTableNameAsString();
     if(MetaReader.tableExists(catalogTracker, tableName)) {
       throw new TableExistsException(tableName);
     }
@@ -904,7 +960,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
 
       // 2. Create HRegion
       HRegion region = HRegion.createHRegion(newRegion,
-        fileSystemManager.getRootDir(), conf);
+        fileSystemManager.getRootDir(), conf, hTableDescriptor);
 
       // 3. Insert into META
       MetaEditor.addRegionToMeta(catalogTracker, region.getRegionInfo());
@@ -1040,7 +1096,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
         if (pair == null) {
           return false;
         }
-        if (!Bytes.equals(pair.getFirst().getTableDesc().getName(), tableName)) {
+        if (!Bytes.equals(pair.getFirst().getTableName(), tableName)) {
           return false;
         }
         result.set(pair);
@@ -1304,6 +1360,39 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
     }
   }
 
+  /**
+   * Get HTD array for given tables
+   * @param tableNames
+   * @return HTableDescriptor[]
+   */
+  public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) {
+    return this.assignmentManager.getHTableDescriptors(tableNames);
+  }
+
+  /**
+   * Get all table descriptors
+   * @return HTableDescriptor[]
+   */
+  public HTableDescriptor[] getHTableDescriptors() {
+    return this.assignmentManager.getHTableDescriptors();
+  }
+
+  /**
+   * Get a HTD for a given table name
+   * @param tableName
+   * @return HTableDescriptor
+   */
+  /*
+  public HTableDescriptor getHTableDescriptor(byte[] tableName) {
+    if (tableName != null && tableName.length > 0) {
+      return this.assignmentManager.getTableDescriptor(
+          Bytes.toString(tableName));
+    }
+    return null;
+  }
+  */
+
   /**
    * Compute the average load across all region servers.
    * Currently, this uses a very naive computation - just uses the number of
@@ -31,12 +31,16 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -316,13 +320,15 @@ public class MasterFileSystem {
     // not make it in first place.  Turn off block caching for bootstrap.
     // Enable after.
     HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
-    setInfoFamilyCaching(rootHRI, false);
+    setInfoFamilyCachingForRoot(false);
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    setInfoFamilyCaching(metaHRI, false);
-    HRegion root = HRegion.createHRegion(rootHRI, rd, c);
-    HRegion meta = HRegion.createHRegion(metaHRI, rd, c);
-    setInfoFamilyCaching(rootHRI, true);
-    setInfoFamilyCaching(metaHRI, true);
+    setInfoFamilyCachingForMeta(false);
+    HRegion root = HRegion.createHRegion(rootHRI, rd, c,
+        HTableDescriptor.ROOT_TABLEDESC);
+    HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
+        HTableDescriptor.META_TABLEDESC);
+    setInfoFamilyCachingForRoot(true);
+    setInfoFamilyCachingForMeta(true);
     // Add first region from the META table to the ROOT region.
     HRegion.addRegionToMETA(root, meta);
     root.close();
@@ -336,12 +342,9 @@ public class MasterFileSystem {
     }
   }
 
-  /**
-   * @param hri Set all family block caching to <code>b</code>
-   * @param b
-   */
-  private static void setInfoFamilyCaching(final HRegionInfo hri, final boolean b) {
-    for (HColumnDescriptor hcd: hri.getTableDesc().families.values()) {
+  private static void setInfoFamilyCachingForRoot(final boolean b) {
+    for (HColumnDescriptor hcd:
+        HTableDescriptor.ROOT_TABLEDESC.families.values()) {
       if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
         hcd.setBlockCacheEnabled(b);
         hcd.setInMemory(b);
@@ -349,6 +352,17 @@ public class MasterFileSystem {
       }
     }
   }
+
+  private static void setInfoFamilyCachingForMeta(final boolean b) {
+    for (HColumnDescriptor hcd:
+        HTableDescriptor.META_TABLEDESC.families.values()) {
+      if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
+        hcd.setBlockCacheEnabled(b);
+        hcd.setInMemory(b);
+      }
+    }
+  }
+
   public void deleteRegion(HRegionInfo region) throws IOException {
     fs.delete(HRegion.getRegionDir(rootdir, region), true);
   }
@@ -363,16 +377,137 @@ public class MasterFileSystem {
     // @see HRegion.checkRegioninfoOnFilesystem()
   }
 
-  public void deleteFamily(HRegionInfo region, byte[] familyName)
-  throws IOException {
-    fs.delete(Store.getStoreHomedir(
-        new Path(rootdir, region.getTableDesc().getNameAsString()),
-        region.getEncodedName(), familyName), true);
-  }
-
   public void stop() {
     if (splitLogManager != null) {
       this.splitLogManager.stop();
     }
   }
+
+  /**
+   * Get table info path for a table.
+   * @param tableName
+   * @return Table info path
+   */
+  private Path getTableInfoPath(byte[] tableName) {
+    Path tablePath = new Path(this.rootdir, Bytes.toString(tableName));
+    Path tableInfoPath = new Path(tablePath, HConstants.TABLEINFO_NAME);
+    return tableInfoPath;
+  }
+
+  /**
+   * Get table info path for a table.
+   * @param tableName
+   * @return Table info path
+   */
+  private Path getTablePath(byte[] tableName) {
+    return new Path(this.rootdir, Bytes.toString(tableName));
+  }
+
+  /**
+   * Get a HTableDescriptor of a table.
+   * @param tableName
+   * @return HTableDescriptor
+   */
+  public HTableDescriptor getTableDescriptor(byte[] tableName) {
+    try {
+      FSDataInputStream fsDataInputStream = fs.open(getTableInfoPath(tableName));
+      HTableDescriptor hTableDescriptor = new HTableDescriptor();
+      hTableDescriptor.readFields(fsDataInputStream);
+      fsDataInputStream.close();
+      //fs.close();
+      return hTableDescriptor;
+    } catch (IOException ioe) {
+      try {
+        //fs.close();
+      } catch (Exception e) {
+        LOG.error("file system close failed: ", e);
+      }
+      LOG.info("Exception during readTableDecriptor ", ioe);
+    }
+    return null;
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS.
+   * @param htableDescriptor
+   */
+  public void createTableDescriptor(HTableDescriptor htableDescriptor) {
+    FSUtils.createTableDescriptor(htableDescriptor, conf);
+  }
+
+  /**
+   * Update a table descriptor.
+   * @param htableDescriptor
+   * @return updated HTableDescriptor
+   * @throws IOException
+   */
+  public HTableDescriptor updateTableDescriptor(HTableDescriptor htableDescriptor)
+  throws IOException {
+    LOG.info("Update Table Descriptor. Current HTD = " + htableDescriptor);
+    FSUtils.updateHTableDescriptor(fs, conf, htableDescriptor);
+    return htableDescriptor;
+  }
+
+  /**
+   * Delete column of a table
+   * @param tableName
+   * @param familyName
+   * @return Modified HTableDescriptor with requested column deleted.
+   * @throws IOException
+   */
+  public HTableDescriptor deleteColumn(byte[] tableName, byte[] familyName)
+  throws IOException {
+    LOG.info("DeleteColumn. Table = " + Bytes.toString(tableName)
+        + " family = " + Bytes.toString(familyName));
+    HTableDescriptor htd = getTableDescriptor(tableName);
+    htd.removeFamily(familyName);
+    updateTableDescriptor(htd);
+    return htd;
+  }
+
+  /**
+   * Modify Column of a table
+   * @param tableName
+   * @param hcd HColumnDesciptor
+   * @return Modified HTableDescriptor with the column modified.
+   * @throws IOException
+   */
+  public HTableDescriptor modifyColumn(byte[] tableName, HColumnDescriptor hcd)
+  throws IOException {
+    LOG.info("AddModifyColumn. Table = " + Bytes.toString(tableName)
+        + " HCD = " + hcd.toString());
+
+    HTableDescriptor htd = getTableDescriptor(tableName);
+    byte [] familyName = hcd.getName();
+    if(!htd.hasFamily(familyName)) {
+      throw new InvalidFamilyOperationException("Family '" +
+        Bytes.toString(familyName) + "' doesn't exists so cannot be modified");
+    }
+    htd.addFamily(hcd);
+    updateTableDescriptor(htd);
+    return htd;
+  }
+
+  /**
+   * Add column to a table
+   * @param tableName
+   * @param hcd
+   * @return Modified HTableDescriptor with new column added.
+   * @throws IOException
+   */
+  public HTableDescriptor addColumn(byte[] tableName, HColumnDescriptor hcd)
+  throws IOException {
+    LOG.info("AddColumn. Table = " + Bytes.toString(tableName)
+        + " HCD = " + hcd.toString());
+
+    HTableDescriptor htd = getTableDescriptor(tableName);
+    if(htd == null) {
+      throw new InvalidFamilyOperationException("Family '" +
+        hcd.getNameAsString() + "' cannot be modified as HTD is null");
+    }
+    htd.addFamily(hcd);
+    updateTableDescriptor(htd);
+    return htd;
+  }
+
 }
@@ -92,7 +92,7 @@ public class ClosedRegionHandler extends EventHandler implements TotesHRegionInf
     LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName());
     // Check if this table is being disabled or not
     if (this.assignmentManager.getZKTable().
-        isDisablingOrDisabledTable(this.regionInfo.getTableDesc().getNameAsString())) {
+        isDisablingOrDisabledTable(this.regionInfo.getTableNameAsString())) {
       assignmentManager.offlineDisabledRegion(regionInfo);
       return;
     }
@@ -70,6 +70,8 @@ public class DeleteTableHandler extends TableEventHandler {
     }
     // Delete table from FS
     this.masterServices.getMasterFileSystem().deleteTable(tableName);
+    // Update table descriptor cache
+    am.deleteTableDesc(Bytes.toString(tableName));
 
     // If entry for this table in zk, and up in AssignmentManager, remove it.
     // Call to undisableTable does this. TODO: Make a more formal purge table.
@@ -26,7 +26,9 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.util.Bytes;
 
 public class ModifyTableHandler extends TableEventHandler {
   private final HTableDescriptor htd;
@@ -41,13 +43,19 @@ public class ModifyTableHandler extends TableEventHandler {
   @Override
   protected void handleTableOperation(List<HRegionInfo> hris)
   throws IOException {
-    for (HRegionInfo hri : hris) {
-      // Update region info in META
-      hri.setTableDesc(this.htd);
-      MetaEditor.updateRegionInfo(this.server.getCatalogTracker(), hri);
-      // Update region info in FS
-      this.masterServices.getMasterFileSystem().updateRegionInfo(hri);
-    }
+    AssignmentManager am = this.masterServices.getAssignmentManager();
+    HTableDescriptor htd = am.getTableDescriptor(Bytes.toString(tableName));
+    if (htd == null) {
+      throw new IOException("Modify Table operation could not be completed as " +
+          "HTableDescritor is missing for table = "
+          + Bytes.toString(tableName));
+    }
+    // Update table descriptor in HDFS
+
+    HTableDescriptor updatedHTD = this.masterServices.getMasterFileSystem()
+        .updateTableDescriptor(this.htd);
+    // Update in-memory descriptor cache
+    am.updateTableDesc(Bytes.toString(tableName), updatedHTD);
   }
   @Override
   public String toString() {
@@ -109,7 +109,7 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf
           " because regions is NOT in RIT -- presuming this is because it SPLIT");
     }
     if (this.assignmentManager.getZKTable().isDisablingOrDisabledTable(
-        regionInfo.getTableDesc().getNameAsString())) {
+        regionInfo.getTableNameAsString())) {
       LOG.debug("Opened region " + regionInfo.getRegionNameAsString() + " but "
           + "this table is disabled, triggering close of region");
       assignmentManager.unassign(regionInfo);
@@ -217,7 +217,7 @@ public class ServerShutdownHandler extends EventHandler {
   throws IOException {
     // If table is not disabled but the region is offlined,
     boolean disabled = assignmentManager.getZKTable().isDisabledTable(
-        hri.getTableDesc().getNameAsString());
+        hri.getTableNameAsString());
     if (disabled) return false;
     if (hri.isOffline() && hri.isSplit()) {
       LOG.debug("Offlined and split region " + hri.getRegionNameAsString() +
@@ -328,8 +328,8 @@ public class ServerShutdownHandler extends EventHandler {
       return true;
     }
     // Now see if we have gone beyond the daughter's startrow.
-    if (!Bytes.equals(daughter.getTableDesc().getName(),
-        hri.getTableDesc().getName())) {
+    if (!Bytes.equals(daughter.getTableName(),
+        hri.getTableName())) {
       // We fell into another table.  Stop scanning.
       return false;
     }
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.util.Bytes;
 
@ -50,10 +50,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||||
import org.apache.commons.logging.Log;
|
import org.apache.commons.logging.Log;
|
||||||
import org.apache.commons.logging.LogFactory;
|
import org.apache.commons.logging.LogFactory;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
import org.apache.hadoop.fs.*;
|
||||||
import org.apache.hadoop.fs.FileStatus;
|
|
||||||
import org.apache.hadoop.fs.FileSystem;
|
|
||||||
import org.apache.hadoop.fs.Path;
|
|
||||||
import org.apache.hadoop.hbase.DoNotRetryIOException;
|
import org.apache.hadoop.hbase.DoNotRetryIOException;
|
||||||
import org.apache.hadoop.hbase.DroppedSnapshotException;
|
import org.apache.hadoop.hbase.DroppedSnapshotException;
|
||||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||||
|
@ -199,6 +196,7 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
final Path regiondir;
|
final Path regiondir;
|
||||||
KeyValue.KVComparator comparator;
|
KeyValue.KVComparator comparator;
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Data structure of write state flags used coordinating flushes,
|
* Data structure of write state flags used coordinating flushes,
|
||||||
* compactions and closes.
|
* compactions and closes.
|
||||||
|
@ -239,11 +237,11 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
|
|
||||||
final WriteState writestate = new WriteState();
|
final WriteState writestate = new WriteState();
|
||||||
|
|
||||||
final long memstoreFlushSize;
|
long memstoreFlushSize;
|
||||||
private volatile long lastFlushTime;
|
private volatile long lastFlushTime;
|
||||||
final RegionServerServices rsServices;
|
final RegionServerServices rsServices;
|
||||||
private List<Pair<Long, Long>> recentFlushes = new ArrayList<Pair<Long,Long>>();
|
private List<Pair<Long, Long>> recentFlushes = new ArrayList<Pair<Long,Long>>();
|
||||||
private final long blockingMemStoreSize;
|
private long blockingMemStoreSize;
|
||||||
final long threadWakeFrequency;
|
final long threadWakeFrequency;
|
||||||
// Used to guard closes
|
// Used to guard closes
|
||||||
final ReentrantReadWriteLock lock =
|
final ReentrantReadWriteLock lock =
|
||||||
|
@ -265,6 +263,8 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
* Name of the region info file that resides just under the region directory.
|
* Name of the region info file that resides just under the region directory.
|
||||||
*/
|
*/
|
||||||
public final static String REGIONINFO_FILE = ".regioninfo";
|
public final static String REGIONINFO_FILE = ".regioninfo";
|
||||||
|
private HTableDescriptor htableDescriptor = null;
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Should only be used for testing purposes
|
* Should only be used for testing purposes
|
||||||
|
@ -304,7 +304,8 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
* is new), then read them from the supplied path.
|
* is new), then read them from the supplied path.
|
||||||
* @param rsServices reference to {@link RegionServerServices} or null
|
* @param rsServices reference to {@link RegionServerServices} or null
|
||||||
*
|
*
|
||||||
* @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
|
* @see HRegion#newHRegion(Path, HLog,
|
||||||
|
* FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
|
||||||
*/
|
*/
|
||||||
public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
|
public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
|
||||||
HRegionInfo regionInfo, RegionServerServices rsServices) {
|
HRegionInfo regionInfo, RegionServerServices rsServices) {
|
||||||
|
@ -319,14 +320,14 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
10 * 1000);
|
10 * 1000);
|
||||||
String encodedNameStr = this.regionInfo.getEncodedName();
|
String encodedNameStr = this.regionInfo.getEncodedName();
|
||||||
this.regiondir = getRegionDir(this.tableDir, encodedNameStr);
|
this.regiondir = getRegionDir(this.tableDir, encodedNameStr);
|
||||||
long flushSize = regionInfo.getTableDesc().getMemStoreFlushSize();
|
try {
|
||||||
if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) {
|
LOG.info("Setting table desc from HDFS. Region = "
|
||||||
flushSize = conf.getLong("hbase.hregion.memstore.flush.size",
|
+ this.regionInfo.getTableNameAsString());
|
||||||
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
|
loadHTableDescriptor(tableDir);
|
||||||
|
LOG.info(" This HTD from HDFS == " + this.htableDescriptor);
|
||||||
|
} catch (IOException ioe) {
|
||||||
|
LOG.error("Could not instantiate region as error loading HTableDescriptor");
|
||||||
}
|
}
|
||||||
this.memstoreFlushSize = flushSize;
|
|
||||||
this.blockingMemStoreSize = this.memstoreFlushSize *
|
|
||||||
conf.getLong("hbase.hregion.memstore.block.multiplier", 2);
|
|
||||||
// don't initialize coprocessors if not running within a regionserver
|
// don't initialize coprocessors if not running within a regionserver
|
||||||
// TODO: revisit if coprocessors should load in other cases
|
// TODO: revisit if coprocessors should load in other cases
|
||||||
if (rsServices != null) {
|
if (rsServices != null) {
|
||||||
|
@ -338,6 +339,40 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private void loadHTableDescriptor(Path tableDir) throws IOException {
|
||||||
|
LOG.debug("Assigning tabledesc from .tableinfo for region = "
|
||||||
|
+ this.regionInfo.getRegionNameAsString());
|
||||||
|
// load HTableDescriptor
|
||||||
|
this.htableDescriptor = FSUtils.getTableDescriptor(tableDir, fs);
|
||||||
|
|
||||||
|
if (this.htableDescriptor != null) {
|
||||||
|
setHTableSpecificConf();
|
||||||
|
} else {
|
||||||
|
throw new IOException("Table description missing in " +
|
||||||
|
".tableinfo. Cannot create new region."
|
||||||
|
+ " current region is == " + this.regionInfo.toString());
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
private void setHTableSpecificConf() {
|
||||||
|
if (this.htableDescriptor != null) {
|
||||||
|
LOG.info("Setting up tabledescriptor config now ...");
|
||||||
|
long flushSize = this.htableDescriptor.getMemStoreFlushSize();
|
||||||
|
if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) {
|
||||||
|
flushSize = conf.getLong("hbase.hregion.memstore.flush.size",
|
||||||
|
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
|
||||||
|
}
|
||||||
|
this.memstoreFlushSize = flushSize;
|
||||||
|
this.blockingMemStoreSize = this.memstoreFlushSize *
|
||||||
|
conf.getLong("hbase.hregion.memstore.block.multiplier", 2);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setHtableDescriptor(HTableDescriptor htableDescriptor) {
|
||||||
|
this.htableDescriptor = htableDescriptor;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Initialize this region.
|
* Initialize this region.
|
||||||
* @return What the next sequence (edit) id should be.
|
* @return What the next sequence (edit) id should be.
|
||||||
|
@ -378,7 +413,7 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
|
|
||||||
// Load in all the HStores. Get maximum seqid.
|
// Load in all the HStores. Get maximum seqid.
|
||||||
long maxSeqId = -1;
|
long maxSeqId = -1;
|
||||||
for (HColumnDescriptor c : this.regionInfo.getTableDesc().getFamilies()) {
|
for (HColumnDescriptor c : this.htableDescriptor.getFamilies()) {
|
||||||
status.setStatus("Instantiating store for column family " + c);
|
status.setStatus("Instantiating store for column family " + c);
|
||||||
Store store = instantiateHStore(this.tableDir, c);
|
Store store = instantiateHStore(this.tableDir, c);
|
||||||
this.stores.put(c.getName(), store);
|
this.stores.put(c.getName(), store);
|
||||||
|
@ -398,7 +433,7 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
SplitTransaction.cleanupAnySplitDetritus(this);
|
SplitTransaction.cleanupAnySplitDetritus(this);
|
||||||
FSUtils.deleteDirectory(this.fs, new Path(regiondir, MERGEDIR));
|
FSUtils.deleteDirectory(this.fs, new Path(regiondir, MERGEDIR));
|
||||||
|
|
||||||
this.writestate.setReadOnly(this.regionInfo.getTableDesc().isReadOnly());
|
this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
|
||||||
|
|
||||||
this.writestate.compacting = 0;
|
this.writestate.compacting = 0;
|
||||||
this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis();
|
this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis();
|
||||||
|
@ -703,7 +738,7 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
|
|
||||||
/** @return HTableDescriptor for this region */
|
/** @return HTableDescriptor for this region */
|
||||||
public HTableDescriptor getTableDesc() {
|
public HTableDescriptor getTableDesc() {
|
||||||
return this.regionInfo.getTableDesc();
|
return this.htableDescriptor;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** @return HLog in use for this region */
|
/** @return HLog in use for this region */
|
||||||
|
@ -1156,7 +1191,7 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
// log-sequence-ids can be safely ignored.
|
// log-sequence-ids can be safely ignored.
|
||||||
if (wal != null) {
|
if (wal != null) {
|
||||||
wal.completeCacheFlush(this.regionInfo.getEncodedNameAsBytes(),
|
wal.completeCacheFlush(this.regionInfo.getEncodedNameAsBytes(),
|
||||||
regionInfo.getTableDesc().getName(), completeSequenceId,
|
regionInfo.getTableName(), completeSequenceId,
|
||||||
this.getRegionInfo().isMetaRegion());
|
this.getRegionInfo().isMetaRegion());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1268,7 +1303,7 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
void prepareScanner(Scan scan) throws IOException {
|
void prepareScanner(Scan scan) throws IOException {
|
||||||
if(!scan.hasFamilies()) {
|
if(!scan.hasFamilies()) {
|
||||||
// Adding all families to scanner
|
// Adding all families to scanner
|
||||||
for(byte[] family: regionInfo.getTableDesc().getFamiliesKeys()){
|
for(byte[] family: this.htableDescriptor.getFamiliesKeys()){
|
||||||
scan.addFamily(family);
|
scan.addFamily(family);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1303,7 +1338,7 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
private void prepareDelete(Delete delete) throws IOException {
|
private void prepareDelete(Delete delete) throws IOException {
|
||||||
// Check to see if this is a deleteRow insert
|
// Check to see if this is a deleteRow insert
|
||||||
if(delete.getFamilyMap().isEmpty()){
|
if(delete.getFamilyMap().isEmpty()){
|
||||||
for(byte [] family : regionInfo.getTableDesc().getFamiliesKeys()){
|
for(byte [] family : this.htableDescriptor.getFamiliesKeys()){
|
||||||
// Don't eat the timestamp
|
// Don't eat the timestamp
|
||||||
delete.deleteFamily(family, delete.getTimeStamp());
|
delete.deleteFamily(family, delete.getTimeStamp());
|
||||||
}
|
}
|
||||||
|
@ -1424,8 +1459,8 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
// single WALEdit.
|
// single WALEdit.
|
||||||
WALEdit walEdit = new WALEdit();
|
WALEdit walEdit = new WALEdit();
|
||||||
addFamilyMapToWALEdit(familyMap, walEdit);
|
addFamilyMapToWALEdit(familyMap, walEdit);
|
||||||
this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
|
this.log.append(regionInfo, this.htableDescriptor.getName(),
|
||||||
walEdit, now);
|
walEdit, now, this.htableDescriptor);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Now make changes to the memstore.
|
// Now make changes to the memstore.
|
||||||
|
@ -1683,8 +1718,8 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
}
|
}
|
||||||
|
|
||||||
// Append the edit to WAL
|
// Append the edit to WAL
|
||||||
this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
|
this.log.append(regionInfo, this.htableDescriptor.getName(),
|
||||||
walEdit, now);
|
walEdit, now, this.htableDescriptor);
|
||||||
|
|
||||||
// ------------------------------------
|
// ------------------------------------
|
||||||
// STEP 4. Write back to memstore
|
// STEP 4. Write back to memstore
|
||||||
|
@ -1937,8 +1972,8 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
if (writeToWAL) {
|
if (writeToWAL) {
|
||||||
WALEdit walEdit = new WALEdit();
|
WALEdit walEdit = new WALEdit();
|
||||||
addFamilyMapToWALEdit(familyMap, walEdit);
|
addFamilyMapToWALEdit(familyMap, walEdit);
|
||||||
this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
|
this.log.append(regionInfo, this.htableDescriptor.getName(),
|
||||||
walEdit, now);
|
walEdit, now, this.htableDescriptor);
|
||||||
}
|
}
|
||||||
|
|
||||||
long addedSize = applyFamilyMapToMemstore(familyMap);
|
long addedSize = applyFamilyMapToMemstore(familyMap);
|
||||||
|
@ -2079,6 +2114,7 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
final long minSeqId, final CancelableProgressable reporter,
|
final long minSeqId, final CancelableProgressable reporter,
|
||||||
final MonitoredTask status)
|
final MonitoredTask status)
|
||||||
throws UnsupportedEncodingException, IOException {
|
throws UnsupportedEncodingException, IOException {
|
||||||
|
LOG.info("replayRecoveredEditsIfAny");
|
||||||
long seqid = minSeqId;
|
long seqid = minSeqId;
|
||||||
NavigableSet<Path> files = HLog.getSplitEditFilesSorted(this.fs, regiondir);
|
NavigableSet<Path> files = HLog.getSplitEditFilesSorted(this.fs, regiondir);
|
||||||
if (files == null || files.isEmpty()) return seqid;
|
if (files == null || files.isEmpty()) return seqid;
|
||||||
|
@ -2128,6 +2164,7 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
private long replayRecoveredEdits(final Path edits,
|
private long replayRecoveredEdits(final Path edits,
|
||||||
final long minSeqId, final CancelableProgressable reporter)
|
final long minSeqId, final CancelableProgressable reporter)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
|
LOG.info("ReplayRecoveredEdits");
|
||||||
String msg = "Replaying edits from " + edits + "; minSequenceid=" + minSeqId;
|
String msg = "Replaying edits from " + edits + "; minSequenceid=" + minSeqId;
|
||||||
LOG.info(msg);
|
LOG.info(msg);
|
||||||
MonitoredTask status = TaskMonitor.get().createStatus(msg);
|
MonitoredTask status = TaskMonitor.get().createStatus(msg);
|
||||||
|
@ -2182,11 +2219,15 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
// Start coprocessor replay here. The coprocessor is for each WALEdit
|
// Start coprocessor replay here. The coprocessor is for each WALEdit
|
||||||
// instead of a KeyValue.
|
// instead of a KeyValue.
|
||||||
if (coprocessorHost != null) {
|
if (coprocessorHost != null) {
|
||||||
|
LOG.info("Running pre-WAL-restore hook in coprocessors");
|
||||||
|
|
||||||
status.setStatus("Running pre-WAL-restore hook in coprocessors");
|
status.setStatus("Running pre-WAL-restore hook in coprocessors");
|
||||||
if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) {
|
if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) {
|
||||||
// if bypass this log entry, ignore it ...
|
// if bypass this log entry, ignore it ...
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
LOG.info("CoProc Host is NULL");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (firstSeqIdInLog == -1) {
|
if (firstSeqIdInLog == -1) {
|
||||||
|
@ -2756,18 +2797,26 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
* @param info Info for region to create.
|
* @param info Info for region to create.
|
||||||
* @param rootDir Root directory for HBase instance
|
* @param rootDir Root directory for HBase instance
|
||||||
* @param conf
|
* @param conf
|
||||||
|
* @param hTableDescriptor
|
||||||
* @return new HRegion
|
* @return new HRegion
|
||||||
*
|
*
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
|
public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
|
||||||
final Configuration conf)
|
final Configuration conf,
|
||||||
|
final HTableDescriptor hTableDescriptor)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
|
LOG.info("creating HRegion " + info.getTableNameAsString()
|
||||||
|
+ " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
|
||||||
|
" Table name == " + info.getTableNameAsString());
|
||||||
|
|
||||||
Path tableDir =
|
Path tableDir =
|
||||||
HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName());
|
HTableDescriptor.getTableDir(rootDir, info.getTableName());
|
||||||
Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
|
Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
|
||||||
FileSystem fs = FileSystem.get(conf);
|
FileSystem fs = FileSystem.get(conf);
|
||||||
|
|
||||||
fs.mkdirs(regionDir);
|
fs.mkdirs(regionDir);
|
||||||
|
FSUtils.createTableDescriptor(fs, hTableDescriptor, tableDir);
|
||||||
HRegion region = HRegion.newHRegion(tableDir,
|
HRegion region = HRegion.newHRegion(tableDir,
|
||||||
new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
|
new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
|
||||||
new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf),
|
new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf),
|
||||||
|
@ -2802,7 +2851,6 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
* HRegion#getMinSequenceId() to ensure the log id is properly kept
|
* HRegion#getMinSequenceId() to ensure the log id is properly kept
|
||||||
* up. HRegionStore does this every time it opens a new region.
|
* up. HRegionStore does this every time it opens a new region.
|
||||||
* @param conf
|
* @param conf
|
||||||
* @param flusher An interface we can request flushes against.
|
|
||||||
* @param reporter An interface we can report progress against.
|
* @param reporter An interface we can report progress against.
|
||||||
* @return new HRegion
|
* @return new HRegion
|
||||||
*
|
*
|
||||||
|
@ -2819,12 +2867,52 @@ public class HRegion implements HeapSize { // , Writable{
|
||||||
throw new NullPointerException("Passed region info is null");
|
throw new NullPointerException("Passed region info is null");
|
||||||
}
|
}
|
||||||
Path dir = HTableDescriptor.getTableDir(FSUtils.getRootDir(conf),
|
Path dir = HTableDescriptor.getTableDir(FSUtils.getRootDir(conf),
|
||||||
info.getTableDesc().getName());
|
info.getTableName());
|
||||||
HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info,
|
HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info,
|
||||||
rsServices);
|
rsServices);
|
||||||
return r.openHRegion(reporter);
|
return r.openHRegion(reporter);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public static HRegion openHRegion(Path tableDir, final HRegionInfo info,
|
||||||
|
+      final HLog wal, final Configuration conf)
+  throws IOException {
+    return openHRegion(tableDir, info, wal, conf, null, null);
+  }
+
+  /**
+   * Open a Region.
+   * @param tableDir Table directory
+   * @param info Info for region to be opened.
+   * @param wal HLog for region to use. This method will call
+   * HLog#setSequenceNumber(long) passing the result of the call to
+   * HRegion#getMinSequenceId() to ensure the log id is properly kept
+   * up. HRegionStore does this every time it opens a new region.
+   * @param conf
+   * @param reporter An interface we can report progress against.
+   * @return new HRegion
+   *
+   * @throws IOException
+   */
+  public static HRegion openHRegion(final Path tableDir, final HRegionInfo info,
+      final HLog wal, final Configuration conf,
+      final RegionServerServices rsServices,
+      final CancelableProgressable reporter)
+  throws IOException {
+    LOG.info("HRegion.openHRegion Region name ==" + info.getRegionNameAsString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Opening region: " + info);
+    }
+    if (info == null) {
+      throw new NullPointerException("Passed region info is null");
+    }
+    Path dir = HTableDescriptor.getTableDir(tableDir,
+        info.getTableName());
+    HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info,
+        rsServices);
+    return r.openHRegion(reporter);
+  }
+
   /**
    * Open HRegion.
    * Calls initialize and sets sequenceid.
@@ -2844,7 +2932,7 @@ public class HRegion implements HeapSize { // , Writable{
   }

   private void checkCompressionCodecs() throws IOException {
-    for (HColumnDescriptor fam: regionInfo.getTableDesc().getColumnFamilies()) {
+    for (HColumnDescriptor fam: this.htableDescriptor.getColumnFamilies()) {
       CompressionTest.testCompression(fam.getCompression());
       CompressionTest.testCompression(fam.getCompactionCompression());
     }
@@ -2872,6 +2960,11 @@ public class HRegion implements HeapSize { // , Writable{
         HConstants.REGIONINFO_QUALIFIER,
         EnvironmentEdgeManager.currentTimeMillis(),
         Writables.getBytes(r.getRegionInfo())));
+      edits.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
+        HConstants.META_MIGRATION_QUALIFIER,
+        EnvironmentEdgeManager.currentTimeMillis(),
+        Bytes.toBytes("true")));
+
       meta.put(HConstants.CATALOG_FAMILY, edits);
     } finally {
       meta.releaseRowLock(lid);
@@ -2910,7 +3003,7 @@ public class HRegion implements HeapSize { // , Writable{
    */
   public static Path getRegionDir(final Path rootdir, final HRegionInfo info) {
     return new Path(
-      HTableDescriptor.getTableDir(rootdir, info.getTableDesc().getName()),
+      HTableDescriptor.getTableDir(rootdir, info.getTableName()),
       info.getEncodedName());
   }

@@ -2988,8 +3081,8 @@ public class HRegion implements HeapSize { // , Writable{
    * @throws IOException
    */
   public static HRegion merge(HRegion a, HRegion b) throws IOException {
-    if (!a.getRegionInfo().getTableDesc().getNameAsString().equals(
-        b.getRegionInfo().getTableDesc().getNameAsString())) {
+    if (!a.getRegionInfo().getTableNameAsString().equals(
+        b.getRegionInfo().getTableNameAsString())) {
       throw new IOException("Regions do not belong to the same table");
     }

@@ -3042,7 +3135,8 @@ public class HRegion implements HeapSize { // , Writable{
       ? b.getEndKey()
       : a.getEndKey());

-    HRegionInfo newRegionInfo = new HRegionInfo(tabledesc, startKey, endKey);
+    HRegionInfo newRegionInfo =
+        new HRegionInfo(tabledesc.getName(), startKey, endKey);
     LOG.info("Creating new region " + newRegionInfo.toString());
     String encodedName = newRegionInfo.getEncodedName();
     Path newRegionDir = HRegion.getRegionDir(a.getTableDir(), encodedName);
@@ -3181,7 +3275,7 @@ public class HRegion implements HeapSize { // , Writable{
         checkFamily(family);
       }
     } else { // Adding all families to scanner
-      for (byte[] family: regionInfo.getTableDesc().getFamiliesKeys()) {
+      for (byte[] family: this.htableDescriptor.getFamiliesKeys()) {
         get.addFamily(family);
       }
     }
@@ -3387,8 +3481,8 @@ public class HRegion implements HeapSize { // , Writable{

       // Actually write to WAL now
       if (writeToWAL) {
-        this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
-          walEdits, now);
+        this.log.append(regionInfo, this.htableDescriptor.getName(),
+          walEdits, now, this.htableDescriptor);
       }

       size = this.addAndGetGlobalMemstoreSize(size);
@@ -3458,8 +3552,8 @@ public class HRegion implements HeapSize { // , Writable{
         long now = EnvironmentEdgeManager.currentTimeMillis();
         WALEdit walEdit = new WALEdit();
         walEdit.add(newKv);
-        this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
-          walEdit, now);
+        this.log.append(regionInfo, this.htableDescriptor.getName(),
+          walEdit, now, this.htableDescriptor);
       }

       // Now request the ICV to the store, this will set the timestamp
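Illustrative sketch (not part of this commit): how a caller might invoke the new openHRegion overload added above, passing a CancelableProgressable so the open can report progress. Class and variable names below are hypothetical; package locations are assumed from this era of the codebase.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.CancelableProgressable;

public class OpenRegionSketch {
  // Open a region from its table directory, ticking a progress callback so the
  // caller (e.g. a region-open handler) can renew its lease while the open runs.
  static HRegion open(Path tableDir, HRegionInfo info, HLog wal,
      Configuration conf, RegionServerServices services) throws IOException {
    return HRegion.openHRegion(tableDir, info, wal, conf, services,
        new CancelableProgressable() {
          public boolean progress() {
            return true; // keep going; a real caller would tickle its ZK znode here
          }
        });
  }
}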
@@ -1348,6 +1348,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
   public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
       final boolean daughter)
   throws KeeperException, IOException {
+    LOG.info("HRS.PostOpenDeployTasks");
     // Do checks to see if we need to compact (references or too many files)
     for (Store s : r.getStores().values()) {
       if (s.hasReferences() || s.needsCompaction()) {
@@ -1357,24 +1358,36 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,

     // Add to online regions if all above was successful.
     addToOnlineRegions(r);
+    LOG.info("addToOnlineRegions is done" + r.getRegionInfo());
     // Update ZK, ROOT or META
     if (r.getRegionInfo().isRootRegion()) {
+
+      LOG.info("setRootLocation");
       RootLocationEditor.setRootLocation(getZooKeeper(),
         this.serverNameFromMasterPOV);
     } else if (r.getRegionInfo().isMetaRegion()) {
+      LOG.info("updateMetaLocation");
+
       MetaEditor.updateMetaLocation(ct, r.getRegionInfo(),
         this.serverNameFromMasterPOV);
     } else {
+      LOG.info("updateMetaLocation 111");
+
       if (daughter) {
+        LOG.info("updateMetaLocation 22");
+
         // If daughter of a split, update whole row, not just location.
         MetaEditor.addDaughter(ct, r.getRegionInfo(),
           this.serverNameFromMasterPOV);
       } else {
+        LOG.info("updateMetaLocation 33");
+
         MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
           this.serverNameFromMasterPOV);
       }
     }
+    LOG.info("END HRS.PostOpenDeployTasks");
+
   }

   /**
@@ -22,10 +22,7 @@ package org.apache.hadoop.hbase.regionserver;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -165,6 +162,12 @@ class LogRoller extends Thread implements WALObserver {
     // Not interested.
   }

+  @Override
+  public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
+      WALEdit logEdit) {
+    //Not interested
+  }
+
   @Override
   public void logCloseRequested() {
     // not interested
@@ -170,9 +170,9 @@ public class SplitTransaction {
       return false;
     }
     long rid = getDaughterRegionIdTimestamp(hri);
-    this.hri_a = new HRegionInfo(hri.getTableDesc(), startKey, this.splitrow,
+    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow,
       false, rid);
-    this.hri_b = new HRegionInfo(hri.getTableDesc(), this.splitrow, endKey,
+    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey,
       false, rid);
     return true;
   }
@@ -39,11 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -195,9 +191,15 @@ public class Store implements HeapSize {

     // Check if this is in-memory store
     this.inMemory = family.isInMemory();
+    long maxFileSize = 0L;
+    HTableDescriptor hTableDescriptor = region.getTableDesc();
+    if (hTableDescriptor != null) {
+      maxFileSize = hTableDescriptor.getMaxFileSize();
+    } else {
+      maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE;
+    }
+
     // By default we split region if a file > HConstants.DEFAULT_MAX_FILE_SIZE.
-    long maxFileSize = info.getTableDesc().getMaxFileSize();
     if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) {
       maxFileSize = conf.getLong("hbase.hregion.max.filesize",
         HConstants.DEFAULT_MAX_FILE_SIZE);
@@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicBoolean;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.executor.EventHandler;
@@ -71,9 +72,7 @@ public class OpenRegionHandler extends EventHandler {
   public void process() throws IOException {
     try {
       final String name = regionInfo.getRegionNameAsString();
-      LOG.debug("Processing open of " + name);
       if (this.server.isStopped() || this.rsServices.isStopping()) {
-        LOG.info("Server stopping or stopped, skipping open of " + name);
         return;
       }
       final String encodedName = regionInfo.getEncodedName();
@@ -182,6 +181,7 @@ public class OpenRegionHandler extends EventHandler {
         Thread.currentThread().interrupt();
       }
     }

     // Was there an exception opening the region? This should trigger on
     // InterruptedException too. If so, we failed.
     return !t.interrupted() && t.getException() == null;
@@ -261,6 +261,33 @@ public class OpenRegionHandler extends EventHandler {
     return result;
   }

+  /**
+   * @return Instance of HRegion if successful open else null.
+   */
+  HRegion openRegion(Path tableDir) {
+    HRegion region = null;
+    try {
+      // Instantiate the region. This also periodically tickles our zk OPENING
+      // state so master doesn't timeout this region in transition.
+      region = HRegion.openHRegion(tableDir, this.regionInfo, this.rsServices.getWAL(),
+        this.server.getConfiguration(), this.rsServices,
+        new CancelableProgressable() {
+          public boolean progress() {
+            // We may lose the znode ownership during the open. Currently its
+            // too hard interrupting ongoing region open. Just let it complete
+            // and check we still have the znode after region open.
+            return tickleOpening("open_region_progress");
+          }
+        });
+    } catch (IOException e) {
+      // We failed open. Let our znode expire in regions-in-transition and
+      // Master will assign elsewhere. Presumes nothing to close.
+      LOG.error("Failed open of region=" +
+        this.regionInfo.getRegionNameAsString(), e);
+    }
+    return region;
+  }
+
   /**
    * @return Instance of HRegion if successful open else null.
    */
@@ -53,11 +53,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.Syncable;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -816,22 +812,6 @@ public class HLog implements Syncable {
     }
   }

-  /** Append an entry to the log.
-   *
-   * @param regionInfo
-   * @param logEdit
-   * @param now Time of this edit write.
-   * @throws IOException
-   */
-  public void append(HRegionInfo regionInfo, WALEdit logEdit,
-    final long now,
-    final boolean isMetaRegion)
-  throws IOException {
-    byte [] regionName = regionInfo.getEncodedNameAsBytes();
-    byte [] tableName = regionInfo.getTableDesc().getName();
-    this.append(regionInfo, makeKey(regionName, tableName, -1, now), logEdit);
-  }
-
   /**
    * @param now
    * @param regionName
@@ -851,7 +831,8 @@ public class HLog implements Syncable {
    * @param logKey
    * @throws IOException
    */
-  public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit)
+  public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit,
+    HTableDescriptor htd)
   throws IOException {
     if (this.closed) {
       throw new IOException("Cannot append; log is closed");
@@ -866,14 +847,14 @@ public class HLog implements Syncable {
       // is greater than or equal to the value in lastSeqWritten.
       this.lastSeqWritten.putIfAbsent(regionInfo.getEncodedNameAsBytes(),
         Long.valueOf(seqNum));
-      doWrite(regionInfo, logKey, logEdit);
+      doWrite(regionInfo, logKey, logEdit, htd);
       this.numEntries.incrementAndGet();
     }

     // Sync if catalog region, and if not then check if that table supports
     // deferred log flushing
     if (regionInfo.isMetaRegion() ||
-        !regionInfo.getTableDesc().isDeferredLogFlush()) {
+        !htd.isDeferredLogFlush()) {
       // sync txn to file system
       this.sync();
     }
@@ -903,7 +884,7 @@ public class HLog implements Syncable {
    * @throws IOException
    */
   public void append(HRegionInfo info, byte [] tableName, WALEdit edits,
-    final long now)
+    final long now, HTableDescriptor htd)
   throws IOException {
     if (edits.isEmpty()) return;
     if (this.closed) {
@@ -921,13 +902,13 @@ public class HLog implements Syncable {
       byte [] hriKey = info.getEncodedNameAsBytes();
       this.lastSeqWritten.putIfAbsent(hriKey, seqNum);
       HLogKey logKey = makeKey(hriKey, tableName, seqNum, now);
-      doWrite(info, logKey, edits);
+      doWrite(info, logKey, edits, htd);
       this.numEntries.incrementAndGet();
     }
     // Sync if catalog region, and if not then check if that table supports
     // deferred log flushing
     if (info.isMetaRegion() ||
-        !info.getTableDesc().isDeferredLogFlush()) {
+        !htd.isDeferredLogFlush()) {
       // sync txn to file system
       this.sync();
     }
@@ -1054,14 +1035,15 @@ public class HLog implements Syncable {
     }
   }

-  protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit)
+  protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit,
+    HTableDescriptor htd)
   throws IOException {
     if (!this.enabled) {
       return;
     }
     if (!this.listeners.isEmpty()) {
       for (WALObserver i: this.listeners) {
-        i.visitLogEntryBeforeWrite(info, logKey, logEdit);
+        i.visitLogEntryBeforeWrite(htd, logKey, logEdit);
       }
     }
     try {
@@ -1092,6 +1074,7 @@ public class HLog implements Syncable {
     }
   }

+
   /** @return How many items have been added to the log */
   int getNumEntries() {
     return numEntries.get();
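Illustrative sketch (not part of this commit): appending an edit with the reworked HLog.append signature, which now receives the HTableDescriptor explicitly instead of reading it from HRegionInfo; HLog consults htd.isDeferredLogFlush() to decide whether to sync immediately. Names below are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class WalAppendSketch {
  // Wrap a single KeyValue in a WALEdit and append it, handing the table
  // descriptor to the log alongside the region info and table name.
  static void appendOne(HLog log, HRegionInfo info, HTableDescriptor htd,
      KeyValue kv) throws IOException {
    WALEdit edit = new WALEdit();
    edit.add(kv);
    long now = EnvironmentEdgeManager.currentTimeMillis();
    log.append(info, htd.getName(), edit, now, htd);
  }
}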
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver.wal;

 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;

 /**
  * Get notification of {@link HLog}/WAL log events. The invocations are inline
@@ -51,4 +52,14 @@ public interface WALObserver {
   */
  public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
    WALEdit logEdit);

+ /**
+  *
+  * @param htd
+  * @param logKey
+  * @param logEdit
+  */
+ public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
+   WALEdit logEdit);
+
}
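Illustrative sketch (not part of this commit): the shape of the two visitLogEntryBeforeWrite callbacks an observer now provides, the existing HRegionInfo-based one plus the new HTableDescriptor-based one. The fragment below deliberately omits the rest of the WALObserver interface and does not declare "implements WALObserver"; the class name is hypothetical.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class ObserverCallbacksSketch {
  // Old-style callback, still present on the interface.
  public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
      WALEdit logEdit) {
    // not interested
  }

  // New callback: the table descriptor travels with the entry, so observers such
  // as Replication can read per-family settings without HRegionInfo.getTableDesc().
  public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
      WALEdit logEdit) {
    // e.g. consult htd.getFamily(family).getScope() as Replication does below
  }
}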
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -132,12 +133,18 @@ public class Replication implements WALObserver {
   @Override
   public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
       WALEdit logEdit) {
+    // Not interested
+  }
+
+  @Override
+  public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
+      WALEdit logEdit) {
     NavigableMap<byte[], Integer> scopes =
         new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
     byte[] family;
     for (KeyValue kv : logEdit.getKeyValues()) {
       family = kv.getFamily();
-      int scope = info.getTableDesc().getFamily(family).getScope();
+      int scope = htd.getFamily(family).getScope();
       if (scope != REPLICATION_SCOPE_LOCAL &&
           !scopes.containsKey(family)) {
         scopes.put(family, scope);
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -830,4 +831,209 @@ public class FSUtils {
     }
     LOG.info("Finished lease recover attempt for " + p);
   }

+
+  public static Map<String, HTableDescriptor> getTableDescriptors(
+    final Configuration config)
+  throws IOException {
+    Path path = getRootDir(config);
+    // since HMaster.getFileSystem() is package private
+    FileSystem fs = path.getFileSystem(config);
+    return getTableDescriptors(fs, path);
+  }
+
+  public static Map<String, HTableDescriptor> getTableDescriptors(
+    final FileSystem fs, final Path hbaseRootDir)
+  throws IOException {
+    Map<String, HTableDescriptor> desc =
+        new HashMap<String, HTableDescriptor>();
+    DirFilter df = new DirFilter(fs);
+    // presumes any directory under hbase.rootdir is a table
+    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
+    for (FileStatus tableDir : tableDirs) {
+      Path d = tableDir.getPath();
+      String tableName = d.getName();
+
+      if (tableName.equals(HConstants.HREGION_LOGDIR_NAME)
+          || tableName.equals(Bytes.toString(HConstants.ROOT_TABLE_NAME))
+          || tableName.equals(Bytes.toString(HConstants.META_TABLE_NAME))
+          || tableName.equals(HConstants.HREGION_OLDLOGDIR_NAME)
+          ) {
+        continue;
+      }
+      LOG.info("Adding tabledescriptor for table = " + tableName);
+      HTableDescriptor htd = readTableDescriptor(fs, hbaseRootDir,
+        tableName);
+      if (htd != null) {
+        if (!desc.containsKey(tableName)) {
+          desc.put(tableName, htd);
+        }
+      }
+    }
+    return desc;
+  }
+
+  private static Path getTableInfoPath(Path hbaseRootDir, String tableName) {
+    Path tablePath = new Path(hbaseRootDir, tableName);
+    return new Path(tablePath, HConstants.TABLEINFO_NAME);
+  }
+
+  /**
+   * Get table info path for a table.
+   * @param tableName
+   * @return Table info path
+   */
+  private static Path getTableInfoPath(byte[] tableName, Configuration conf) throws IOException {
+    Path tablePath = new Path(getRootDir(conf), Bytes.toString(tableName));
+    Path tableInfoPath = new Path(tablePath, HConstants.TABLEINFO_NAME);
+    return tableInfoPath;
+  }
+
+  private static Path getTablePath(byte[] tableName, Configuration conf) throws IOException {
+    return new Path(getRootDir(conf), Bytes.toString(tableName));
+  }
+
+  private static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
+    return getRootDir(conf).getFileSystem(conf);
+  }
+
+  /**
+   * Get HTableDescriptor
+   * @param config
+   * @param tableName
+   * @return HTableDescriptor for table
+   * @throws IOException
+   */
+  public static HTableDescriptor getHTableDescriptor(Configuration config,
+    String tableName)
+  throws IOException {
+    Path path = getRootDir(config);
+    FileSystem fs = path.getFileSystem(config);
+    return readTableDescriptor(fs, path, tableName);
+  }
+
+  private static HTableDescriptor readTableDescriptor(FileSystem fs,
+    Path hbaseRootDir,
+    String tableName) {
+    try {
+      FSDataInputStream fsDataInputStream =
+        fs.open(getTableInfoPath(hbaseRootDir, tableName));
+      HTableDescriptor hTableDescriptor = new HTableDescriptor();
+      hTableDescriptor.readFields(fsDataInputStream);
+      fsDataInputStream.close();
+      return hTableDescriptor;
+    } catch (IOException ioe) {
+      LOG.info("Exception during readTableDecriptor. Current table name = " + tableName , ioe);
+    }
+    return null;
+  }
+
+  public static HTableDescriptor getTableDescriptor(Path tableDir, FileSystem fs) {
+    try {
+      LOG.info("Reading table descriptor from .tableinfo. current path = "
+        + tableDir);
+      if (tableDir == null) {
+        LOG.info("Reading table descriptor from .tableinfo current tablename is NULL ");
+        return null;
+      }
+
+      FSDataInputStream fsDataInputStream =
+        fs.open(new Path(tableDir, HConstants.TABLEINFO_NAME));
+      HTableDescriptor hTableDescriptor = new HTableDescriptor();
+      hTableDescriptor.readFields(fsDataInputStream);
+      LOG.info("Current tabledescriptor from .tableinfo is " + hTableDescriptor.toString());
+      fsDataInputStream.close();
+      return hTableDescriptor;
+    } catch (IOException ioe) {
+      LOG.info("Exception during getTableDescriptor ", ioe);
+    }
+    return null;
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS.
+   * @param htableDescriptor
+   */
+  public static void createTableDescriptor(HTableDescriptor htableDescriptor,
+    Configuration conf) {
+    try {
+      Path tableDir = getTablePath(htableDescriptor.getName(), conf);
+      FileSystem fs = getCurrentFileSystem(conf);
+      createTableDescriptor(fs, htableDescriptor, tableDir);
+    } catch(IOException ioe) {
+      LOG.info("IOException while trying to create tableInfo in HDFS", ioe);
+    }
+  }
+
+  public static void createTableDescriptor(FileSystem fs,
+    HTableDescriptor htableDescriptor,
+    Path tableDir) {
+    try {
+      Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
+      LOG.info("Current tableInfoPath = " + tableInfoPath
+        + " tableDir = " + tableDir) ;
+      if (fs.exists(tableInfoPath) &&
+          fs.getFileStatus(tableInfoPath).getLen() > 0) {
+        LOG.info("TableInfo already exists.. Skipping creation");
+        return;
+      }
+      writeTableDescriptor(fs, htableDescriptor, tableDir);
+    } catch(IOException ioe) {
+      LOG.info("IOException while trying to create tableInfo in HDFS", ioe);
+    }
+  }
+
+  private static void writeTableDescriptor(FileSystem fs,
+    HTableDescriptor hTableDescriptor,
+    Path tableDir) throws IOException {
+    // Create in tmpdir and then move into place in case we crash after
+    // create but before close. If we don't successfully close the file,
+    // subsequent region reopens will fail the below because create is
+    // registered in NN.
+    Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
+    Path tmpPath = new Path(new Path(tableDir,".tmp"),
+      HConstants.TABLEINFO_NAME);
+    LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
+    FSDataOutputStream out = fs.create(tmpPath, true);
+    try {
+      hTableDescriptor.write(out);
+      out.write('\n');
+      out.write('\n');
+      out.write(Bytes.toBytes(hTableDescriptor.toString()));
+    } finally {
+      out.close();
+    }
+    if (!fs.rename(tmpPath, tableInfoPath)) {
+      throw new IOException("Unable to rename " + tmpPath + " to " +
+        tableInfoPath);
+    } else {
+      LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
+    }
+  }
+
+
+  public static void updateHTableDescriptor(FileSystem fs,
+    Configuration conf,
+    HTableDescriptor hTableDescriptor) throws IOException
+  {
+    Path tableInfoPath = getTableInfoPath(hTableDescriptor.getName(), conf);
+    FSDataOutputStream out = fs.create(tableInfoPath, true);
+    try {
+      hTableDescriptor.write(out);
+      out.write('\n');
+      out.write('\n');
+      out.write(Bytes.toBytes(hTableDescriptor.toString()));
+      LOG.info("updateHTableDescriptor. Updated tableinfo in HDFS under "
+        + tableInfoPath + " For HTD => "
+        + hTableDescriptor.toString());
+    } finally {
+      out.close();
+    }
+  }
+
+  private static Path getTmpDir(HTableDescriptor htableDescriptor, Configuration configuration)
+  throws IOException {
+    return new Path(getTablePath(htableDescriptor.getName(), configuration), ".tmp");
+  }
+
 }
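Illustrative sketch (not part of this commit): exercising the new FSUtils helpers that persist and read the .tableinfo file under hbase.rootdir. The table and family names below are hypothetical.

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSUtils;

public class TableInfoSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();

    // Write a .tableinfo file for a table under its directory in hbase.rootdir.
    HTableDescriptor htd = new HTableDescriptor("example_table");
    htd.addFamily(new HColumnDescriptor("f1"));
    FSUtils.createTableDescriptor(htd, conf);

    // Read one descriptor back, or list the descriptor of every table.
    HTableDescriptor readBack = FSUtils.getHTableDescriptor(conf, "example_table");
    Map<String, HTableDescriptor> all = FSUtils.getTableDescriptors(conf);
    System.out.println("read back: " + readBack + "; tables found: " + all.size());
  }
}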
@@ -156,11 +156,13 @@ public class HBaseFsck {
     // get a list of all tables that have not changed recently.
     AtomicInteger numSkipped = new AtomicInteger(0);
     HTableDescriptor[] allTables = getTables(numSkipped);
-    errors.print("Number of Tables: " + allTables.length);
+    errors.print("Number of Tables: " +
+      (allTables == null ? 0 : allTables.length));
     if (details) {
       if (numSkipped.get() > 0) {
         errors.detail("Number of Tables in flux: " + numSkipped.get());
       }
+      if (allTables != null && allTables.length > 0) {
         for (HTableDescriptor td : allTables) {
           String tableName = td.getNameAsString();
           errors.detail("  Table: " + tableName + "\t" +
@@ -169,6 +171,9 @@ public class HBaseFsck {
             (td.isMetaRegion() ? "META" : "    ")) + "\t" +
             " families: " + td.getFamilies().size());
         }
+      }
     }

     // From the master, get a list of all known live region servers
@@ -255,7 +260,7 @@ public class HBaseFsck {
    * @throws KeeperException
    */
   private boolean isTableDisabled(HRegionInfo regionInfo) {
-    return disabledTables.contains(regionInfo.getTableDesc().getName());
+    return disabledTables.contains(regionInfo.getTableName());
   }

   /**
@@ -521,7 +526,7 @@ public class HBaseFsck {
       if (hbi.deployedOn.size() == 0) continue;

       // We should be safe here
-      String tableName = hbi.metaEntry.getTableDesc().getNameAsString();
+      String tableName = hbi.metaEntry.getTableNameAsString();
       TInfo modTInfo = tablesInfo.get(tableName);
       if (modTInfo == null) {
         modTInfo = new TInfo(tableName);
@@ -653,7 +658,7 @@ public class HBaseFsck {
    * @throws IOException if an error is encountered
    */
   HTableDescriptor[] getTables(AtomicInteger numSkipped) {
-    TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
+    List<String> tableNames = new ArrayList<String>();
     long now = System.currentTimeMillis();

     for (HbckInfo hbi : regionInfo.values()) {
@@ -663,15 +668,27 @@ public class HBaseFsck {
       // pick only those tables that were not modified in the last few milliseconds.
       if (info != null && info.getStartKey().length == 0 && !info.isMetaRegion()) {
         if (info.modTime + timelag < now) {
-          uniqueTables.add(info.getTableDesc());
+          tableNames.add(info.getTableNameAsString());
         } else {
           numSkipped.incrementAndGet(); // one more in-flux table
         }
       }
     }
-    return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
+    return getHTableDescriptors(tableNames);
   }

+  HTableDescriptor[] getHTableDescriptors(List<String> tableNames) {
+    HTableDescriptor[] htd = null;
+    try {
+      LOG.info("getHTableDescriptors == tableNames => " + tableNames);
+      htd = new HBaseAdmin(conf).getTableDescriptors(tableNames);
+    } catch (IOException e) {
+      LOG.debug("Exception getting table descriptors", e);
+    }
+    return htd;
+  }
+
   /**
    * Gets the entry in regionInfo corresponding to the the given encoded
    * region name. If the region has not been seen yet, a new entry is added
@@ -261,7 +261,7 @@ class HMerge {
           Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
       }
       HRegionInfo region = Writables.getHRegionInfo(regionInfoValue);
-      if (!Bytes.equals(region.getTableDesc().getName(), this.tableName)) {
+      if (!Bytes.equals(region.getTableName(), this.tableName)) {
         return null;
       }
       return region;
@@ -321,78 +321,6 @@ public class MetaUtils {
     t.delete(delete);
   }

-  /**
-   * Offline version of the online TableOperation,
-   * org.apache.hadoop.hbase.master.AddColumn.
-   * @param tableName table name
-   * @param hcd Add this column to <code>tableName</code>
-   * @throws IOException e
-   */
-  public void addColumn(final byte [] tableName,
-    final HColumnDescriptor hcd)
-  throws IOException {
-    List<HRegionInfo> metas = getMETARows(tableName);
-    for (HRegionInfo hri: metas) {
-      final HRegion m = getMetaRegion(hri);
-      scanMetaRegion(m, new ScannerListener() {
-        private boolean inTable = true;
-
-        @SuppressWarnings("synthetic-access")
-        public boolean processRow(HRegionInfo info) throws IOException {
-          LOG.debug("Testing " + Bytes.toString(tableName) + " against " +
-            Bytes.toString(info.getTableDesc().getName()));
-          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
-            this.inTable = false;
-            info.getTableDesc().addFamily(hcd);
-            updateMETARegionInfo(m, info);
-            return true;
-          }
-          // If we got here and we have not yet encountered the table yet,
-          // inTable will be false.  Otherwise, we've passed out the table.
-          // Stop the scanner.
-          return this.inTable;
-        }});
-    }
-  }
-
-  /**
-   * Offline version of the online TableOperation,
-   * org.apache.hadoop.hbase.master.DeleteColumn.
-   * @param tableName table name
-   * @param columnFamily Name of column name to remove.
-   * @throws IOException e
-   */
-  public void deleteColumn(final byte [] tableName,
-    final byte [] columnFamily) throws IOException {
-    List<HRegionInfo> metas = getMETARows(tableName);
-    for (HRegionInfo hri: metas) {
-      final HRegion m = getMetaRegion(hri);
-      scanMetaRegion(m, new ScannerListener() {
-        private boolean inTable = true;
-
-        @SuppressWarnings("synthetic-access")
-        public boolean processRow(HRegionInfo info) throws IOException {
-          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
-            this.inTable = false;
-            info.getTableDesc().removeFamily(columnFamily);
-            updateMETARegionInfo(m, info);
-            Path tabledir = new Path(rootdir,
-              info.getTableDesc().getNameAsString());
-            Path p = Store.getStoreHomedir(tabledir, info.getEncodedName(),
-              columnFamily);
-            if (!fs.delete(p, true)) {
-              LOG.warn("Failed delete of " + p);
-            }
-            return false;
-          }
-          // If we got here and we have not yet encountered the table yet,
-          // inTable will be false.  Otherwise, we've passed out the table.
-          // Stop the scanner.
-          return this.inTable;
-        }});
-    }
-  }
-
   /**
    * Update COL_REGIONINFO in meta region r with HRegionInfo hri
    *
@@ -466,7 +394,7 @@ public class MetaUtils {

       public boolean processRow(HRegionInfo info) throws IOException {
         SL_LOG.debug("Testing " + info);
-        if (Bytes.equals(info.getTableDesc().getName(),
+        if (Bytes.equals(info.getTableName(),
             HConstants.META_TABLE_NAME)) {
           result.add(info);
           return false;
@@ -560,8 +560,9 @@ public class RegionSplitter {
         if (sk.length == 0)
           sk = splitAlgo.firstRow();
         String startKey = splitAlgo.rowToStr(sk);
+        HTableDescriptor htd = table.getTableDescriptor();
         // check every Column Family for that region
-        for (HColumnDescriptor c : hri.getTableDesc().getFamilies()) {
+        for (HColumnDescriptor c : htd.getFamilies()) {
           Path cfDir = Store.getStoreHomedir(tableDir, hri.getEncodedName(),
             c.getName());
           if (fs.exists(cfDir)) {
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.util;

 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Writable;

@@ -62,7 +63,7 @@ public class Writables {

  /**
   * Put a bunch of Writables as bytes all into the one byte array.
-  * @param w writable
+  * @param ws writable
   * @return The bytes of <code>w</code> gotten by running its
   * {@link Writable#write(java.io.DataOutput)} method.
   * @throws IOException e
@@ -215,4 +216,16 @@ public class Writables {
     }
     return tgt;
   }

+  /**
+   * Get HREgionInfoForMigration serialized from bytes.
+   * @param bytes serialized bytes
+   * @return HRegionInfoForMigration
+   * @throws IOException
+   */
+  public static HRegionInfo090x getHRegionInfoForMigration(final byte [] bytes)
+  throws IOException {
+    return (HRegionInfo090x)getWritable(bytes, new HRegionInfo090x());
+  }
+
 }
@@ -57,9 +57,15 @@ public abstract class HBaseTestCase extends TestCase {
   /** configuration parameter name for test directory */
   public static final String TEST_DIRECTORY_KEY = "test.build.data";

+  /*
   protected final static byte [] fam1 = Bytes.toBytes("colfamily1");
   protected final static byte [] fam2 = Bytes.toBytes("colfamily2");
   protected final static byte [] fam3 = Bytes.toBytes("colfamily3");
+  */
+  protected final static byte [] fam1 = Bytes.toBytes("colfamily11");
+  protected final static byte [] fam2 = Bytes.toBytes("colfamily21");
+  protected final static byte [] fam3 = Bytes.toBytes("colfamily31");
+
   protected static final byte [][] COLUMNS = {fam1, fam2, fam3};

   private boolean localfs = false;
@@ -159,9 +165,8 @@ public abstract class HBaseTestCase extends TestCase {
     Path rootdir = filesystem.makeQualified(
         new Path(conf.get(HConstants.HBASE_DIR)));
     filesystem.mkdirs(rootdir);
+    HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
-    return HRegion.createHRegion(new HRegionInfo(desc, startKey, endKey),
-        rootdir, conf);
+    return HRegion.createHRegion(hri, rootdir, conf, desc);
   }

   protected HRegion openClosedRegion(final HRegion closedRegion)
@@ -653,9 +658,10 @@ public abstract class HBaseTestCase extends TestCase {
   }

   protected void createRootAndMetaRegions() throws IOException {
-    root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir, conf);
+    root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir,
+        conf, HTableDescriptor.ROOT_TABLEDESC);
     meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
-        conf);
+        conf, HTableDescriptor.META_TABLEDESC);
     HRegion.addRegionToMETA(root, meta);
   }

@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -830,7 +831,7 @@ public class HBaseTestingUtility {
     int count = 0;
     for (int i = 0; i < startKeys.length; i++) {
       int j = (i + 1) % startKeys.length;
-      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
+      HRegionInfo hri = new HRegionInfo(table.getTableName(),
         startKeys[i], startKeys[j]);
       Put put = new Put(hri.getRegionName());
       put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
@@ -858,6 +859,65 @@ public class HBaseTestingUtility {
     return count;
   }

+  public int createMultiRegionsWithLegacyHRI(final Configuration c,
+    final HTableDescriptor htd,
+    final byte[] columnFamily, byte [][] startKeys)
+  throws IOException {
+    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
+    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
+    if(!htd.hasFamily(columnFamily)) {
+      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
+      htd.addFamily(hcd);
+    }
+    List<HRegionInfo090x> newRegions
+        = new ArrayList<HRegionInfo090x>(startKeys.length);
+    int count = 0;
+    for (int i = 0; i < startKeys.length; i++) {
+      int j = (i + 1) % startKeys.length;
+      HRegionInfo090x hri = new HRegionInfo090x(htd,
+        startKeys[i], startKeys[j]);
+      Put put = new Put(hri.getRegionName());
+      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+        Writables.getBytes(hri));
+      meta.put(put);
+      LOG.info("createMultiRegions: PUT inserted " + hri.toString());
+
+      newRegions.add(hri);
+      count++;
+    }
+    return count;
+
+  }
+
+  public int createMultiRegionsWithNewHRI(final Configuration c, final HTableDescriptor htd,
+    final byte[] columnFamily, byte [][] startKeys)
+  throws IOException {
+    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
+    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
+    if(!htd.hasFamily(columnFamily)) {
+      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
+      htd.addFamily(hcd);
+    }
+    List<HRegionInfo> newRegions
+        = new ArrayList<HRegionInfo>(startKeys.length);
+    int count = 0;
+    for (int i = 0; i < startKeys.length; i++) {
+      int j = (i + 1) % startKeys.length;
+      HRegionInfo hri = new HRegionInfo(htd.getName(),
+        startKeys[i], startKeys[j]);
+      Put put = new Put(hri.getRegionName());
+      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+        Writables.getBytes(hri));
+      meta.put(put);
+      LOG.info("createMultiRegions: PUT inserted " + hri.toString());
+
+      newRegions.add(hri);
+      count++;
+    }
+    return count;
+
+  }
+
   /**
    * Create rows in META for regions of the specified table with the specified
    * start keys.  The first startKey should be a 0 length byte array if you
@@ -878,7 +938,8 @@ public class HBaseTestingUtility {
     int count = 0;
     for (int i = 0; i < startKeys.length; i++) {
       int j = (i + 1) % startKeys.length;
-      HRegionInfo hri = new HRegionInfo(htd, startKeys[i], startKeys[j]);
+      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
+        startKeys[j]);
       Put put = new Put(hri.getRegionName());
       put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
         Writables.getBytes(hri));
@@ -922,8 +983,7 @@ public class HBaseTestingUtility {
     for (Result result : s) {
       HRegionInfo info = Writables.getHRegionInfo(
         result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
-      HTableDescriptor desc = info.getTableDesc();
-      if (Bytes.compareTo(desc.getName(), tableName) == 0) {
+      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
         LOG.info("getMetaTableRows: row -> " +
           Bytes.toStringBinary(result.getRow()));
         rows.add(result.getRow());
@@ -31,25 +31,25 @@ public class TestCompare extends TestCase {
    * Sort of HRegionInfo.
    */
   public void testHRegionInfo() {
-    HRegionInfo a = new HRegionInfo(new HTableDescriptor("a"), null, null);
-    HRegionInfo b = new HRegionInfo(new HTableDescriptor("b"), null, null);
+    HRegionInfo a = new HRegionInfo(Bytes.toBytes("a"), null, null);
+    HRegionInfo b = new HRegionInfo(Bytes.toBytes("b"), null, null);
     assertTrue(a.compareTo(b) != 0);
     HTableDescriptor t = new HTableDescriptor("t");
     byte [] midway = Bytes.toBytes("midway");
-    a = new HRegionInfo(t, null, midway);
-    b = new HRegionInfo(t, midway, null);
+    a = new HRegionInfo(t.getName(), null, midway);
+    b = new HRegionInfo(t.getName(), midway, null);
     assertTrue(a.compareTo(b) < 0);
     assertTrue(b.compareTo(a) > 0);
     assertEquals(a, a);
     assertTrue(a.compareTo(a) == 0);
-    a = new HRegionInfo(t, Bytes.toBytes("a"), Bytes.toBytes("d"));
-    b = new HRegionInfo(t, Bytes.toBytes("e"), Bytes.toBytes("g"));
+    a = new HRegionInfo(t.getName(), Bytes.toBytes("a"), Bytes.toBytes("d"));
+    b = new HRegionInfo(t.getName(), Bytes.toBytes("e"), Bytes.toBytes("g"));
     assertTrue(a.compareTo(b) < 0);
-    a = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
-    b = new HRegionInfo(t, Bytes.toBytes("e"), Bytes.toBytes("g"));
+    a = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
+    b = new HRegionInfo(t.getName(), Bytes.toBytes("e"), Bytes.toBytes("g"));
     assertTrue(a.compareTo(b) < 0);
-    a = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
-    b = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("eeee"));
+    a = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
+    b = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("eeee"));
     assertTrue(a.compareTo(b) < 0);
   }
 }
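Illustrative sketch (not part of this commit): the HRegionInfo constructor now takes the table name bytes rather than an HTableDescriptor, as the converted test above shows; the descriptor itself lives in the .tableinfo file. Names below are hypothetical.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionInfoSketch {
  // Build a region spanning [ "" , "m" ) for a given table.
  static HRegionInfo firstHalfOf(HTableDescriptor htd) {
    // Old style: new HRegionInfo(htd, startKey, endKey)
    return new HRegionInfo(htd.getName(), Bytes.toBytes(""), Bytes.toBytes("m"));
  }
}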
@@ -57,10 +57,10 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase {
     this.desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));

     // Region 0 will contain the key range [,row_0500)
-    INFOS[0] = new HRegionInfo(this.desc, HConstants.EMPTY_START_ROW,
+    INFOS[0] = new HRegionInfo(desc.getName(), HConstants.EMPTY_START_ROW,
       Bytes.toBytes("row_0500"));
     // Region 1 will contain the key range [row_0500,)
-    INFOS[1] = new HRegionInfo(this.desc, Bytes.toBytes("row_0500"),
+    INFOS[1] = new HRegionInfo(desc.getName(), Bytes.toBytes("row_0500"),
       HConstants.EMPTY_END_ROW);

     // Create root and meta regions
@@ -68,7 +68,8 @@ public class TestScanMultipleVersions extends HBaseClusterTestCase {
     // Create the regions
     for (int i = 0; i < REGIONS.length; i++) {
       REGIONS[i] =
-        HRegion.createHRegion(this.INFOS[i], this.testDir, this.conf);
+        HRegion.createHRegion(this.INFOS[i], this.testDir, this.conf,
+          this.desc);
       // Insert data
       for (int j = 0; j < TIMESTAMPS.length; j++) {
         Put put = new Put(ROWS[i], TIMESTAMPS[j], null);
@@ -103,8 +103,8 @@ public class TestSerialization {
     HRegionInfo deserializedHri =
       (HRegionInfo)Writables.getWritable(hrib, new HRegionInfo());
     assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
-    assertEquals(hri.getTableDesc().getFamilies().size(),
+    //assertEquals(hri.getTableDesc().getFamilies().size(),
-      deserializedHri.getTableDesc().getFamilies().size());
+    //  deserializedHri.getTableDesc().getFamilies().size());
   }

   @Test public void testRegionInfos() throws Exception {
@@ -126,7 +126,7 @@ public class TestSerialization {
     for (int i = 0; i < families.length; i++) {
       htd.addFamily(new HColumnDescriptor(families[i]));
     }
-    return new HRegionInfo(htd, HConstants.EMPTY_START_ROW,
+    return new HRegionInfo(htd.getName(), HConstants.EMPTY_START_ROW,
       HConstants.EMPTY_END_ROW);
   }

@@ -36,7 +36,7 @@ public class TimestampTestBase extends HBaseTestCase {
   private static final long T1 = 100L;
   private static final long T2 = 200L;

-  private static final byte [] FAMILY_NAME = Bytes.toBytes("colfamily1");
+  private static final byte [] FAMILY_NAME = Bytes.toBytes("colfamily11");
   private static final byte [] QUALIFIER_NAME = Bytes.toBytes("contents");

   private static final byte [] ROW = Bytes.toBytes("row");
@@ -181,7 +181,7 @@ public class TestFromClientSide {
     for (Map.Entry<HRegionInfo, HServerAddress> e: loadedRegions.entrySet()) {
       HRegionInfo hri = e.getKey();
       assertTrue(HConnectionManager.isRegionCached(conf,
-        hri.getTableDesc().getName(), hri.getStartKey()));
+        hri.getTableName(), hri.getStartKey()));
     }

     // delete the temp file
@@ -118,6 +118,7 @@ public class TestMultipleTimestamps {

   @Test
   public void testReseeksWithMultipleColumnOneTimestamp() throws IOException {
+    LOG.info("testReseeksWithMultipleColumnOneTimestamp");
     byte [] TABLE = Bytes.toBytes("testReseeksWithMultiple" +
       "ColumnOneTimestamps");
     byte [] FAMILY = Bytes.toBytes("event_log");
@@ -155,6 +156,8 @@ public class TestMultipleTimestamps {
   @Test
   public void testReseeksWithMultipleColumnMultipleTimestamp() throws
     IOException {
+    LOG.info("testReseeksWithMultipleColumnMultipleTimestamp");
+
     byte [] TABLE = Bytes.toBytes("testReseeksWithMultiple" +
       "ColumnMiltipleTimestamps");
     byte [] FAMILY = Bytes.toBytes("event_log");
@@ -197,6 +200,7 @@ public class TestMultipleTimestamps {

   @Test
   public void testReseeksWithMultipleFiles() throws IOException {
+    LOG.info("testReseeksWithMultipleFiles");
     byte [] TABLE = Bytes.toBytes("testReseeksWithMultipleFiles");
     byte [] FAMILY = Bytes.toBytes("event_log");
     byte [][] FAMILIES = new byte[][] { FAMILY };
@@ -262,8 +266,12 @@ public class TestMultipleTimestamps {
   }

   public void testWithVersionDeletes(boolean flushTables) throws IOException {
+    LOG.info("testWithVersionDeletes_"+
+      (flushTables ? "flush" : "noflush"));
+
     byte [] TABLE = Bytes.toBytes("testWithVersionDeletes_" +
       (flushTables ? "flush" : "noflush"));
+
     byte [] FAMILY = Bytes.toBytes("event_log");
     byte [][] FAMILIES = new byte[][] { FAMILY };

@@ -292,6 +300,8 @@ public class TestMultipleTimestamps {

   @Test
   public void testWithMultipleVersionDeletes() throws IOException {
+    LOG.info("testWithMultipleVersionDeletes");
+
     byte [] TABLE = Bytes.toBytes("testWithMultipleVersionDeletes");
     byte [] FAMILY = Bytes.toBytes("event_log");
     byte [][] FAMILIES = new byte[][] { FAMILY };
@@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
@@ -89,6 +90,7 @@ public class TestScannerTimeout {
    */
   @Test
   public void test2481() throws Exception {
+    LOG.info("START ************ test2481");
     Scan scan = new Scan();
     HTable table =
       new HTable(new Configuration(TEST_UTIL.getConfiguration()), TABLE_NAME);
@@ -109,6 +111,7 @@ public class TestScannerTimeout {
       return;
     }
     fail("We should be timing out");
+    LOG.info("END ************ test2481");
   }

   /**
@@ -118,6 +121,7 @@ public class TestScannerTimeout {
    */
   @Test
   public void test2772() throws Exception {
+    LOG.info("START************ test2772");
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
     Scan scan = new Scan();
     // Set a very high timeout, we want to test what happens when a RS
@@ -134,6 +138,8 @@ public class TestScannerTimeout {
     Result[] results = r.next(NB_ROWS);
     assertEquals(NB_ROWS, results.length);
     r.close();
+    LOG.info("END ************ test2772");
+
   }

   /**
@@ -143,14 +149,24 @@ public class TestScannerTimeout {
    */
   @Test
   public void test3686a() throws Exception {
+    LOG.info("START ************ TEST3686A---1");
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
+    LOG.info("START ************ TEST3686A---1111");
+
     Scan scan = new Scan();
     scan.setCaching(SCANNER_CACHING);
+    LOG.info("************ TEST3686A");
+    MetaReader.fullScanMetaAndPrint(TEST_UTIL.getHBaseCluster().getMaster().getCatalogTracker());
     HTable table = new HTable(TABLE_NAME);
+    LOG.info("START ************ TEST3686A---22");
+
     ResultScanner r = table.getScanner(scan);
+    LOG.info("START ************ TEST3686A---33");
+
     int count = 1;
     r.next();
+    LOG.info("START ************ TEST3686A---44");
+
     // Kill after one call to next(), which got 5 rows.
     rs.abort("die!");
     while(r.next() != null) {
@@ -158,6 +174,7 @@ public class TestScannerTimeout {
     }
     assertEquals(NB_ROWS, count);
     r.close();
+    LOG.info("************ END TEST3686A");
   }

   /**
@@ -168,6 +185,7 @@ public class TestScannerTimeout {
    */
   @Test
   public void test3686b() throws Exception {
+    LOG.info("START ************ test3686b");
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
     Scan scan = new Scan();
     scan.setCaching(SCANNER_CACHING);
@@ -189,5 +207,7 @@ public class TestScannerTimeout {
     }
     assertEquals(NB_ROWS, count);
     r.close();
+    LOG.info("END ************ END test3686b");
+
   }
 }
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.TimestampTestBase;
  * run against an HRegion and against an HTable: i.e. both local and remote.
  */
 public class TestTimestamp extends HBaseClusterTestCase {
-  public static String COLUMN_NAME = "colfamily1";
+  public static String COLUMN_NAME = "colfamily11";

   /** constructor */
   public TestTimestamp() {
@@ -180,7 +180,8 @@ public class TestCoprocessorInterface extends HBaseTestCase {

   HRegion reopenRegion(final HRegion closedRegion, Class<?> implClass)
       throws IOException {
-    HRegion r = new HRegion(closedRegion.getRegionDir(), closedRegion.getLog(),
+    //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
+    HRegion r = new HRegion(closedRegion.getTableDir(), closedRegion.getLog(),
       closedRegion.getFilesystem(), closedRegion.getConf(),
       closedRegion.getRegionInfo(), null);
     r.initialize();
@@ -211,9 +212,9 @@ public class TestCoprocessorInterface extends HBaseTestCase {
     for(byte [] family : families) {
       htd.addFamily(new HColumnDescriptor(family));
     }
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(tableName, null, null, false);
     Path path = new Path(DIR + callingMethod);
-    HRegion r = HRegion.createHRegion(info, path, conf);
+    HRegion r = HRegion.createHRegion(info, path, conf, htd);

     // this following piece is a hack.
     RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
@@ -256,7 +256,7 @@ public class TestRegionObserverInterface {
     try {
       for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
         for (HRegionInfo r : t.getRegionServer().getOnlineRegions()) {
-          if (!Arrays.equals(r.getTableDesc().getName(), tableName)) {
+          if (!Arrays.equals(r.getTableName(), tableName)) {
             continue;
           }
           RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).
@@ -91,9 +91,9 @@ public class TestRegionObserverStacking extends TestCase {
     for(byte [] family : families) {
       htd.addFamily(new HColumnDescriptor(family));
     }
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     Path path = new Path(DIR + callingMethod);
-    HRegion r = HRegion.createHRegion(info, path, conf);
+    HRegion r = HRegion.createHRegion(info, path, conf, htd);
     // this following piece is a hack. currently a coprocessorHost
     // is secretly loaded at OpenRegionHandler. we don't really
     // start a region server here, so just manually create cphost
@@ -138,7 +138,12 @@ public class TestWALObserver {
    */
   @Test
   public void testWALObserverWriteToWAL() throws Exception {
+
     HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE));
+    final HTableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE));
+    HRegion region2 = HRegion.createHRegion(hri,
+      hbaseRootDir, this.conf, htd);
+
     Path basedir = new Path(this.hbaseRootDir, Bytes.toString(TEST_TABLE));
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));
@@ -190,7 +195,7 @@ public class TestWALObserver {

     // it's where WAL write cp should occur.
     long now = EnvironmentEdgeManager.currentTimeMillis();
-    log.append(hri, hri.getTableDesc().getName(), edit, now);
+    log.append(hri, hri.getTableName(), edit, now, htd);

     // the edit shall have been change now by the coprocessor.
     foundFamily0 = false;
@@ -221,16 +226,25 @@ public class TestWALObserver {
    * Test WAL replay behavior with WALObserver.
    */
   @Test
-  public void testWALObserverReplay() throws Exception {
+  public void testWALCoprocessorReplay() throws Exception {
     // WAL replay is handled at HRegion::replayRecoveredEdits(), which is
     // ultimately called by HRegion::initialize()
     byte[] tableName = Bytes.toBytes("testWALCoprocessorReplay");
+    final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(Bytes.toString(tableName));
+    //final HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
+    //final HRegionInfo hri1 = createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
+    final HRegionInfo hri = new HRegionInfo(tableName, null, null);
+
-    final HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
     final Path basedir = new Path(this.hbaseRootDir, Bytes.toString(tableName));
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));

+    final Configuration newConf = HBaseConfiguration.create(this.conf);
+
+    HRegion region2 = HRegion.createHRegion(hri,
+      hbaseRootDir, newConf, htd);
+

     //HLog wal = new HLog(this.fs, this.dir, this.oldLogDir, this.conf);
     HLog wal = createWAL(this.conf);
     //Put p = creatPutWith2Families(TEST_ROW);
@@ -238,40 +252,46 @@ public class TestWALObserver {
     long now = EnvironmentEdgeManager.currentTimeMillis();
     //addFamilyMapToWALEdit(p.getFamilyMap(), edit);
     final int countPerFamily = 1000;
-    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+    //for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+    for (HColumnDescriptor hcd: htd.getFamilies()) {
+      //addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
+      //EnvironmentEdgeManager.getDelegate(), wal);
       addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
-        EnvironmentEdgeManager.getDelegate(), wal);
+        EnvironmentEdgeManager.getDelegate(), wal, htd);
     }
-    wal.append(hri, tableName, edit, now);
+    wal.append(hri, tableName, edit, now, htd);
     // sync to fs.
     wal.sync();

-    final Configuration newConf = HBaseConfiguration.create(this.conf);
     User user = HBaseTestingUtility.getDifferentUser(newConf,
       ".replay.wal.secondtime");
     user.runAs(new PrivilegedExceptionAction() {
       public Object run() throws Exception {
-        runWALSplit(newConf);
+        Path p = runWALSplit(newConf);
+        LOG.info("WALSplit path == " + p);
         FileSystem newFS = FileSystem.get(newConf);
         // Make a new wal for new region open.
         HLog wal2 = createWAL(newConf);
-        HRegion region2 = new HRegion(basedir, wal2, FileSystem.get(newConf),
+        Path tableDir =
+          HTableDescriptor.getTableDir(hbaseRootDir, hri.getTableName());
+        HRegion region = new HRegion(tableDir, wal2, FileSystem.get(newConf),
          newConf, hri, TEST_UTIL.getHBaseCluster().getRegionServer(0));
-        long seqid2 = region2.initialize();
+
+        long seqid2 = region.initialize();
         SampleRegionWALObserver cp2 =
-          (SampleRegionWALObserver)region2.getCoprocessorHost().findCoprocessor(
+          (SampleRegionWALObserver)region.getCoprocessorHost().findCoprocessor(
           SampleRegionWALObserver.class.getName());
         // TODO: asserting here is problematic.
         assertNotNull(cp2);
         assertTrue(cp2.isPreWALRestoreCalled());
         assertTrue(cp2.isPostWALRestoreCalled());
-        region2.close();
+        region.close();
         wal2.closeAndDelete();
         return null;
       }
     });
   }

   /**
    * Test to see CP loaded successfully or not. There is a duplication
    * at TestHLog, but the purpose of that one is to see whether the loaded
@@ -301,7 +321,7 @@ public class TestWALObserver {
       HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]);
       htd.addFamily(a);
     }
-    return new HRegionInfo(htd, null, null, false);
+    return new HRegionInfo(htd.getName(), null, null, false);
   }

   /*
@@ -356,7 +376,7 @@ public class TestWALObserver {
   }
   private void addWALEdits (final byte [] tableName, final HRegionInfo hri,
       final byte [] rowName, final byte [] family,
-      final int count, EnvironmentEdge ee, final HLog wal)
+      final int count, EnvironmentEdge ee, final HLog wal, final HTableDescriptor htd)
     throws IOException {
     String familyStr = Bytes.toString(family);
     for (int j = 0; j < count; j++) {
@@ -365,8 +385,30 @@ public class TestWALObserver {
       WALEdit edit = new WALEdit();
       edit.add(new KeyValue(rowName, family, qualifierBytes,
         ee.currentTimeMillis(), columnBytes));
-      wal.append(hri, tableName, edit, ee.currentTimeMillis());
+      wal.append(hri, tableName, edit, ee.currentTimeMillis(), htd);
     }
   }
+  private HTableDescriptor getBasic3FamilyHTableDescriptor(
+      final String tableName) {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+
+    for (int i = 0; i < TEST_FAMILY.length; i++ ) {
+      HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]);
+      htd.addFamily(a);
+    }
+    return htd;
+  }
+
+  private HTableDescriptor createBasic3FamilyHTD(final String tableName) {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
+    htd.addFamily(a);
+    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
+    htd.addFamily(b);
+    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
+    htd.addFamily(c);
+    return htd;
+  }
+
 }

@@ -33,9 +33,9 @@ public class TestColumnPrefixFilter {
     String family = "Family";
     HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
     htd.addFamily(new HColumnDescriptor(family));
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
-      getTestDir(), TEST_UTIL.getConfiguration());
+      getTestDir(), TEST_UTIL.getConfiguration(), htd);

     List<String> rows = generateRandomWords(100, "row");
     List<String> columns = generateRandomWords(10000, "column");
@@ -89,9 +89,9 @@ public class TestColumnPrefixFilter {
     String family = "Family";
     HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
     htd.addFamily(new HColumnDescriptor(family));
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
-      getTestDir(), TEST_UTIL.getConfiguration());
+      getTestDir(), TEST_UTIL.getConfiguration(), htd);

     List<String> rows = generateRandomWords(100, "row");
     List<String> columns = generateRandomWords(10000, "column");
@@ -75,8 +75,9 @@ public class TestDependentColumnFilter extends TestCase {
     HTableDescriptor htd = new HTableDescriptor(getName());
     htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
     htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    this.region = HRegion.createHRegion(info, testUtil.getTestDir(), testUtil.getConfiguration());
+    this.region = HRegion.createHRegion(info, testUtil.getTestDir(),
+      testUtil.getConfiguration(), htd);
     addData();
   }

@@ -92,8 +92,8 @@ public class TestFilter extends HBaseTestCase {
     HTableDescriptor htd = new HTableDescriptor(getName());
     htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
     htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    this.region = HRegion.createHRegion(info, this.testDir, this.conf);
+    this.region = HRegion.createHRegion(info, this.testDir, this.conf, htd);

     // Insert first half
     for(byte [] ROW : ROWS_ONE) {
@@ -116,9 +116,14 @@ public class TestCatalogJanitor {
    */
   class MockMasterServices implements MasterServices {
     private final MasterFileSystem mfs;
+    private final AssignmentManager asm;

     MockMasterServices(final Server server) throws IOException {
       this.mfs = new MasterFileSystem(server, null);
+      HTableDescriptor htd = new HTableDescriptor("table");
+      htd.addFamily(new HColumnDescriptor("family"));
+      this.asm = Mockito.mock(AssignmentManager.class);
+      Mockito.when(asm.getTableDescriptor("table")).thenReturn(htd);
     }

     @Override
@@ -128,7 +133,7 @@ public class TestCatalogJanitor {

     @Override
     public AssignmentManager getAssignmentManager() {
-      return null;
+      return this.asm;
     }

     @Override
@@ -219,11 +224,14 @@ public class TestCatalogJanitor {
     HTableDescriptor htd = new HTableDescriptor("table");
     htd.addFamily(new HColumnDescriptor("family"));
     HRegionInfo parent =
-      new HRegionInfo(htd, Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
+      new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
+        Bytes.toBytes("eee"));
     HRegionInfo splita =
-      new HRegionInfo(htd, Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
+      new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
+        Bytes.toBytes("ccc"));
     HRegionInfo splitb =
-      new HRegionInfo(htd, Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
+      new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
+        Bytes.toBytes("eee"));
     // Test that when both daughter regions are in place, that we do not
     // remove the parent.
     List<KeyValue> kvs = new ArrayList<KeyValue>();
@@ -353,6 +353,7 @@ public class TestDistributedLogSplitting {
       int num_edits, int edit_size) throws IOException {

     byte[] table = Bytes.toBytes(tname);
+    HTableDescriptor htd = new HTableDescriptor(tname);
     byte[] value = new byte[edit_size];
     for (int i = 0; i < edit_size; i++) {
       value[i] = (byte)('a' + (i % 26));
@@ -369,7 +370,7 @@ public class TestDistributedLogSplitting {
         System.currentTimeMillis(), value));
       // LOG.info("Region " + i + ": " + e);
       j++;
-      log.append(hris.get(j % n), table, e, System.currentTimeMillis());
+      log.append(hris.get(j % n), table, e, System.currentTimeMillis(), htd);
       counts[j % n] += 1;
       // if ((i % 8096) == 0) {
       // log.sync();
@@ -486,7 +486,7 @@ public class TestLoadBalancer {
       Bytes.putInt(start, 0, numRegions << 1);
       Bytes.putInt(end, 0, (numRegions << 1) + 1);
       HRegionInfo hri = new HRegionInfo(
-        new HTableDescriptor(Bytes.toBytes("table" + i)), start, end,
+        Bytes.toBytes("table" + i), start, end,
         false, regionId++);
       regions.add(hri);
     }
@@ -31,18 +31,14 @@ import java.util.TreeSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.executor.RegionTransitionData;
 import org.apache.hadoop.hbase.executor.EventHandler.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
 import org.apache.hadoop.hbase.master.LoadBalancer.RegionPlan;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
@@ -262,12 +258,21 @@ public class TestMasterFailover {
     byte [] enabledTable = Bytes.toBytes("enabledTable");
     HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
     htdEnabled.addFamily(new HColumnDescriptor(FAMILY));

+    FileSystem filesystem = FileSystem.get(conf);
+    Path rootdir = filesystem.makeQualified(
+      new Path(conf.get(HConstants.HBASE_DIR)));
+    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
+    HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
+
     List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
       TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);

     byte [] disabledTable = Bytes.toBytes("disabledTable");
     HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
     htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
+    HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
+    HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
     List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
       TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);

@@ -566,12 +571,23 @@ public class TestMasterFailover {
     byte [] enabledTable = Bytes.toBytes("enabledTable");
     HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
     htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
+    FileSystem filesystem = FileSystem.get(conf);
+    Path rootdir = filesystem.makeQualified(
+      new Path(conf.get(HConstants.HBASE_DIR)));
+
+    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
+      null, null);
+    HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
+
     List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
       TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);

     byte [] disabledTable = Bytes.toBytes("disabledTable");
     HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
     htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
+    HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
+    HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
+
     List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
       TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);

@@ -25,14 +25,12 @@ import java.util.List;
 import java.util.NavigableMap;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hbase.tmpl.master.MasterStatusTmpl;
@@ -56,11 +54,20 @@ public class TestMasterStatusServlet {
     new ServerName("fakehost", 12345, 1234567890);
   static final HTableDescriptor FAKE_TABLE =
     new HTableDescriptor("mytable");
-  static final HRegionInfo FAKE_REGION =
+  static final HRegionInfo FAKE_HRI =
-    new HRegionInfo(FAKE_TABLE, Bytes.toBytes("a"), Bytes.toBytes("b"));
+    new HRegionInfo(FAKE_TABLE.getName(), Bytes.toBytes("a"), Bytes.toBytes("b"));

+  // static final HRegionInfo FAKE_REGION = null;
+
   @Before
   public void setupBasicMocks() {
+    try {
+      HRegion.createHRegion(FAKE_HRI, HBaseTestingUtility.getTestDir(),
+        HBaseConfiguration.create(), FAKE_TABLE);
+    } catch(IOException ioe) {
+
+    }
+
     conf = HBaseConfiguration.create();

     master = Mockito.mock(HMaster.class);
@@ -77,7 +84,7 @@ public class TestMasterStatusServlet {
     NavigableMap<String, RegionState> regionsInTransition =
       Maps.newTreeMap();
     regionsInTransition.put("r1",
-      new RegionState(FAKE_REGION, RegionState.State.CLOSING, 12345L, FAKE_HOST));
+      new RegionState(FAKE_HRI, RegionState.State.CLOSING, 12345L, FAKE_HOST));
     Mockito.doReturn(regionsInTransition).when(am).getRegionsInTransition();
     Mockito.doReturn(am).when(master).getAssignmentManager();

@@ -68,10 +68,10 @@ public class TestColumnSeeking {
         HColumnDescriptor.DEFAULT_BLOOMFILTER);
     HTableDescriptor htd = new HTableDescriptor(table);
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
     HRegion region =
       HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL
-        .getConfiguration());
+        .getConfiguration(), htd);

     List<String> rows = generateRandomWords(10, "row");
     List<String> allColumns = generateRandomWords(10, "column");
@@ -172,10 +172,11 @@ public class TestColumnSeeking {

     HTableDescriptor htd = new HTableDescriptor(table);
     htd.addFamily(new HColumnDescriptor(family));
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+
+    HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
     HRegion region =
       HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL
-        .getConfiguration());
+        .getConfiguration(), htd);

     List<String> rows = generateRandomWords(10, "row");
     List<String> allColumns = generateRandomWords(100, "column");
@@ -82,9 +82,12 @@ public class TestCompactSelection extends TestCase {

     HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("table"));
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);

     HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
-    HRegion region = new HRegion(basedir, hlog, fs, conf, info, null);
+    HRegion.createHRegion(info, basedir, conf, htd);
+    Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));
+    HRegion region = new HRegion(tableDir, hlog, fs, conf, info, null);

     store = new Store(basedir, region, hcd, fs, conf);
     TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
@@ -72,17 +72,17 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
     Path rootdir = filesystem.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
     filesystem.mkdirs(rootdir);
     // Up flush size else we bind up when we use default catalog flush of 16k.
-    HRegionInfo.FIRST_META_REGIONINFO.getTableDesc().
-      setMemStoreFlushSize(64 * 1024 * 1024);
+    HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024);
     HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-      rootdir, this.conf);
+      rootdir, this.conf, HTableDescriptor.META_TABLEDESC);
     // Write rows for three tables 'A', 'B', and 'C'.
     for (char c = 'A'; c < 'D'; c++) {
       HTableDescriptor htd = new HTableDescriptor("" + c);
       final int last = 128;
       final int interval = 2;
       for (int i = 0; i <= last; i += interval) {
-        HRegionInfo hri = new HRegionInfo(htd,
+        HRegionInfo hri = new HRegionInfo(htd.getName(),
           i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i),
           i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval));
         Put put = new Put(hri.getRegionName());
@@ -2794,9 +2794,9 @@ public class TestHRegion extends HBaseTestCase {

     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     Path path = new Path(DIR + "testBloomFilterSize");
-    region = HRegion.createHRegion(info, path, conf);
+    region = HRegion.createHRegion(info, path, conf, htd);

     int num_unique_rows = 10;
     int duplicate_multiplier =2;
@@ -2852,9 +2852,9 @@ public class TestHRegion extends HBaseTestCase {
       HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     Path path = new Path(DIR + "testAllColumnsWithBloomFilter");
-    region = HRegion.createHRegion(info, path, conf);
+    region = HRegion.createHRegion(info, path, conf, htd);

     // For row:0, col:0: insert versions 1 through 5.
     byte row[] = Bytes.toBytes("row:" + 0);
@@ -2897,9 +2897,9 @@ public class TestHRegion extends HBaseTestCase {

     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     Path path = new Path(DIR + "TestDeleteRowWithBloomFilter");
-    region = HRegion.createHRegion(info, path, conf);
+    region = HRegion.createHRegion(info, path, conf, htd);

     // Insert some data
     byte row[] = Bytes.toBytes("row1");
@@ -3033,14 +3033,14 @@ public class TestHRegion extends HBaseTestCase {
     for(byte [] family : families) {
       htd.addFamily(new HColumnDescriptor(family));
     }
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     Path path = new Path(DIR + callingMethod);
     if (fs.exists(path)) {
       if (!fs.delete(path, true)) {
         throw new IOException("Failed delete of " + path);
       }
     }
-    region = HRegion.createHRegion(info, path, conf);
+    region = HRegion.createHRegion(info, path, conf, htd);
   }

   /**
@@ -56,7 +56,7 @@ public class TestHRegionInfo {
   public void testContainsRange() {
     HTableDescriptor tableDesc = new HTableDescriptor("testtable");
     HRegionInfo hri = new HRegionInfo(
-      tableDesc, Bytes.toBytes("a"), Bytes.toBytes("g"));
+      tableDesc.getName(), Bytes.toBytes("a"), Bytes.toBytes("g"));
     // Single row range at start of region
     assertTrue(hri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("a")));
     // Fully contained range
@@ -82,8 +82,8 @@ public class TestRSStatusServlet {
   public void testWithRegions() throws IOException {
     HTableDescriptor htd = new HTableDescriptor("mytable");
     List<HRegionInfo> regions = Lists.newArrayList(
-      new HRegionInfo(htd, Bytes.toBytes("a"), Bytes.toBytes("d")),
+      new HRegionInfo(htd.getName(), Bytes.toBytes("a"), Bytes.toBytes("d")),
-      new HRegionInfo(htd, Bytes.toBytes("d"), Bytes.toBytes("z"))
+      new HRegionInfo(htd.getName(), Bytes.toBytes("d"), Bytes.toBytes("z"))
     );
     Mockito.doReturn(regions).when(rs).getOnlineRegions();

@@ -61,7 +61,7 @@ public class TestResettingCounters {
     HTableDescriptor htd = new HTableDescriptor(table);
     for (byte [] family : families) htd.addFamily(new HColumnDescriptor(family));

-    HRegionInfo hri = new HRegionInfo(htd, null, null, false);
+    HRegionInfo hri = new HRegionInfo(htd.getName(), null, null, false);
     String testDir = HBaseTestingUtility.getTestDir() + "/TestResettingCounters/";
     Path path = new Path(testDir);
     if (fs.exists(path)) {
@@ -69,7 +69,7 @@ public class TestResettingCounters {
         throw new IOException("Failed delete of " + path);
       }
     }
-    HRegion region = HRegion.createHRegion(hri, path, conf);
+    HRegion region = HRegion.createHRegion(hri, path, conf, htd);

     Increment odd = new Increment(rows[0]);
     Increment even = new Increment(rows[0]);
@@ -72,7 +72,7 @@ public class TestScanner extends HBaseTestCase {
   }
   /** HRegionInfo for root region */
   public static final HRegionInfo REGION_INFO =
-    new HRegionInfo(TESTTABLEDESC, HConstants.EMPTY_BYTE_ARRAY,
+    new HRegionInfo(TESTTABLEDESC.getName(), HConstants.EMPTY_BYTE_ARRAY,
       HConstants.EMPTY_BYTE_ARRAY);

   private static final byte [] ROW_KEY = REGION_INFO.getRegionName();
@@ -101,7 +101,7 @@ public class TestScanner extends HBaseTestCase {
     byte [] startrow = Bytes.toBytes("bbb");
     byte [] stoprow = Bytes.toBytes("ccc");
     try {
-      this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+      this.r = createNewHRegion(TESTTABLEDESC, null, null);
       addContent(this.r, HConstants.CATALOG_FAMILY);
       List<KeyValue> results = new ArrayList<KeyValue>();
       // Do simple test of getting one row only first.
@@ -175,7 +175,7 @@ public class TestScanner extends HBaseTestCase {

   public void testFilters() throws IOException {
     try {
-      this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+      this.r = createNewHRegion(TESTTABLEDESC, null, null);
       addContent(this.r, HConstants.CATALOG_FAMILY);
       byte [] prefix = Bytes.toBytes("ab");
       Filter newFilter = new PrefixFilter(prefix);
@@ -203,7 +203,7 @@ public class TestScanner extends HBaseTestCase {
    */
   public void testRaceBetweenClientAndTimeout() throws Exception {
     try {
-      this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+      this.r = createNewHRegion(TESTTABLEDESC, null, null);
       addContent(this.r, HConstants.CATALOG_FAMILY);
       Scan scan = new Scan();
       InternalScanner s = r.getScanner(scan);
@@ -352,7 +352,7 @@ public class TestScanner extends HBaseTestCase {
     assertEquals(0, info.getStartKey().length);
     assertEquals(0, info.getEndKey().length);
     assertEquals(0, Bytes.compareTo(info.getRegionName(), REGION_INFO.getRegionName()));
-    assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc()));
+    //assertEquals(0, info.getTableDesc().compareTo(REGION_INFO.getTableDesc()));
   }

   /** Use a scanner to get the region info and then validate the results */
@@ -448,7 +448,7 @@ public class TestScanner extends HBaseTestCase {
    * @throws Exception
    */
   public void testScanAndSyncFlush() throws Exception {
-    this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+    this.r = createNewHRegion(TESTTABLEDESC, null, null);
     HRegionIncommon hri = new HRegionIncommon(r);
     try {
       LOG.info("Added: " + addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
@@ -472,7 +472,7 @@ public class TestScanner extends HBaseTestCase {
    * @throws Exception
    */
   public void testScanAndRealConcurrentFlush() throws Exception {
-    this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+    this.r = createNewHRegion(TESTTABLEDESC, null, null);
     HRegionIncommon hri = new HRegionIncommon(r);
     try {
       LOG.info("Added: " + addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
@@ -153,7 +153,7 @@ public class TestSplitTransaction {
     int daughtersRowCount = 0;
     for (HRegion r: daughters) {
       // Open so can count its content.
-      HRegion openRegion = HRegion.openHRegion(r.getRegionInfo(),
+      HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
         r.getLog(), r.getConf());
       try {
         int count = countRows(openRegion);
@@ -208,7 +208,7 @@ public class TestSplitTransaction {
     int daughtersRowCount = 0;
     for (HRegion r: daughters) {
       // Open so can count its content.
-      HRegion openRegion = HRegion.openHRegion(r.getRegionInfo(),
+      HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
         r.getLog(), r.getConf());
       try {
         int count = countRows(openRegion);
@@ -252,7 +252,8 @@ public class TestSplitTransaction {
     HTableDescriptor htd = new HTableDescriptor("table");
     HColumnDescriptor hcd = new HColumnDescriptor(CF);
     htd.addFamily(hcd);
-    HRegionInfo hri = new HRegionInfo(htd, STARTROW, ENDROW);
-    return HRegion.openHRegion(hri, wal, TEST_UTIL.getConfiguration());
+    HRegionInfo hri = new HRegionInfo(htd.getName(), STARTROW, ENDROW);
+    HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
+    return HRegion.openHRegion(testdir, hri, wal, TEST_UTIL.getConfiguration());
   }
 }

@@ -127,7 +127,7 @@ public class TestStore extends TestCase {
 
     HTableDescriptor htd = new HTableDescriptor(table);
     htd.addFamily(hcd);
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
     HRegion region = new HRegion(basedir, hlog, fs, conf, info, null);
 

@@ -69,10 +69,6 @@ public class TestWideScanner extends HBaseTestCase {
   }
 
   /** HRegionInfo for root region */
-  public static final HRegionInfo REGION_INFO =
-    new HRegionInfo(TESTTABLEDESC, HConstants.EMPTY_BYTE_ARRAY,
-    HConstants.EMPTY_BYTE_ARRAY);
-
   MiniDFSCluster cluster = null;
   HRegion r;
 
@@ -107,7 +103,7 @@ public class TestWideScanner extends HBaseTestCase {
   public void testWideScanBatching() throws IOException {
     final int batch = 256;
     try {
-      this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+      this.r = createNewHRegion(TESTTABLEDESC, null, null);
       int inserted = addWideContent(this.r);
       List<KeyValue> results = new ArrayList<KeyValue>();
       Scan scan = new Scan();

@@ -233,11 +233,17 @@ public class TestOpenRegionHandler {
     HTableDescriptor htd =
       new HTableDescriptor("testOpenRegionHandlerYankingRegionFromUnderIt");
     final HRegionInfo hri =
-      new HRegionInfo(htd, HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW);
+      new HRegionInfo(htd.getName(), HConstants.EMPTY_END_ROW,
+        HConstants.EMPTY_END_ROW);
+    HRegion region =
+      HRegion.createHRegion(hri, HBaseTestingUtility.getTestDir(), HTU
+        .getConfiguration(), htd);
     OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri) {
       HRegion openRegion() {
         // Open region first, then remove znode as though it'd been hijacked.
-        HRegion region = super.openRegion();
+        //HRegion region = super.openRegion();
+        HRegion region = super.openRegion(HBaseTestingUtility.getTestDir());
+
         // Don't actually open region BUT remove the znode as though it'd
         // been hijacked on us.
         ZooKeeperWatcher zkw = this.server.getZooKeeper();

@@ -38,11 +38,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -145,11 +141,14 @@ public class TestHLog {
     Path tabledir = new Path(hbaseDir, getName());
     fs.mkdirs(tabledir);
     for(int i = 0; i < howmany; i++) {
-      infos[i] = new HRegionInfo(new HTableDescriptor(tableName),
+      infos[i] = new HRegionInfo(tableName,
         Bytes.toBytes("" + i), Bytes.toBytes("" + (i+1)), false);
       fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
       LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
     }
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor("column"));
+
     // Add edits for three regions.
     try {
       for (int ii = 0; ii < howmany; ii++) {
@@ -164,7 +163,7 @@ public class TestHLog {
             System.currentTimeMillis(), column));
           LOG.info("Region " + i + ": " + edit);
           log.append(infos[i], tableName, edit,
-            System.currentTimeMillis());
+            System.currentTimeMillis(), htd);
         }
       }
       log.rollWriter();
@@ -206,13 +205,15 @@ public class TestHLog {
     HLog wal = new HLog(fs, subdir, oldLogDir, conf);
     final int total = 20;
 
-    HRegionInfo info = new HRegionInfo(new HTableDescriptor(bytes),
+    HRegionInfo info = new HRegionInfo(bytes,
       null,null, false);
+    HTableDescriptor htd = new HTableDescriptor();
+    htd.addFamily(new HColumnDescriptor(bytes));
 
     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
-      wal.append(info, bytes, kvs, System.currentTimeMillis());
+      wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
     }
     // Now call sync and try reading.  Opening a Reader before you sync just
     // gives you EOFE.
@@ -230,7 +231,7 @@ public class TestHLog {
     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
-      wal.append(info, bytes, kvs, System.currentTimeMillis());
+      wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
     }
     reader = HLog.getReader(fs, walPath, conf);
     count = 0;
@@ -249,7 +250,7 @@ public class TestHLog {
     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
-      wal.append(info, bytes, kvs, System.currentTimeMillis());
+      wal.append(info, bytes, kvs, System.currentTimeMillis(), htd);
     }
     // Now I should have written out lots of blocks.  Sync then read.
     wal.sync();
@@ -334,17 +335,19 @@ public class TestHLog {
   @Test
   public void testAppendClose() throws Exception {
     byte [] tableName = Bytes.toBytes(getName());
-    HRegionInfo regioninfo = new HRegionInfo(new HTableDescriptor(tableName),
+    HRegionInfo regioninfo = new HRegionInfo(tableName,
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
     Path subdir = new Path(dir, "hlogdir");
     Path archdir = new Path(dir, "hlogdir_archive");
     HLog wal = new HLog(fs, subdir, archdir, conf);
     final int total = 20;
+    HTableDescriptor htd = new HTableDescriptor();
+    htd.addFamily(new HColumnDescriptor(tableName));
 
     for (int i = 0; i < total; i++) {
       WALEdit kvs = new WALEdit();
       kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
-      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis());
+      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd);
     }
     // Now call sync to send the data to HDFS datanodes
     wal.sync();
@@ -460,11 +463,15 @@ public class TestHLog {
         Bytes.toBytes(Integer.toString(i)),
         timestamp, new byte[] { (byte)(i + '0') }));
     }
-    HRegionInfo info = new HRegionInfo(new HTableDescriptor(tableName),
+    HRegionInfo info = new HRegionInfo(tableName,
       row,Bytes.toBytes(Bytes.toString(row) + "1"), false);
-    log.append(info, tableName, cols, System.currentTimeMillis());
+    HTableDescriptor htd = new HTableDescriptor();
+    htd.addFamily(new HColumnDescriptor("column"));
+
+    log.append(info, tableName, cols, System.currentTimeMillis(), htd);
     long logSeqId = log.startCacheFlush();
-    log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId, info.isMetaRegion());
+    log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
+      info.isMetaRegion());
     log.close();
     Path filename = log.computeFilename();
     log = null;
@@ -528,9 +535,11 @@ public class TestHLog {
         Bytes.toBytes(Integer.toString(i)),
         timestamp, new byte[] { (byte)(i + '0') }));
     }
-    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
+    HRegionInfo hri = new HRegionInfo(tableName,
       HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-    log.append(hri, tableName, cols, System.currentTimeMillis());
+    HTableDescriptor htd = new HTableDescriptor();
+    htd.addFamily(new HColumnDescriptor("column"));
+    log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
     long logSeqId = log.startCacheFlush();
     log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
     log.close();
@@ -587,14 +596,17 @@ public class TestHLog {
     DumbWALObserver visitor = new DumbWALObserver();
     log.registerWALActionsListener(visitor);
     long timestamp = System.currentTimeMillis();
-    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
+    HTableDescriptor htd = new HTableDescriptor();
+    htd.addFamily(new HColumnDescriptor("column"));
+
+    HRegionInfo hri = new HRegionInfo(tableName,
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
     for (int i = 0; i < COL_COUNT; i++) {
       WALEdit cols = new WALEdit();
       cols.add(new KeyValue(row, Bytes.toBytes("column"),
         Bytes.toBytes(Integer.toString(i)),
         timestamp, new byte[]{(byte) (i + '0')}));
-      log.append(hri, tableName, cols, System.currentTimeMillis());
+      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
     }
     assertEquals(COL_COUNT, visitor.increments);
     log.unregisterWALActionsListener(visitor);
@@ -602,7 +614,7 @@ public class TestHLog {
     cols.add(new KeyValue(row, Bytes.toBytes("column"),
       Bytes.toBytes(Integer.toString(11)),
       timestamp, new byte[]{(byte) (11 + '0')}));
-    log.append(hri, tableName, cols, System.currentTimeMillis());
+    log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
     assertEquals(COL_COUNT, visitor.increments);
   }
 
@@ -613,9 +625,9 @@ public class TestHLog {
     final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");
 
     HLog log = new HLog(fs, dir, oldLogDir, conf);
-    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
+    HRegionInfo hri = new HRegionInfo(tableName,
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
-    HRegionInfo hri2 = new HRegionInfo(new HTableDescriptor(tableName2),
+    HRegionInfo hri2 = new HRegionInfo(tableName2,
         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
 
     // Add a single edit and make sure that rolling won't remove the file
@@ -667,12 +679,15 @@ public class TestHLog {
 
   private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
     int times) throws IOException {
+    HTableDescriptor htd = new HTableDescriptor();
+    htd.addFamily(new HColumnDescriptor("row"));
+
     final byte [] row = Bytes.toBytes("row");
     for (int i = 0; i < times; i++) {
       long timestamp = System.currentTimeMillis();
       WALEdit cols = new WALEdit();
       cols.add(new KeyValue(row, row, row, timestamp, row));
-      log.append(hri, tableName, cols, timestamp);
+      log.append(hri, tableName, cols, timestamp, htd);
     }
   }
 
@@ -685,6 +700,12 @@ public class TestHLog {
       increments++;
     }
 
+    @Override
+    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
+      //To change body of implemented methods use File | Settings | File Templates.
+      increments++;
+    }
+
     @Override
     public void logRolled(Path newFile) {
       // TODO Auto-generated method stub

@@ -27,11 +27,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.Before;
@@ -88,7 +84,7 @@ public class TestWALObserver {
     list.add(observer);
     DummyWALObserver laterobserver = new DummyWALObserver();
     HLog hlog = new HLog(fs, logDir, oldLogDir, conf, list, null);
-    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(SOME_BYTES),
+    HRegionInfo hri = new HRegionInfo(SOME_BYTES,
         SOME_BYTES, SOME_BYTES, false);
 
     for (int i = 0; i < 20; i++) {
@@ -96,8 +92,11 @@ public class TestWALObserver {
       KeyValue kv = new KeyValue(b,b,b);
       WALEdit edit = new WALEdit();
       edit.add(kv);
+      HTableDescriptor htd = new HTableDescriptor();
+      htd.addFamily(new HColumnDescriptor(b));
+
       HLogKey key = new HLogKey(b,b, 0, 0);
-      hlog.append(hri, key, edit);
+      hlog.append(hri, key, edit, htd);
       if (i == 10) {
         hlog.registerWALActionsListener(laterobserver);
       }
@@ -114,6 +113,7 @@ public class TestWALObserver {
     assertEquals(2, observer.closedCount);
   }
 
+
   /**
    * Just counts when methods are called
    */
@@ -142,5 +142,10 @@ public class TestWALObserver {
     public void logCloseRequested() {
       closedCount++;
     }
+
+    public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
+      //To change body of implemented methods use File | Settings | File Templates.
+    }
+
   }
 }

@@ -133,14 +133,19 @@ public class TestWALReplay {
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));
 
+    HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
+    HRegion region2 = HRegion.createHRegion(hri,
+        hbaseRootDir, this.conf, htd);
+
     final byte [] tableName = Bytes.toBytes(tableNameStr);
     final byte [] rowName = tableName;
 
     HLog wal1 = createWAL(this.conf);
     // Add 1k to each family.
     final int countPerFamily = 1000;
-    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
-      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal1);
+    for (HColumnDescriptor hcd: htd.getFamilies()) {
+      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee,
+          wal1, htd);
     }
     wal1.close();
     runWALSplit(this.conf);
@@ -149,8 +154,9 @@ public class TestWALReplay {
     // Up the sequenceid so that these edits are after the ones added above.
     wal2.setSequenceNumber(wal1.getSequenceNumber());
     // Add 1k to each family.
-    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
-      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal2);
+    for (HColumnDescriptor hcd: htd.getFamilies()) {
+      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily,
+          ee, wal2, htd);
     }
     wal2.close();
     runWALSplit(this.conf);
@@ -187,11 +193,14 @@ public class TestWALReplay {
     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr);
     final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
     deleteDir(basedir);
+    HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
+    HRegion region2 = HRegion.createHRegion(hri,
+        hbaseRootDir, this.conf, htd);
     HLog wal = createWAL(this.conf);
     HRegion region = HRegion.openHRegion(hri, wal, this.conf);
     Path f = new Path(basedir, "hfile");
     HFile.Writer writer = new HFile.Writer(this.fs, f);
-    byte [] family = hri.getTableDesc().getFamilies().iterator().next().getName();
+    byte [] family = htd.getFamilies().iterator().next().getName();
     byte [] row = Bytes.toBytes(tableNameStr);
     writer.append(new KeyValue(row, family, family, row));
     writer.close();
@@ -240,6 +249,9 @@ public class TestWALReplay {
     deleteDir(basedir);
     final byte[] rowName = Bytes.toBytes(tableNameStr);
     final int countPerFamily = 10;
+    final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
+    HRegion region3 = HRegion.createHRegion(hri,
+        hbaseRootDir, this.conf, htd);
 
     // Write countPerFamily edits into the three families.  Do a flush on one
     // of the families during the load of edits so its seqid is not same as
@@ -250,7 +262,7 @@ public class TestWALReplay {
     // HRegionServer usually does this. It knows the largest seqid across all regions.
     wal.setSequenceNumber(seqid);
     boolean first = true;
-    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+    for (HColumnDescriptor hcd: htd.getFamilies()) {
       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
       if (first ) {
         // If first, so we have at least one family w/ different seqid to rest.
@@ -261,7 +273,7 @@ public class TestWALReplay {
     // Now assert edits made it in.
     final Get g = new Get(rowName);
     Result result = region.get(g, null);
-    assertEquals(countPerFamily * hri.getTableDesc().getFamilies().size(),
+    assertEquals(countPerFamily * htd.getFamilies().size(),
       result.size());
     // Now close the region, split the log, reopen the region and assert that
    // replay of log has no effect, that our seqids are calculated correctly so
@@ -285,7 +297,7 @@ public class TestWALReplay {
     // Next test.  Add more edits, then 'crash' this region by stealing its wal
     // out from under it and assert that replay of the log adds the edits back
     // correctly when region is opened again.
-    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+    for (HColumnDescriptor hcd: htd.getFamilies()) {
       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
     }
     // Get count of edits.
@@ -319,7 +331,7 @@ public class TestWALReplay {
     Result result3 = region3.get(g, null);
     // Assert that count of cells is same as before crash.
     assertEquals(result2.size(), result3.size());
-    assertEquals(hri.getTableDesc().getFamilies().size() * countPerFamily,
+    assertEquals(htd.getFamilies().size() * countPerFamily,
       countOfRestoredEdits.get());
 
     // I can't close wal1.  Its been appropriated when we split.
@@ -342,6 +354,10 @@ public class TestWALReplay {
     final Path basedir = new Path(hbaseRootDir, tableNameStr);
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));
+    final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
+    HRegion region2 = HRegion.createHRegion(hri,
+        hbaseRootDir, this.conf, htd);
+
     final HLog wal = createWAL(this.conf);
     final byte[] tableName = Bytes.toBytes(tableNameStr);
     final byte[] rowName = tableName;
@@ -349,8 +365,9 @@ public class TestWALReplay {
 
     // Add 1k to each family.
     final int countPerFamily = 1000;
-    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
-      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal);
+    for (HColumnDescriptor hcd: htd.getFamilies()) {
+      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily,
+          ee, wal, htd);
     }
 
     // Add a cache flush, shouldn't have any effect
@@ -362,14 +379,14 @@ public class TestWALReplay {
     long now = ee.currentTimeMillis();
     edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName,
       now, rowName));
-    wal.append(hri, tableName, edit, now);
+    wal.append(hri, tableName, edit, now, htd);
 
     // Delete the c family to verify deletes make it over.
     edit = new WALEdit();
     now = ee.currentTimeMillis();
     edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now,
       KeyValue.Type.DeleteFamily));
-    wal.append(hri, tableName, edit, now);
+    wal.append(hri, tableName, edit, now, htd);
 
     // Sync.
     wal.sync();
@@ -411,7 +428,7 @@ public class TestWALReplay {
         Get get = new Get(rowName);
         Result result = region.get(get, -1);
         // Make sure we only see the good edits
-        assertEquals(countPerFamily * (hri.getTableDesc().getFamilies().size() - 1),
+        assertEquals(countPerFamily * (htd.getFamilies().size() - 1),
           result.size());
         region.close();
       } finally {
@@ -441,7 +458,7 @@ public class TestWALReplay {
 
   private void addWALEdits (final byte [] tableName, final HRegionInfo hri,
       final byte [] rowName, final byte [] family,
-      final int count, EnvironmentEdge ee, final HLog wal)
+      final int count, EnvironmentEdge ee, final HLog wal, final HTableDescriptor htd)
   throws IOException {
     String familyStr = Bytes.toString(family);
     for (int j = 0; j < count; j++) {
@@ -450,7 +467,7 @@ public class TestWALReplay {
       WALEdit edit = new WALEdit();
       edit.add(new KeyValue(rowName, family, qualifierBytes,
         ee.currentTimeMillis(), columnBytes));
-      wal.append(hri, tableName, edit, ee.currentTimeMillis());
+      wal.append(hri, tableName, edit, ee.currentTimeMillis(), htd);
     }
   }
 
@@ -472,17 +489,9 @@ public class TestWALReplay {
    * @param tableName Name of table to use when we create HTableDescriptor.
    */
   private HRegionInfo createBasic3FamilyHRegionInfo(final String tableName) {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
-    htd.addFamily(a);
-    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
-    htd.addFamily(b);
-    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
-    htd.addFamily(c);
-    return new HRegionInfo(htd, null, null, false);
+    return new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
   }
 
-
   /*
    * Run the split.  Verify only single split file made.
    * @param c
@@ -514,4 +523,15 @@ public class TestWALReplay {
     HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
     return wal;
   }
+
+  private HTableDescriptor createBasic3FamilyHTD(final String tableName) {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
+    htd.addFamily(a);
+    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
+    htd.addFamily(b);
+    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
+    htd.addFamily(c);
+    return htd;
+  }
 }

@@ -125,7 +125,7 @@ public class TestReplicationSourceManager {
     col.setScope(HConstants.REPLICATION_SCOPE_LOCAL);
     htd.addFamily(col);
 
-    hri = new HRegionInfo(htd, r1, r2);
+    hri = new HRegionInfo(htd.getName(), r1, r2);
 
 
   }
@@ -162,7 +162,8 @@ public class TestReplicationSourceManager {
         URLEncoder.encode("regionserver:60020", "UTF8"));
 
     manager.init();
-
+    HTableDescriptor htd = new HTableDescriptor();
+    htd.addFamily(new HColumnDescriptor(f1));
     // Testing normal log rolling every 20
     for(long i = 1; i < 101; i++) {
       if(i > 1 && i % 20 == 0) {
@@ -171,7 +172,7 @@ public class TestReplicationSourceManager {
       LOG.info(i);
       HLogKey key = new HLogKey(hri.getRegionName(),
         test, seq++, System.currentTimeMillis());
-      hlog.append(hri, key, edit);
+      hlog.append(hri, key, edit, htd);
     }
 
     // Simulate a rapid insert that's followed
@@ -184,7 +185,7 @@ public class TestReplicationSourceManager {
     for (int i = 0; i < 3; i++) {
       HLogKey key = new HLogKey(hri.getRegionName(),
         test, seq++, System.currentTimeMillis());
-      hlog.append(hri, key, edit);
+      hlog.append(hri, key, edit, htd);
     }
 
     assertEquals(6, manager.getHLogs().size());
@@ -196,7 +197,7 @@ public class TestReplicationSourceManager {
 
     HLogKey key = new HLogKey(hri.getRegionName(),
       test, seq++, System.currentTimeMillis());
-    hlog.append(hri, key, edit);
+    hlog.append(hri, key, edit, htd);
 
     assertEquals(1, manager.getHLogs().size());
 

@@ -88,7 +88,7 @@ public class TestTableRegionModel extends TestCase {
   public void testGetName() {
     TableRegionModel model = buildTestModel();
     String modelName = model.getName();
-    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(TABLE),
+    HRegionInfo hri = new HRegionInfo(Bytes.toBytes(TABLE),
       START_KEY, END_KEY, false, ID);
     assertEquals(modelName, hri.getRegionNameAsString());
   }

@@ -131,7 +131,7 @@ public class TestHBaseFsck {
       htd, byte[] startKey, byte[] endKey)
   throws IOException {
     HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
-    HRegionInfo hri = new HRegionInfo(htd, startKey, endKey);
+    HRegionInfo hri = new HRegionInfo(htd.getName(), startKey, endKey);
     Put put = new Put(hri.getRegionName());
     put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
       Writables.getBytes(hri));

@@ -136,8 +136,8 @@ public class TestMergeTable {
   private HRegion createRegion(final HTableDescriptor desc,
       byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
   throws IOException {
-    HRegionInfo hri = new HRegionInfo(desc, startKey, endKey);
-    HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration());
+    HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
+    HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc);
     LOG.info("Created region " + region.getRegionNameAsString());
     for(int i = firstRow; i < firstRow + nrows; i++) {
       Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
@@ -156,10 +156,11 @@ public class TestMergeTable {
   protected void setupROOTAndMeta(Path rootdir, final HRegion [] regions)
   throws IOException {
     HRegion root =
-      HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, rootdir, UTIL.getConfiguration());
+      HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, rootdir,
+        UTIL.getConfiguration(), HTableDescriptor.ROOT_TABLEDESC);
     HRegion meta =
       HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
-        UTIL.getConfiguration());
+        UTIL.getConfiguration(), HTableDescriptor.META_TABLEDESC);
     HRegion.addRegionToMETA(root, meta);
     for (HRegion r: regions) {
       HRegion.addRegionToMETA(meta, r);

@@ -71,31 +71,36 @@ public class TestMergeTool extends HBaseTestCase {
      * Create the HRegionInfos for the regions.
      */
     // Region 0 will contain the key range [row_0200,row_0300)
-    sourceRegions[0] = new HRegionInfo(this.desc, Bytes.toBytes("row_0200"),
+    sourceRegions[0] = new HRegionInfo(this.desc.getName(),
+        Bytes.toBytes("row_0200"),
         Bytes.toBytes("row_0300"));
 
     // Region 1 will contain the key range [row_0250,row_0400) and overlaps
     // with Region 0
     sourceRegions[1] =
-      new HRegionInfo(this.desc, Bytes.toBytes("row_0250"),
+      new HRegionInfo(this.desc.getName(),
+          Bytes.toBytes("row_0250"),
           Bytes.toBytes("row_0400"));
 
     // Region 2 will contain the key range [row_0100,row_0200) and is adjacent
     // to Region 0 or the region resulting from the merge of Regions 0 and 1
     sourceRegions[2] =
-      new HRegionInfo(this.desc, Bytes.toBytes("row_0100"),
+      new HRegionInfo(this.desc.getName(),
+          Bytes.toBytes("row_0100"),
          Bytes.toBytes("row_0200"));
 
     // Region 3 will contain the key range [row_0500,row_0600) and is not
     // adjacent to any of Regions 0, 1, 2 or the merged result of any or all
     // of those regions
     sourceRegions[3] =
-      new HRegionInfo(this.desc, Bytes.toBytes("row_0500"),
+      new HRegionInfo(this.desc.getName(),
+          Bytes.toBytes("row_0500"),
           Bytes.toBytes("row_0600"));
 
     // Region 4 will have empty start and end keys and overlaps all regions.
     sourceRegions[4] =
-      new HRegionInfo(this.desc, HConstants.EMPTY_BYTE_ARRAY,
+      new HRegionInfo(this.desc.getName(),
+          HConstants.EMPTY_BYTE_ARRAY,
           HConstants.EMPTY_BYTE_ARRAY);
 
     /*
@@ -134,7 +139,8 @@ public class TestMergeTool extends HBaseTestCase {
      */
     for (int i = 0; i < sourceRegions.length; i++) {
       regions[i] =
-        HRegion.createHRegion(this.sourceRegions[i], this.testDir, this.conf);
+        HRegion.createHRegion(this.sourceRegions[i], this.testDir, this.conf,
+            this.desc);
       /*
        * Insert data
        */